【若泽大数据实战 Day 18】Hive - Common Table Creation Statements and Syntax
- Create a database
CREATE (DATABASE|SCHEMA) [IF NOT EXISTS] database_name
[COMMENT database_comment]
[LOCATION hdfs_path]
[WITH DBPROPERTIES (property_name=property_value, ...)];
--Example
create database if not exists test
comment 'test'
location '/hive/test'
with dbproperties ('date'='20180514','creator'='hadoop');
- Drop a database
DROP (DATABASE|SCHEMA) [IF EXISTS] database_name [RESTRICT|CASCADE];
--Notes
--[RESTRICT|CASCADE] defaults to RESTRICT
--RESTRICT: the database is not dropped if it still contains any tables
--CASCADE: cascading drop; any tables it contains are dropped along with the database
--Example
drop database if exists test cascade;
- View databases
--1
hive> desc database test;
OK
test test hdfs://192.168.242.201:9000/hive/test hadoop USER
Time taken: 0.073 seconds, Fetched: 1 row(s)
--2
hive> desc database extended test;
OK
test test hdfs://192.168.242.201:9000/hive/test hadoop USER {date=20180514, creator=hadoop}
Time taken: 0.057 seconds, Fetched: 1 row(s)
--3
hive> show databases like 'test*';
OK
test
Time taken: 0.095 seconds, Fetched: 1 row(s)
- Create a table
--Syntax
CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name -- (Note: TEMPORARY available in Hive 0.14.0 and later)
[(col_name data_type [COMMENT col_comment], ... [constraint_specification])]
[COMMENT table_comment]
[PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)]
[CLUSTERED BY (col_name, col_name, ...) [SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
[SKEWED BY (col_name, col_name, ...) -- (Note: Available in Hive 0.10.0 and later)]
ON ((col_value, col_value, ...), (col_value, col_value, ...), ...)
[STORED AS DIRECTORIES]
[
[ROW FORMAT row_format]
[STORED AS file_format]
| STORED BY 'storage.handler.class.name' [WITH SERDEPROPERTIES (...)] -- (Note: Available in Hive 0.6.0 and later)
]
[LOCATION hdfs_path]
[TBLPROPERTIES (property_name=property_value, ...)] -- (Note: Available in Hive 0.6.0 and later)
[AS select_statement]; -- (Note: Available in Hive 0.5.0 and later; not supported for external tables)
--Example
create table emp(
empno int,
ename string,
job string,
mgr int,
hiredate string,
sal double,
comm double,
deptno int)
partitioned by(age int comment "hahaha", id int comment "heiheihei")
--the clause above creates a partitioned table
clustered by (sal, comm) into 4 buckets
--the clause above creates a bucketed table
row format delimited fields terminated by '\t' --field delimiter between columns; Hive's default is '\001' (Ctrl-A)
LOCATION '/hive/test/emp'; --storage path for the table on HDFS
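Once the table exists, its definition can be checked with standard Hive commands (output omitted here; the table name matches the example above):
desc formatted emp;   --shows columns, partition columns, bucketing, location, and whether the table is managed or external
show partitions emp;  --lists the table's partitions (empty until partitions are added or data is loaded)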
- Drop a table
DROP TABLE [IF EXISTS] table_name [PURGE];
--Notes
--Dropping a managed (internal) table removes both the table's metadata and its data; the data is moved to the HDFS trash (the .Trash/Current directory), provided Trash is enabled. Dropping an external table removes only the metadata.
--Data recovery
--To recover a table dropped by mistake, recreate a table with the same structure and re-import the deleted data from HDFS.
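A minimal sketch of that recovery procedure, assuming HDFS Trash is enabled; the schema, table location, and trash path below are illustrative and vary by cluster and user:
--1. Recreate a table with the same structure as the dropped one
create table emp(id int, name string) row format delimited fields terminated by '\t' location '/hive/test/emp';
--2. Move the deleted data back out of the HDFS trash into the table directory
hdfs dfs -mv /user/hadoop/.Trash/Current/hive/test/emp/* /hive/test/emp/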
- Internal and external tables
- Internal table
Dropping an internal table deletes both the metadata in the metastore and the data on HDFS.
Tables are created as internal (managed) tables by default.
- External table
Dropping an external table deletes only the metadata in the metastore.
- Recommendation
External tables are recommended, but if permissions are configured sensibly in your organization it makes little difference which kind you use; Hive + Sentry can be used to grant different permissions to different users. A sketch of an external table follows below.
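A minimal external-table sketch; the table name and the HDFS path /hive/test/emp_ext are hypothetical:
create external table emp_ext(id int, name string)
row format delimited fields terminated by '\t'
location '/hive/test/emp_ext';  --data files under this path are read in place
drop table emp_ext;             --removes only the metadata; the files under /hive/test/emp_ext stay on HDFS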
- Creating a table and loading data
Method 1:
create table emp(id int, name string) row format delimited fields terminated by '\t' location 'hdfs_path';
hdfs dfs -put <data file> hdfs_path
Method 2:
create table emp(id int, name string) row format delimited fields terminated by '\t';
load data [local] inpath '<path on the local filesystem or HDFS>' [overwrite] into table emp;
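Continuing the second method with concrete values: the local file /tmp/emp.txt is hypothetical and is assumed to contain tab-separated rows matching the table definition above:
load data local inpath '/tmp/emp.txt' overwrite into table emp;  --LOCAL reads from the local filesystem; OVERWRITE replaces any existing data
select * from emp;  --verify that the rows were loaded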