1. Getting-started demo
1.1. Create the u_data table:
create table u_data (
  userid int,
  movieid int,
  rating int,
  unixtime string
)
row format delimited
fields terminated by '\t'
stored as textfile;
1.2. Download the MovieLens 100K dataset from GroupLens:
wget http://files.grouplens.org/datasets/movielens/ml-100k.zip
1.3. Unpack the archive: unzip ml-100k.zip
1.4. Load the data into the table:
load data local inpath '/root/ml-100k/u.data' overwrite into table u_data;
1.5. Count the loaded rows:
SELECT COUNT(*) FROM u_data;
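As a quick sanity check (the MovieLens 100K dataset contains 100,000 ratings, so the count above should return 100000), a few loaded rows can also be inspected:
-- peek at a handful of rows to confirm the columns were parsed correctly
SELECT * FROM u_data LIMIT 5;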
1.6. Create weekday_mapper.py:
import sys
import datetime

# Read tab-separated (userid, movieid, rating, unixtime) rows from stdin and
# replace the unix timestamp with the ISO weekday (1 = Monday ... 7 = Sunday).
for line in sys.stdin:
    line = line.strip()
    userid, movieid, rating, unixtime = line.split('\t')
    weekday = datetime.datetime.fromtimestamp(float(unixtime)).isoweekday()
    print('\t'.join([userid, movieid, rating, str(weekday)]))
1.7. Use the Python script in a TRANSFORM query to process and analyze the data:
CREATE TABLE u_data_new (
  userid INT,
  movieid INT,
  rating INT,
  weekday INT
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t';
ADD FILE weekday_mapper.py;
INSERT OVERWRITE TABLE u_data_new
SELECT
TRANSFORM (userid, movieid, rating, unixtime)
USING 'python weekday_mapper.py'
AS (userid, movieid, rating, weekday)
FROM u_data;
SELECT weekday, COUNT(*) FROM u_data_new GROUP BY weekday;
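For comparison, the weekday can often be derived without an external script by using Hive's built-in date functions. A sketch, assuming a Hive version whose date_format follows SimpleDateFormat patterns (pre-4.0), where the 'u' pattern yields the ISO day of week (1 = Monday), matching isoweekday() in the script above:
-- weekday distribution computed directly from the unix timestamp,
-- without the TRANSFORM / weekday_mapper.py step
SELECT date_format(from_unixtime(CAST(unixtime AS BIGINT)), 'u') AS weekday,
       COUNT(*) AS cnt
FROM u_data
GROUP BY date_format(from_unixtime(CAST(unixtime AS BIGINT)), 'u');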
2. Differences between internal (managed) tables and external tables
Creating a table without any modifier creates an internal (managed) table by default:
create table employee(
  id string,
  name string,
  address Map<string,string>,
  sex int,
  hobby Array<string>
)
partitioned by (classes string)
row format delimited
fields terminated by '\t'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
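Whether a table is managed (internal) or external can be checked from its metadata; the exact output layout varies by Hive version:
-- for a managed table the output contains a line like "Table Type: MANAGED_TABLE";
-- an external table shows EXTERNAL_TABLE instead
DESCRIBE FORMATTED employee;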
Test data (/root/test.txt, fields separated by tabs):
1 aa jobaddr:nanchang,birthadd:jiujiang 0 work,read,play
2 bb jobaddr:nanchang,birthadd:jiujiang 1 work,read,play
load data local inpath '/root/test.txt' into table employee partition(classes='0310');
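Once the partition is loaded, it can be listed and the map/array columns queried with Hive's complex-type access syntax:
-- list partitions, then read the map and array columns from the new partition
SHOW PARTITIONS employee;
SELECT id, name, address['jobaddr'], hobby[0]
FROM employee
WHERE classes = '0310';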
drop table employee;
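After the drop, the managed table's directory is removed from HDFS as well. A quick check from the Hive CLI, assuming the default warehouse location /user/hive/warehouse:
-- the employee directory no longer appears in the listing
dfs -ls /user/hive/warehouse/;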
External table: add the external keyword and create the table the same way:
create external table employee(
  id string,
  name string,
  address Map<string,string>,
  sex int,
  hobby Array<string>
)
partitioned by (classes string)
row format delimited
fields terminated by '\t'
collection items terminated by ','
map keys terminated by ':'
stored as textfile;
Load data as before:
load data local inpath '/root/test.txt' into table employee partition(classes='0310');
Drop the table:
drop table employee;
Summary: dropping an internal (managed) table also deletes its directory (and data) on HDFS; dropping an external table does not delete the corresponding HDFS directory, so the data files remain, but the table can no longer be queried with SELECT because its metadata has been removed.
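Because only the metadata is removed, the external table's data can be made queryable again. A minimal sketch, assuming the default warehouse location and that the table is recreated with exactly the same CREATE EXTERNAL TABLE statement as above:
-- after recreating the external table with the same definition,
-- re-register the partition metadata that was lost with the drop
MSCK REPAIR TABLE employee;
-- or explicitly: ALTER TABLE employee ADD PARTITION (classes='0310');
-- the previously loaded data is visible again
SELECT COUNT(*) FROM employee WHERE classes = '0310';
In practice, external tables are usually created with an explicit LOCATION clause pointing outside the warehouse directory, which makes this separation of data files and metadata explicit.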