Big Data Offline Project Case Study

The offline project consists of the following stages:
1. Data preprocessing
2. Loading the data into the warehouse
3. Data analysis
4. Saving the results to a database
5. Querying and displaying the results
A data format table and sample data are given below; read the data description first, then work through the corresponding tasks.

(Data format table, originally shown as an image: the ten fields, in order, are videoId, uploader, age, category, length, views, rate, ratings, comments, relatedId; see the Hive table definitions in section 2.1 for their types.)
Raw data:
qR8WRLrO2aQ:mienge:406:People & Blogs:599:2788:5:1:0:4UUEKhr6vfA:zvDPXgPiiWI:TxP1eXHJQ2Q:k5Kb1K0zVxU:hLP_mJIMNFg:tzNRSSTGF4o:BrUGfqJANn8:OVIc-mNxqHc:gdxtKvNiYXc:bHZRZ-1A-qk:GUJdU6uHyzU:eyZOjktUb5M:Dv15_9gnM2A:lMQydgG1N2k:U0gZppW_-2Y:dUVU6xpMc6Y:ApA6VEYI8zQ:a3_boc9Z_Pc:N1z4tYob0hM:2UJkU2neoBs
Preprocessed data:
qR8WRLrO2aQ:mienge:406:People&Blogs:599:2788:5:1:0:4UUEKhr6vfA,zvDPXgPiiWI,TxP1eXHJQ2Q,k5Kb1K0zVxU,hLP_mJIMNFg,tzNRSSTGF4o,BrUGfqJANn8,OVIc-mNxqHc,gdxtKvNiYXc,bHZRZ-1A-qk,GUJdU6uHyzU,eyZOjktUb5M,Dv15_9gnM2A,lMQydgG1N2k,U0gZppW_-2Y,dUVU6xpMc6Y,ApA6VEYI8zQ,a3_boc9Z_Pc,N1z4tYob0hM,2UJkU2neoBs
1. Preprocess the raw data into the format shown in the preprocessed sample above.

Looking at the raw data, the fields are separated by ":". A video can belong to several categories, which are separated by "&" with a space on each side, and it can also have several related videos, which are again separated by ":". To make the later analysis easier, we first clean and restructure the data:
that is, keep "&" as the separator between categories but strip the spaces around it, and join the multiple related-video ids with "," instead of ":".

Map:
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class Map extends Mapper<LongWritable, Text, NullWritable, Text> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] fields = value.toString().split(":");

        // A valid record has 9 fixed fields plus at least one related-video id;
        // anything shorter is dropped.
        if (fields.length < 10) {
            return;
        }

        // Strip the spaces around "&" in the category field (index 3),
        // e.g. "People & Blogs" -> "People&Blogs", as in the sample output.
        fields[3] = fields[3].trim().replace(" & ", "&");

        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < fields.length; i++) {
            sb.append(fields[i]);
            if (i < fields.length - 1) {
                // The first 9 fields keep ":" as their separator;
                // the related-video ids (index 9 onwards) are joined with ",".
                sb.append(i < 9 ? ":" : ",");
            }
        }
        context.write(NullWritable.get(), new Text(sb.toString()));
    }
}

Reduce:
import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class Reduce extends Reducer<NullWritable, Text, NullWritable, Text> {

    @Override
    protected void reduce(NullWritable key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Pass every cleaned record through, skipping any empty lines.
        for (Text value : values) {
            if (value.getLength() != 0) {
                context.write(NullWritable.get(), value);
            }
        }
    }
}

Driver:
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class Driver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "Driver");
        job.setJarByClass(Driver.class); // needed when the job is submitted to the cluster

        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("hdfs://node1:8020/video.txt"));

        job.setMapperClass(Map.class);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        job.setOutputFormatClass(TextOutputFormat.class);
        // The output directory must not already exist.
        TextOutputFormat.setOutputPath(job, new Path("hdfs://node1:8020/b"));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
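
To run the job, the three classes are packaged into a jar and submitted with hadoop jar. A minimal sketch, assuming a hypothetical jar name video-etl.jar and the classes in the default package:

hadoop jar video-etl.jar Driver
# the cleaned data lands in hdfs://node1:8020/b/part-r-00000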

2. Load the preprocessed data into Hive
2.1 Create the database and tables
Create a database named: video
Create the raw-data tables:
video table: video_ori    user table: video_user_ori
Create the ORC-format tables:
video table: video_orc    user table: video_user_orc

The create statements for the raw tables:
Create the video_ori video table:

create table video_ori(
videoId string,
uploader string,
age int,
category array<string>,
length int,
views int,
rate float,
ratings int,
comments int,
relatedId array<string>)
row format delimited
fields terminated by ":"
collection items terminated by ","
stored as textfile;
(Hive allows only one collection delimiter, so "," splits the relatedId list; the category field then parses as a single-element array such as ["People&Blogs"].)

Create the video_user_ori user table:
create table video_user_ori(
uploader string,
videos int,
friends int)
row format delimited
fields terminated by ","
stored as textfile;

Write the ORC-format create statements:
Create the video_orc table:

create table video_orc(
videoId string,
uploader string,
age int,
category array<string>,
length int,
views int,
rate float,
ratings int,
comments int,
relatedId array<string>)
stored as orc;
(ORC is a binary columnar format, so the text delimiters used for the raw tables are unnecessary here.)

Create the video_user_orc table:
create table video_user_orc(
uploader string,
videos int,
friends int)
stored as orc;

2.2 Load the preprocessed video data into the raw table video_ori and the raw user data into video_user_ori, then copy both into the ORC tables.
Write the import statements:
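The raw tables are filled with load statements first. A sketch with assumed paths: /b/part-r-00000 is the HDFS output of the MapReduce job above, and /opt/user.txt is a hypothetical location for the raw user file, whose actual path is not given:

load data inpath '/b/part-r-00000' into table video.video_ori;
load data local inpath '/opt/user.txt' into table video.video_user_ori;

With the raw tables populated, copy them into the ORC tables: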
video_orc:
insert into table video_orc select * from video_ori;

video_user_orc:
insert into table video_user_orc select * from video_user_ori;

3. Query the loaded data with Hive SQL
3.1 From the video table, select the videos with a rating of 5 and save the result to /export/rate.txt
(saved to /opt/lianxi here)
Write the SQL statement:

hive -e "select * from video.video_orc where rate=5;" > /opt/lianxi/rate.txt

3.2 From the video table, select the videos with more than 100 comments and save the result to /export/comments.txt
(saved to /opt/lianxi here)
Write the SQL statement:

hive -e "select * from video.video_orc where comments>100;" > /opt/lianxi/comments.txt

Hive prints array columns with brackets and quotes; strip them with sed (a is the raw query output, d the cleaned file):
sed -e 's/\]//g' a > b
sed -e 's/\[//g' b > c
sed -e 's/"//g' c > d
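The three passes can also be collapsed into one in-place command; for example, applied to the rate.txt file from 3.1:

sed -i 's/\[//g; s/\]//g; s/"//g' /opt/lianxi/rate.txt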
4. Save the Hive analysis results to HBase
4.1 Create-table statements
Statement for creating the rate external table:
create external table rate(
videoId string,
uploader string,
age string,
category string,
length string,
views string,
rate string,
ratings string,
comments string,
relatedId string)
row format delimited
fields terminated by "\t"
stored as textfile;

Statement for creating the comments external table:
create external table comments(
videoId string,
uploader string,
age string,
category string,
length string,
views string,
rate string,
ratings string,
comments string,
relatedId string)
row format delimited
fields terminated by "\t"
stored as textfile;

4.2 Data-loading statements
load data local inpath '/opt/5.txt' into table rate;
load data local inpath '/opt/100.txt' into table comments;
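
A quick sanity check that the rows landed in the right columns:

select videoId, rate, comments from rate limit 5;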

4.3 Create the Hive-HBase mapping tables
The first Hive column must be mapped to the HBase row key with ":key", and category/relatedId are declared string here because the external tables above store every column as plain text:

create table video.hbase_rate(
videoId string,
uploader string,
age int,
category string,
length int,
views int,
rate float,
ratings int,
comments int,
relatedId string)
stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
with serdeproperties("hbase.columns.mapping" = ":key,cf:uploader,cf:age,cf:category,cf:length,cf:views,cf:rate,cf:ratings,cf:comments,cf:relatedId")
tblproperties("hbase.table.name" = "hbase_rate");

create table video.hbase_comments(
videoId string,
uploader string,
age int,
category string,
length int,
views int,
rate float,
ratings int,
comments int,
relatedId string)
stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
with serdeproperties("hbase.columns.mapping" = ":key,cf:uploader,cf:age,cf:category,cf:length,cf:views,cf:rate,cf:ratings,cf:comments,cf:relatedId")
tblproperties("hbase.table.name" = "hbase_comments");

4.4 Insert the data:
insert into table hbase_rate select * from rate;

insert into table hbase_comments select * from comments;
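
After the inserts, the rows are also visible from the HBase shell, e.g.:

scan 'hbase_rate', {LIMIT => 1}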

5. Query through the HBase API
5.1 Using the HBase API, scan the hbase_rate table from startRowKey=1 to endRowKey=100 and print the results.
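A minimal sketch for 5.1, reusing the connection setup of the HBase_api class below and passing in a Table handle for hbase_rate; withStartRow/withStopRow are the HBase 2.x names (older clients use setStartRow/setStopRow), and the method name scanRange is hypothetical:

public static void scanRange(Table table) throws IOException {
    // Row keys compare as byte strings, so "100" sorts between "1" and "2".
    Scan scan = new Scan()
            .withStartRow(Bytes.toBytes("1"))    // startRowKey = 1 (inclusive)
            .withStopRow(Bytes.toBytes("100"));  // endRowKey = 100 (exclusive)
    ResultScanner scanner = table.getScanner(scan);
    for (Result result : scanner) {
        for (Cell cell : result.rawCells()) {
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " "
                    + Bytes.toString(CellUtil.cloneQualifier(cell)) + "="
                    + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
    scanner.close();
}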
5.2 Using the HBase API, query only the values of the comments column from the hbase_comments table.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class HBase_api {
    public static void main(String[] args) throws IOException {
        // HBaseConfiguration.create() picks up hbase-site.xml before our overrides.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");
        Connection connection = ConnectionFactory.createConnection(conf);
        // Task 5.2 runs against the hbase_comments table.
        Table table = connection.getTable(TableName.valueOf("hbase_comments"));

        test2(table);
        connection.close();
    }

    // 5.2: scan the table and print only the values of the comments column.
    public static void test2(Table table) throws IOException {
        Scan scan = new Scan();
        ResultScanner scanner = table.getScanner(scan);
        for (Result result : scanner) {
            for (Cell cell : result.rawCells()) {
                if ("comments".equals(Bytes.toString(CellUtil.cloneQualifier(cell)))) {
                    System.out.println(Bytes.toString(CellUtil.cloneValue(cell)));
                    System.out.println("--------------");
                }
            }
        }
        scanner.close();
    }
}

Supplement:

Delete the first line of a file:
sed -i '1d' filename
Combined with a regular expression, sed can replace specified text; the basic script format is:
sed -e 's/old-string/new-string/g'
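
One caveat worth a quick example: "&" is special in a sed replacement (it expands to the whole matched text), so a literal ampersand must be escaped as \&. A hypothetical pass over the raw data from this project:

sed -e 's/ & /\&/g' video.txt
# turns "People & Blogs" into "People&Blogs", mirroring the MapReduce cleanup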
