本文简要介绍用flume做日志采集,然后用hbase做数据存储,最后通过hive查询数据输出文件的一种过程模式。
流程如下图:
假定:mysql已安装并启动,hadoop及zookeeper集群已安装部署并启动。
一、flume 日志采集
1. flume分布式安装(略)
本文采用 apache-flume-1.9.0,一主两从搭建,通过负载均衡方式将采集到的日志输入hbase存储
2. 基本配置
(1) # master 通过exec 方式监听日志文件输出
[root@master conf]# cd /usr/local/apache-flume-1.9.0-bin
[root@master conf]# vim conf/flume-client.conf
# agent1 name
agent1.channels = c1
agent1.sources = r1
agent1.sinks = k1 k2
# set channel
agent1.channels.c1.type = memory
agent1.channels.c1.capacity = 1000
agent1.channels.c1.transactionCapacity = 100
# set source # exec监控command
agent1.sources.r1.channels = c1
agent1.sources.r1.type = exec
agent1.sources.r1.command = tail -F /usr/local/apache-flume-1.9.0-bin/data/test_cluster.log
# set sink1
agent1.sinks.k1.channel = c1
agent1.sinks.k1.type = avro
agent1.sinks.k1.hostname = slave1
agent1.sinks.k1.port = 52020
# set sink2
agent1.sinks.k2.channel = c1
agent1.sinks.k2.type = avro
agent1.sinks.k2.hostname = slave2
agent1.sinks.k2.port = 52020
# set sink group
agent1.sinkgroups = g1
agent1.sinkgroups.g1.sinks = k1 k2
# load balance
agent1.sinkgroups.g1.processor.type = load_balance
agent1.sinkgroups.g1.processor.selector = round_robin
# set failover
#agent1.sinkgroups.g1.processor.type = failover
#agent1.sinkgroups.g1.processor.k1 = 10
#agent1.sinkgroups.g1.processor.k2 = 1
#agent1.sinkgroups.g1.processor.maxpenalty = 10000
# 启动master flume 节点
[root@master apache-flume-1.9.0-bin]# bin/flume-ng agent --conf conf --conf-file conf/flume-client.conf --name agent1 -Dflume.root.logger=INFO,console
(2) #slave1 节点配置
[root@slave1 home]# cd /usr/local/apache-flume-1.9.0-bin
[root@slave1 apache-flume-1.9.0-bin]# vim conf/flume-hbase.conf
# agent a1 name
a1.channels = c1
a1.sources = r1
a1.sinks = k1
# set channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# other node, slave to master
a1.sources.r1.type = avro
a1.sources.r1.bind = slave1
a1.sources.r1.port = 52020
# set sink to hbase
a1.sinks.k1.type = org.apache.flume.sink.hbase.HBaseSink
a1.sinks.k1.table = t_user
a1.sinks.k1.columnFamily = user_profile
a1.sinks.k1.serializer = org.apache.flume.sink.hbase.RegexHbaseEventSerializer
a1.sinks.k1.serializer.regex = \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]
a1.sinks.k1.serializer.colNames = userId,gender,province,birthday,lastLoginTime
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# 启动slave1 flume 节点
[root@slave1 apache-flume-1.9.0-bin]# bin/flume-ng agent --conf conf --conf-file conf/flume-hbase.conf --name a1 -Dflume.root.logger=INFO,console
(3) #slave2 节点配置
[root@slave2 home]# cd /usr/local/apache-flume-1.9.0-bin
[root@slave2 apache-flume-1.9.0-bin]# vim conf/flume-hbase.conf
# agent a1 name
a1.channels = c1
a1.sources = r1
a1.sinks = k1
# set channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# other node, slave to master
a1.sources.r1.type = avro
a1.sources.r1.bind = slave2
a1.sources.r1.port = 52020
# set sink to hbase
a1.sinks.k1.type = org.apache.flume.sink.hbase.HBaseSink
a1.sinks.k1.table = t_user
a1.sinks.k1.columnFamily = user_profile
a1.sinks.k1.serializer = org.apache.flume.sink.hbase.RegexHbaseEventSerializer
a1.sinks.k1.serializer.regex = \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]
a1.sinks.k1.serializer.colNames = userId,gender,province,birthday,lastLoginTime
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# 启动slave2 flume 节点
[root@slave2 apache-flume-1.9.0-bin]# bin/flume-ng agent --conf conf --conf-file conf/flume-hbase.conf --name a1 -Dflume.root.logger=INFO,console
二、hbase 数据存储
1. hbase 集群安装部署(略)
2. shell终端启动hbase,并创建表 t_user
3. 向表 t_user手动插入一些数据
4. 查看表 t_user数据
三、hive 查询输出
1. hive 集群安装部署(略)
2. 终端启动hive
3. 创建hive到hbase的映射表
create external table t_user(id string, userId string, gender string, province string, birthday string, lastLoginTime string)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES (
"hbase.columns.mapping" = ":key,user_profile:userId,user_profile:gender,user_profile:province,user_profile:birthday,user_profile:lastLoginTime")
TBLPROPERTIES("hbase.table.name" = "t_user");
-- 注意:使用 STORED BY 存储处理器(HBaseStorageHandler)时,Hive 不允许同时指定 ROW FORMAT DELIMITED,
-- 列的序列化由 SERDEPROPERTIES 中的 hbase.columns.mapping 决定。
4. 通过hive查询表数据
5. 从master 节点模拟日志文件输出
输出日志格式参考flume slave 节点正则匹配格式:
a1.sinks.k1.serializer.regex = \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]\\ \\[(.*?)\\]
6. hbase shell 终端查看数据是否存入hbase
7. hive查看是否同步hbase最新数据
8.写脚本提取数据文件(可以写crontab定时执行)
将空格分隔符替换为逗号