1. netcat
vim flume01.conf
a1.sources = s1
a1.channels = c1
a1.sinks = k1
a1.sources.s1.type = netcat
a1.sources.s1.bind = 111.111.111.111
a1.sources.s1.port = 6666
a1.channels.c1.type = memory
a1.channels.c1.capacity = 100
a1.channels.c1.transactionCapacity = 10
a1.sinks.k1.type = logger
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1
flume-ng agent -n a1 -c conf/ -f /root/flume_job/logconf/flume01.conf -Dflume.root.logger=INFO,console
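A quick smoke test, assuming the nc (netcat) client is available: send a line to the bound address and port from another shell, and it should appear in the logger sink's console output.
echo "hello flume" | nc 111.111.111.111 6666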
2. spooldir -> hdfs
a1.sources = s1
a1.channels = c1
a1.sinks = k1
a1.sources.s1.type = spooldir
a1.sources.s1.spoolDir = /root/data/flume
a1.sources.s1.ignorePattern = ^(.)*\\.bak$
a1.sources.s1.fileSuffix = .bak
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/software/flume190/mydata/checkpoint
a1.channels.c1.dataDirs = /opt/software/flume190/mydata/data
a1.channels.c1.capacity = 100000
a1.channels.c1.transactionCapacity = 10000
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://192.168.181.180:9820/flume/events/fakeorder/%Y-%m-%d/%H
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.filePrefix = log_%Y%m%d_%H
a1.sinks.k1.hdfs.fileSuffix = .log
a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.writeFormat = Text
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.rollSize = 1000
a1.sinks.k1.hdfs.threadsPoolSize = 10
a1.sinks.k1.hdfs.idleTimeout = 0
a1.sinks.k1.hdfs.minBlockReplicas = 1
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1
flume-ng agent -n a1 -c conf/ -f /root/flume_job/logconf/flume02.conf -Dflume.root.logger=INFO,console
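To exercise the spooldir source, drop a file into the spool directory (sample.log is a placeholder name) and check the result; once fully ingested the file is renamed with the configured .bak suffix, which the ignorePattern then excludes from re-ingestion.
cp sample.log /root/data/flume/
hdfs dfs -ls /flume/events/fakeorder/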
3. avro
a1.sources = s1
a1.channels = c1
a1.sinks = k1
a1.sources.s1.type = avro
a1.sources.s1.bind = 192.168.181.180
a1.sources.s1.port = 7777
a1.sources.s1.threads = 5
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/software/flume190/mydata/checkpoint
a1.channels.c1.dataDirs = /opt/software/flume190/mydata/data
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://192.168.140.180:9820/flume/events/avroevent/%Y-%m-%d/%H
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.filePrefix = log_%Y%m%d_%H
a1.sinks.k1.hdfs.fileSuffix = .log
a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.writeFormat = Text
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.batchSize = 100
a1.sinks.k1.hdfs.threadsPoolSize = 10
a1.sinks.k1.hdfs.idleTimeout = 0
a1.sinks.k1.hdfs.minBlockReplicas = 1
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1
flume-ng agent -n a1 -c conf/ -f /root/flume_job/logconf/flume03.conf -Dflume.root.logger=INFO,console
flume-ng avro-client -H 192.168.140.180 -p 7777 -c conf/ -F /root/data/flume/prohead1000.copy
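After the avro-client call above, the delivered events should show up under the date-bucketed HDFS path:
hdfs dfs -ls /flume/events/avroevent/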
4. taildir source
a1.sources = s1
a1.channels = c1
a1.sinks = k1
a1.sources.s1.type = TAILDIR
a1.sources.s1.filegroups = f1 f2
a1.sources.s1.filegroups.f1 = /root/data/flume/tail01/pro.*\\.log
a1.sources.s1.filegroups.f2 = /root/data/flume/tail02/.*\\.log
a1.sources.s1.positionFile = /opt/software/flume190/data/taildir/taildir_position.json
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/software/flume190/mydata/checkpoint
a1.channels.c1.dataDirs = /opt/software/flume190/mydata/data
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 1000
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://192.168.140.180:9820/flume/events/tailevent/%Y-%m-%d/%H
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.filePrefix = log_%Y%m%d_%H
a1.sinks.k1.hdfs.fileSuffix = .log
a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.writeFormat = Text
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.batchSize = 1000
a1.sinks.k1.hdfs.threadsPoolSize = 4
a1.sinks.k1.hdfs.idleTimeout = 0
a1.sinks.k1.hdfs.minBlockReplicas = 1
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1
flume-ng agent -n a1 -c conf/ -f /root/flume_job/logconf/flume04.conf -Dflume.root.logger=INFO,console
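Because TAILDIR follows appends, events can be generated by appending to any file that matches a filegroup pattern (pro01.log is a placeholder matching f1); per-file offsets are recorded in the positionFile JSON, so the source resumes from where it stopped after a restart.
echo "a new log line" >> /root/data/flume/tail01/pro01.log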
5. hive sink
SET hive.support.concurrency = true;
SET hive.enforce.bucketing = true;
SET hive.exec.dynamic.partition.mode = nonstrict;
SET hive.txn.manager = org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
SET hive.compactor.initiator.on = true;
SET hive.compactor.worker.threads = 1;
use test;
create table familyinfo(
family_id int,
family_name string,
family_age int,
family_gender string
)
partitioned by(intime string)
clustered by(family_gender) into 2 buckets
row format delimited
fields terminated by ','
lines terminated by '\n'
stored as orc
tblproperties('transactional'='true');
alter table familyinfo add partition(intime='21-07-05-15');
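The partition value follows the same %y-%m-%d-%H pattern the sink uses below; a quick sanity check:
hive -e "show partitions test.familyinfo;"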
cp /opt/software/hive312/hcatalog/share/hcatalog/*.jar /opt/software/flume190/lib/
a1.sources = s1
a1.channels = c1
a1.sinks = k1
a1.sources.s1.type = TAILDIR
a1.sources.s1.filegroups = f1
a1.sources.s1.filegroups.f1 = /root/data/flume/tail03/.*\\.log
a1.sources.s1.positionFile = /opt/software/flume190/data/taildir/taildir_position.json
a1.sources.s1.batchSize = 10
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/software/flume190/mydata/checkpoint
a1.channels.c1.dataDirs = /opt/software/flume190/mydata/data
a1.channels.c1.capacity = 100
a1.channels.c1.transactionCapacity = 10
a1.sinks.k1.type = hive
a1.sinks.k1.hive.metastore = thrift://192.168.140.180:9083
a1.sinks.k1.hive.database = test
a1.sinks.k1.hive.table = familyinfo
a1.sinks.k1.hive.partition = %y-%m-%d-%H
a1.sinks.k1.useLocalTimeStamp = true
a1.sinks.k1.autoCreatePartitions = false
a1.sinks.k1.batchSize = 10
a1.sinks.k1.round = true
a1.sinks.k1.roundValue = 10
a1.sinks.k1.roundUnit = minute
a1.sinks.k1.serializer = DELIMITED
a1.sinks.k1.serializer.delimiter = ","
a1.sinks.k1.serializer.serdeSeparator = ','
a1.sinks.k1.serializer.fieldnames = family_id,family_name,family_age,family_gender
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1
flume-ng agent -n a1 -c conf/ -f /root/flume_job/logconf/flume05.conf -Dflume.root.logger=INFO,console
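With the agent running, append a comma-delimited row whose fields line up with serializer.fieldnames (family.log is a placeholder matching f1), then query the table:
echo "1,zhangsan,25,male" >> /root/data/flume/tail03/family.log
hive -e "select * from test.familyinfo;"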
6. hbase sink
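The sink below writes to the test:stubs table, so the test namespace must exist first; create it in the hbase shell if it is missing:
create_namespace 'test'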
create 'test:stubs', 'base'
a1.sources = s1
a1.channels = c1
a1.sinks = k1
a1.sources.s1.type = TAILDIR
a1.sources.s1.filegroups = f1
a1.sources.s1.filegroups.f1 = /root/data/flume/tail04/.*\\.log
a1.sources.s1.positionFile = /opt/software/flume190/data/taildir/taildir_position.json
a1.sources.s1.batchSize = 10
a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/software/flume190/mydata/checkpoint
a1.channels.c1.dataDirs = /opt/software/flume190/mydata/data
a1.channels.c1.capacity = 100
a1.channels.c1.transactionCapacity = 10
a1.sinks.k1.type = hbase2
a1.sinks.k1.table = test:stubs
a1.sinks.k1.columnFamily = base
a1.sinks.k1.serializer = org.apache.flume.sink.hbase2.RegexHBase2EventSerializer
a1.sinks.k1.serializer.regex = (.*),(.*),(.*),(.*)
a1.sinks.k1.serializer.colNames = ROW_KEY,name,age,gender
a1.sinks.k1.serializer.rowKeyIndex = 0
a1.sinks.k1.batchSize = 10
a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1
flume-ng agent -n a1 -c conf/ -f /root/flume_job/logconf/flume06.conf -Dflume.root.logger=INFO,console
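Appending a matching row (stu.log is a placeholder matching f1) writes one cell per capture group, with the first group used as the row key since rowKeyIndex = 0; a scan should then show the new row.
echo "r001,zhangsan,25,male" >> /root/data/flume/tail04/stu.log
echo "scan 'test:stubs'" | hbase shell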