Flume Development Examples and Custom Components

Replicating and Multiplexing

(Diagram: a1 tails the Hive log and replicates each event to a2, which writes to HDFS, and a3, which writes to the local filesystem.)

Create a config directory for this group and the local output directory used by a3's file_roll sink:

cd job
mkdir g1
mkdir -p /opt/module/data/flume3

a1.conf

# Name the components on this agent
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2

# Replicate the data flow to all channels
a1.sources.r1.selector.type = replicating

# Describe/configure the source
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /opt/module/flume/f1.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /opt/module/hive/logs/hive.log

# Describe the sink

# An avro sink acts as a data sender
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop130
a1.sinks.k1.port = 4141

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop130
a1.sinks.k2.port = 4142

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

a1.channels.c2.type = memory
a1.channels.c2.capacity = 1000
a1.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2

a2.conf

# Name the components on this agent
a2.sources = r1
a2.sinks = k1
a2.channels = c1

# Describe/configure the source

# An avro source acts as a data-receiving server
a2.sources.r1.type = avro
a2.sources.r1.bind = hadoop130
a2.sources.r1.port = 4141

# Describe the sink
a2.sinks.k1.type = hdfs
a2.sinks.k1.hdfs.path = hdfs://hadoop130:9820/flume2/%Y%m%d/%H

# Prefix for files uploaded to HDFS
a2.sinks.k1.hdfs.filePrefix = flume2-

# Whether to round the timestamp down when creating time-based folders
a2.sinks.k1.hdfs.round = true

# Number of time units per new folder
a2.sinks.k1.hdfs.roundValue = 1

# The time unit used for rounding
a2.sinks.k1.hdfs.roundUnit = hour

# Whether to use the local timestamp
a2.sinks.k1.hdfs.useLocalTimeStamp = true

# Number of events to accumulate before flushing to HDFS
a2.sinks.k1.hdfs.batchSize = 100

# File type; compressed formats are also supported
a2.sinks.k1.hdfs.fileType = DataStream

# How often (in seconds) to roll to a new file
a2.sinks.k1.hdfs.rollInterval = 600

# Roll the file at roughly 128 MB
a2.sinks.k1.hdfs.rollSize = 134217700

# Do not roll based on the number of events
a2.sinks.k1.hdfs.rollCount = 0

# Describe the channel
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100


# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1

a3.conf

# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c2

# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = hadoop130
a3.sources.r1.port = 4142

# Describe the sink
a3.sinks.k1.type = file_roll
a3.sinks.k1.sink.directory = /opt/module/data/flume3

# Describe the channel
a3.channels.c2.type = memory
a3.channels.c2.capacity = 1000
a3.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r1.channels = c2
a3.sinks.k1.channel = c2

Start the downstream agents first (a3 and a2 listen on the avro ports that a1's sinks connect to), then start a1:

[vanas@hadoop130 flume]$ bin/flume-ng agent -n a3 -c conf -f job/g1/a3.conf -Dflume.root.logger=INFO,console

[vanas@hadoop130 flume]$ bin/flume-ng agent -n a2 -c conf -f job/g1/a2.conf -Dflume.root.logger=INFO,console

[vanas@hadoop130 flume]$ bin/flume-ng agent -n a1 -c conf -f job/g1/a1.conf -Dflume.root.logger=INFO,console
[vanas@hadoop130 ~]$ cd /opt/module/data/flume3
[vanas@hadoop130 flume3]$ ll
total 140
-rw-rw-r--. 1 vanas vanas      0 May  4 16:15 1588580150231-1
-rw-rw-r--. 1 vanas vanas      0 May  4 16:20 1588580150231-10
-rw-rw-r--. 1 vanas vanas      0 May  4 16:20 1588580150231-11
-rw-rw-r--. 1 vanas vanas      0 May  4 16:21 1588580150231-12
-rw-rw-r--. 1 vanas vanas      0 May  4 16:21 1588580150231-13
-rw-rw-r--. 1 vanas vanas      0 May  4 16:22 1588580150231-14
-rw-rw-r--. 1 vanas vanas      0 May  4 16:22 1588580150231-15
-rw-rw-r--. 1 vanas vanas      0 May  4 16:23 1588580150231-16
-rw-rw-r--. 1 vanas vanas 139590 May  4 16:24 1588580150231-17
-rw-rw-r--. 1 vanas vanas      0 May  4 16:24 1588580150231-18
-rw-rw-r--. 1 vanas vanas      0 May  4 16:16 1588580150231-2
-rw-rw-r--. 1 vanas vanas      0 May  4 16:16 1588580150231-3
-rw-rw-r--. 1 vanas vanas      0 May  4 16:17 1588580150231-4
-rw-rw-r--. 1 vanas vanas      0 May  4 16:17 1588580150231-5
-rw-rw-r--. 1 vanas vanas      0 May  4 16:18 1588580150231-6
-rw-rw-r--. 1 vanas vanas      0 May  4 16:18 1588580150231-7
-rw-rw-r--. 1 vanas vanas      0 May  4 16:19 1588580150231-8
-rw-rw-r--. 1 vanas vanas      0 May  4 16:19 1588580150231-9
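Most of these files are empty because the file_roll sink rolls to a new file every 30 seconds by default, whether or not any events arrived. If that is not wanted, raise the roll interval (or set it to 0 to disable time-based rolling); a minimal sketch of the extra line for a3.conf:

# Roll the output file every 600 seconds instead of the 30-second default (0 disables rolling)
a3.sinks.k1.sink.rollInterval = 600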

Load Balancing and Failover

(Diagram: a1 receives netcat input and sends it through a failover sink group to a2 and a3, both of which log to the console.)

a1.conf

# Name the components on this agent
a1.sources = r1
a1.channels = c1
a1.sinkgroups = g1
a1.sinks = k1 k2

# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = 0.0.0.0
a1.sources.r1.port = 44444

a1.sinkgroups.g1.processor.type = failover
a1.sinkgroups.g1.processor.priority.k1 = 5
a1.sinkgroups.g1.processor.priority.k2 = 10
a1.sinkgroups.g1.processor.maxpenalty = 10000

# Describe the sink
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop130
a1.sinks.k1.port = 4141

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop130
a1.sinks.k2.port = 4142

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinkgroups.g1.sinks = k1 k2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c1
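The heading covers both load balancing and failover, but the sink group above only demonstrates failover. A minimal sketch of the alternative load_balance processor (swap it in for the processor lines above; the selector can be round_robin or random):

a1.sinkgroups.g1.processor.type = load_balance
a1.sinkgroups.g1.processor.backoff = true
a1.sinkgroups.g1.processor.selector = round_robin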

a2.conf

# Name the components on this agent
a2.sources = r1
a2.sinks = k1
a2.channels = c1

# Describe/configure the source
a2.sources.r1.type = avro
a2.sources.r1.bind = hadoop130
a2.sources.r1.port = 4141

# Describe the sink
a2.sinks.k1.type = logger

# Describe the channel
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1

a3.conf

# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c2

# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = hadoop130
a3.sources.r1.port = 4142

# Describe the sink
a3.sinks.k1.type = logger

# Describe the channel
a3.channels.c2.type = memory
a3.channels.c2.capacity = 1000
a3.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r1.channels = c2
a3.sinks.k1.channel = c2
[vanas@hadoop130 flume]$ bin/flume-ng agent -n a3 -c conf -f job/g2/a3.conf -Dflume.root.logger=INFO,console

[vanas@hadoop130 flume]$ bin/flume-ng agent -n a2 -c conf -f job/g2/a2.conf -Dflume.root.logger=INFO,console

[vanas@hadoop130 flume]$ bin/flume-ng agent -n a1 -c conf -f job/g2/a1.conf -Dflume.root.logger=INFO,console
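To verify failover, send some test data and then kill a3 (k2 points at port 4142 and has the higher priority, so a3 receives the events first); the events should then start appearing on a2's console instead. A sketch of the test input:

[vanas@hadoop130 ~]$ nc localhost 44444
hello
world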

Aggregation

(Diagram: a1 on hadoop130 tails the Hive log with an exec source, a2 on hadoop133 listens with a netcat source, and both send to a3 on hadoop134, which logs to the console.)

a1.conf

# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /opt/module/hive/logs/hive.log
a1.sources.r1.shell = /bin/bash -c

# Describe the sink
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop134
a1.sinks.k1.port = 4141

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

a2.conf

# Name the components on this agent
a2.sources = r1
a2.sinks = k1
a2.channels = c1

# Describe/configure the source
a2.sources.r1.type = netcat
a2.sources.r1.bind = hadoop133
a2.sources.r1.port = 44444

# Describe the sink
a2.sinks.k1.type = avro
a2.sinks.k1.hostname = hadoop134
a2.sinks.k1.port = 4141

# Use a channel which buffers events in memory
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1

a3.conf

# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c1

# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = hadoop134
a3.sources.r1.port = 4141

# Describe the sink
a3.sinks.k1.type = logger

# Describe the channel
a3.channels.c1.type = memory
a3.channels.c1.capacity = 1000
a3.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r1.channels = c1
a3.sinks.k1.channel = c1
[vanas@hadoop134 flume]$ bin/flume-ng agent -n a3 -c conf -f job/g3/a3.conf -Dflume.root.logger=INFO,console

[vanas@hadoop133 flume]$ bin/flume-ng agent -n a2 -c conf -f job/g3/a2.conf -Dflume.root.logger=INFO,console

[vanas@hadoop130 flume]$ bin/flume-ng agent -n a1 -c conf -f job/g3/a1.conf -Dflume.root.logger=INFO,console
Distribute the Flume installation (including these configs) to the other nodes:
xsync ../flume/
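To test the aggregation, generate input for both upstream agents and watch a3's console on hadoop134, for example:

# For a2's netcat source
[vanas@hadoop133 ~]$ nc hadoop133 44444
# For a1's exec source (run on hadoop130)
[vanas@hadoop130 ~]$ echo "test line" >> /opt/module/hive/logs/hive.log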

Exercise

a1 (node 102): tail /var/log/*.log with a TAILDIR source and replicate the output to a2 and a3 (a1.conf)

# Name the components on this agent
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2

# Replicate the data flow to all channels
a1.sources.r1.selector.type = replicating

# Describe/configure the source
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /opt/module/flume/f1.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /opt/module/hive/logs/.*\.log

# Describe the sink

# An avro sink acts as a data sender
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop133
a1.sinks.k1.port = 4141

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop134
a1.sinks.k2.port = 4141

# Describe the channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

a1.channels.c2.type = memory
a1.channels.c2.capacity = 1000
a1.channels.c2.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2

a2 (node 103): receive a1's data and write it to HDFS, failing over to a local file_roll sink (a2.conf)

# Name the components on this agent
a2.sources = r1
a2.channels = c1
a2.sinkgroups = g1
a2.sinks = k1 k2

# Describe/configure the source
# An avro source acts as a data-receiving server
a2.sources.r1.type = avro
a2.sources.r1.bind = 0.0.0.0
a2.sources.r1.port = 4141

a2.sinkgroups.g1.processor.type = failover
a2.sinkgroups.g1.processor.priority.k1 = 10
a2.sinkgroups.g1.processor.priority.k2 = 5
a2.sinkgroups.g1.processor.maxpenalty = 10000

# Describe the sink
a2.sinks.k1.type = hdfs
a2.sinks.k1.hdfs.path = /flume4/%Y%m%d/%H
# Prefix for files uploaded to HDFS
a2.sinks.k1.hdfs.filePrefix = flume4-
# Whether to round the timestamp down when creating time-based folders
a2.sinks.k1.hdfs.round = true
# Number of time units per new folder
a2.sinks.k1.hdfs.roundValue = 1
# The time unit used for rounding
a2.sinks.k1.hdfs.roundUnit = hour
# Whether to use the local timestamp
a2.sinks.k1.hdfs.useLocalTimeStamp = true
# Number of events to accumulate before flushing to HDFS
a2.sinks.k1.hdfs.batchSize = 100
# File type; compressed formats are also supported
a2.sinks.k1.hdfs.fileType = DataStream
# How often (in seconds) to roll to a new file
a2.sinks.k1.hdfs.rollInterval = 600
# Roll the file at roughly 128 MB
a2.sinks.k1.hdfs.rollSize = 134217700
# Do not roll based on the number of events
a2.sinks.k1.hdfs.rollCount = 0


a2.sinks.k2.type = file_roll
a2.sinks.k2.sink.directory = /opt/module/data/flume


# Describe the channel
a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinkgroups.g1.sinks = k1 k2
a2.sinks.k1.channel = c1
a2.sinks.k2.channel = c1

a3 (node 104): receive a1's data and log it to the console (a3.conf)

# Name the components on this agent
a3.sources = r1
a3.sinks = k1
a3.channels = c1

# Describe/configure the source
a3.sources.r1.type = avro
a3.sources.r1.bind = 0.0.0.0
a3.sources.r1.port = 4141

# Describe the sink
a3.sinks.k1.type = logger

# Describe the channel
a3.channels.c1.type = memory
a3.channels.c1.capacity = 1000
a3.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a3.sources.r1.channels = c1
a3.sinks.k1.channel = c1
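The exercise does not list the startup commands. Assuming the three configs are saved as job/exercise/a1.conf, a2.conf and a3.conf (paths are illustrative), the agents would again be started downstream-first:

bin/flume-ng agent -n a3 -c conf -f job/exercise/a3.conf -Dflume.root.logger=INFO,console
bin/flume-ng agent -n a2 -c conf -f job/exercise/a2.conf -Dflume.root.logger=INFO,console
bin/flume-ng agent -n a1 -c conf -f job/exercise/a1.conf -Dflume.root.logger=INFO,console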

Custom Interceptor

a1.conf

# Name the components on this agent
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2

# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = 0.0.0.0
a1.sources.r1.port = 44444
# Interceptor chain
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = com.atguigu.interceptor.MyInterceptor$MyBuilder
# Multiplexing selector
a1.sources.r1.selector.type = multiplexing
a1.sources.r1.selector.header = type
a1.sources.r1.selector.mapping.alphabet = c1
a1.sources.r1.selector.mapping.not_alphabet = c2
# Describe the sink
a1.sinks.k1.type = avro
a1.sinks.k1.hostname = hadoop133
a1.sinks.k1.port = 4141

a1.sinks.k2.type = avro
a1.sinks.k2.hostname = hadoop134
a1.sinks.k2.port = 4242

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Use a channel which buffers events in memory
a1.channels.c2.type = memory
a1.channels.c2.capacity = 1000
a1.channels.c2.transactionCapacity = 100


# Bind the source and sink to the channel
a1.sources.r1.channels = c1 c2
a1.sinks.k1.channel = c1
a1.sinks.k2.channel = c2

a2.conf

a2.sources = r1
a2.sinks = k1
a2.channels = c1

a2.sources.r1.type = avro
a2.sources.r1.bind = hadoop133
a2.sources.r1.port = 4141

a2.sinks.k1.type = logger

a2.channels.c1.type = memory
a2.channels.c1.capacity = 1000
a2.channels.c1.transactionCapacity = 100

a2.sinks.k1.channel = c1
a2.sources.r1.channels = c1

a3.conf

a3.sources = r1
a3.sinks = k1
a3.channels = c1

a3.sources.r1.type = avro
a3.sources.r1.bind = hadoop134
a3.sources.r1.port = 4242

a3.sinks.k1.type = logger

a3.channels.c1.type = memory
a3.channels.c1.capacity = 1000
a3.channels.c1.transactionCapacity = 100

a3.sinks.k1.channel = c1
a3.sources.r1.channels = c1
[vanas@hadoop133 flume]$ bin/flume-ng agent -n a2 -c conf -f job/interceptor/a2.conf -Dflume.root.logger=INFO,console
[vanas@hadoop134 flume]$ bin/flume-ng agent -n a3 -c conf -f job/interceptor/a3.conf -Dflume.root.logger=INFO,console
[vanas@hadoop130 flume]$ bin/flume-ng agent -n a1 -c conf -f job/interceptor/a1.conf -Dflume.root.logger=INFO,console


Type test lines into netcat; lines that start with a letter are routed to a2 on hadoop133, everything else to a3 on hadoop134:

[vanas@hadoop130 ~]$ nc localhost 44444
[vanas@hadoop130 ~]$  sudo netstat -tunlp | grep 44444
tcp6       0      0 :::44444                :::*                    LISTEN      16203/java  

Build the project and upload the jar to Flume's lib directory. The interceptor source (MyInterceptor.java):

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

import java.util.List;
import java.util.Map;

/**
 * Adds a different header depending on the first character of the incoming data,
 * so that the multiplexing ChannelSelector can route the event
 *
 * @author Vanas
 * @create 2020-05-05 9:30 AM
 */

public class MyInterceptor implements Interceptor {
    /**
     * Initialization method, called when the interceptor is created
     */
    public void initialize() {

    }

    /**
     * Processes a single event
     *
     * @param event the incoming event
     * @return the processed event
     */
    public Event intercept(Event event) {
//        Get the headers of the incoming event
        Map<String, String> headers = event.getHeaders();
//        Get the body; the header value depends on its first character
        byte[] body = event.getBody();
//        Get the first character
        String s = new String(body);
        char c = s.charAt(0);
//        Check whether the first character is a letter
//        (roughly equivalent to s.matches("^[a-zA-Z].*"))
        if ((c <= 'z' && c >= 'a') || (c <= 'Z' && c >= 'A')) {
//            It is a letter
            headers.put("type", "alphabet");
        } else {
//            It is not a letter
            headers.put("type", "not_alphabet");
        }
//        Return the processed event
        return event;

    }

    /**
     * Batch method: processes a list of events
     *
     * @param list the incoming events
     * @return the processed events
     */
    public List<Event> intercept(List<Event> list) {
        for (Event event : list) {
            intercept(event);
        }
        return list;
    }

    /**
     * Close any resources that need to be released here
     */
    public void close() {

    }

    //    The framework creates Interceptor instances through this Builder
    public static class MyBuilder implements Interceptor.Builder {
        /**
         * Creates an interceptor instance
         *
         * @return a new Interceptor
         */
        public Interceptor build() {
            return new MyInterceptor();
        }

        /**
         * Reads the interceptor's configuration
         *
         * @param context the configuration
         */
        public void configure(Context context) {

        }
    }
}
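After building, copy the jar into Flume's lib directory on hadoop130 so the agent can load the class; a sketch (the jar name depends on your project and is illustrative):

cp target/flume-interceptor-1.0-SNAPSHOT.jar /opt/module/flume/lib/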

Custom Source

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.PollableSource;
import org.apache.flume.conf.Configurable;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.source.AbstractSource;

/**
 * @author Vanas
 * @create 2020-05-05 11:32 AM
 */
public class Mysource extends AbstractSource implements Configurable, PollableSource {
    private String prefix;
    private Long interval;

    /**
     * Pulls an event and hands it to the ChannelProcessor
     *
     * @return the processing status
     * @throws EventDeliveryException
     */
    public Status process() throws EventDeliveryException {
        Status status = null;

        try {
            // This try clause includes whatever Channel/Event operations you want to do

            // Receive new data
//            Pull data through a helper method
            Event e = getSomeData();

            // Store the Event into this Source's associated Channel(s)
            getChannelProcessor().processEvent(e);

            status = Status.READY;
        } catch (Throwable t) {
            // Log exception, handle individual exceptions as needed

            status = Status.BACKOFF;

            // re-throw all Errors
            if (t instanceof Error) {
                throw (Error) t;
            }
        }
        return status;
    }

    /**
     * Pulls data and wraps it in an Event
     *
     * @return the wrapped event
     */
    private Event getSomeData() throws InterruptedException {
//        Simulate pulling data with a random number
        int i = (int) (Math.random() * 1000);
//        Add the configured prefix
        String message = prefix + i;

        Thread.sleep(interval);
//       Wrap it in an Event
        Event event = new SimpleEvent();
        event.setBody(message.getBytes());
        return event;

    }

    /**
     * Backoff increment used when no data can be pulled
     *
     * @return the increment in milliseconds
     */
    public long getBackOffSleepIncrement() {
        return 1000;
    }

    /**
     * Maximum backoff interval
     *
     * @return the interval in milliseconds
     */
    public long getMaxBackOffSleepInterval() {
        return 10000;
    }

    /**
     * From Configurable: reads this custom source's configuration
     *
     * @param context the configuration
     */
    public void configure(Context context) {
        prefix = context.getString("prefff", "XXXX");
        interval = context.getLong("interval", 500L);
    }
}

Build the jar again, upload it to Flume's lib directory, then create the agent config (vim job/mysource.conf):

# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = com.atguigu.interceptor.Mysource
a1.sources.r1.interval = 100
a1.sources.r1.prefff = ABCD-

# Describe the sink
a1.sinks.k1.type = logger

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
[vanas@hadoop130 flume]$ bin/flume-ng agent -n a1 -c conf -f job/mysource.conf -Dflume.root.logger=INFO,console

The logger sink should print an event roughly every 100 ms whose body has the form ABCD-<random number>.

Custom Sink

import org.apache.flume.*;
import org.apache.flume.conf.Configurable;
import org.apache.flume.sink.AbstractSink;

import java.io.IOException;

/**
 * @author Vanas
 * @create 2020-05-05 2:06 PM
 */
public class MySink extends AbstractSink implements Configurable {
    /**
     * When called, pulls events from the channel and processes them
     *
     * @return the processing status
     * @throws EventDeliveryException thrown when processing fails
     */
    public Status process() throws EventDeliveryException {
        Status status = null;

        // Start the transaction
        Channel ch = getChannel();
        Transaction txn = ch.getTransaction();
        txn.begin();
        try {
            // This try clause includes whatever Channel operations you want to do
//            Take an event; take() returns null when the channel is empty
            Event event;
            while ((event = ch.take()) == null) {
                Thread.sleep(100);
            }


//            An event was successfully taken
            // Send the Event to the external repository.
            storeSomeData(event);

            txn.commit();
            status = Status.READY;
        } catch (Throwable t) {
            txn.rollback();

            // Log exception, handle individual exceptions as needed

            status = Status.BACKOFF;

            // re-throw all Errors
            if (t instanceof Error) {
                throw (Error) t;
            }
        } finally {
            txn.close();
        }
        return status;
    }

    /**
     * Stores the data in the external system (here it just prints to the console)
     *
     * @param event the event to store
     */
    private void storeSomeData(Event event) throws IOException {
//       Print the body to the console
        byte[] body = event.getBody();
        System.out.write(body);
        System.out.println();
    }

    /**
     * Configuration method
     *
     * @param context the configuration
     */
    public void configure(Context context) {

    }
}

Build the jar again, upload it to Flume's lib directory, then create the agent config (vim job/mysink.conf):

a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = 0.0.0.0
a1.sources.r1.port = 44444

# Describe the sink
a1.sinks.k1.type = com.atguigu.interceptor.MySink
#a1.sinks.k1.prefix = atguigu:
a1.sinks.k1.suffix = :atguigu

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
[vanas@hadoop130 flume]$ bin/flume-ng agent -n a1 -c conf -f job/mysink.conf -Dflume.root.logger=INFO,console
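Note that mysink.conf sets a1.sinks.k1.suffix (and a commented-out prefix), but MySink.configure() above is empty, so those properties are silently ignored. A minimal sketch of a variant that actually reads and applies them (class name and defaults are illustrative, not from the original post):

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.sink.AbstractSink;

// Hypothetical variant of MySink that wraps each event body with the configured prefix/suffix
public class MySuffixSink extends AbstractSink implements Configurable {
    private String prefix;
    private String suffix;

    public void configure(Context context) {
        // Property names match mysink.conf above; the defaults are illustrative
        prefix = context.getString("prefix", "");
        suffix = context.getString("suffix", "");
    }

    public Status process() throws EventDeliveryException {
        Channel ch = getChannel();
        Transaction txn = ch.getTransaction();
        txn.begin();
        try {
            Event event = ch.take();
            if (event == null) {
                // Nothing in the channel; back off instead of busy-waiting
                txn.commit();
                return Status.BACKOFF;
            }
            // Print the body wrapped with the configured prefix and suffix
            System.out.println(prefix + new String(event.getBody()) + suffix);
            txn.commit();
            return Status.READY;
        } catch (Throwable t) {
            txn.rollback();
            if (t instanceof Error) {
                throw (Error) t;
            }
            return Status.BACKOFF;
        } finally {
            txn.close();
        }
    }
}

To use it, point a1.sinks.k1.type at this class's fully qualified name instead of com.atguigu.interceptor.MySink.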