头歌 (Educoder) Flume Interceptor Answers

Level 1: Using the Flume Interceptor


start-dfs.sh
hadoop dfs -mkdir /flume

 


# Define source, channel, sink
# The agent is named a1

# Define source
# The source type is avro, listening on port 8888; the grader sends data to that port in the background
# Intercept the incoming data and keep only the events that start with "y."

# Define channel
# Use a memory channel

# Define sink
# Write to the hdfs://localhost:9000/flume directory
# Roll by time, 3 s
# File format: DataStream

a1.sources = source1
a1.sinks = sink1
a1.channels = channel1
 
# Configure the source component
a1.sources.source1.type = avro
a1.sources.source1.bind = 127.0.0.1
a1.sources.source1.port = 8888
## Suffix appended to fully ingested files, default .COMPLETED
## (note: fileSuffix and deserializer.maxLineLength are spooldir-source options and have no effect on an avro source)
a1.sources.source1.fileSuffix = .FINISHED
## Default is 2048; lines longer than 2048 bytes (2 KB) would otherwise be truncated and data lost
a1.sources.source1.deserializer.maxLineLength = 5120
# Regex filter interceptor
a1.sources.source1.interceptors = i1
a1.sources.source1.interceptors.i1.type = regex_filter
a1.sources.source1.interceptors.i1.regex = ^y.*
# excludeEvents = false: keep only the events that match the regex (drop events that do not start with "y")
# excludeEvents = true: drop the events that match the regex instead
# (a test-client sketch for exercising this filter follows the config below)
a1.sources.source1.interceptors.i1.excludeEvents = false
# Configure the sink component
a1.sinks.sink1.type = hdfs
a1.sinks.sink1.hdfs.path = hdfs://localhost:9000/flume
# Prefix for files written to HDFS
a1.sinks.sink1.hdfs.filePrefix = FlumeData.
# Suffix for files written to HDFS
a1.sinks.sink1.hdfs.fileSuffix = .log
# Number of events to accumulate before flushing to HDFS
a1.sinks.sink1.hdfs.batchSize = 100
a1.sinks.sink1.hdfs.fileType = DataStream
a1.sinks.sink1.hdfs.writeFormat = Text
 
## roll: rules that control when the current file is closed and a new one is started
## Roll by file size (bytes)
a1.sinks.sink1.hdfs.rollSize = 512000
## Roll by number of events
a1.sinks.sink1.hdfs.rollCount = 1000000
## Roll by time interval (seconds): how often a new file is started
a1.sinks.sink1.hdfs.rollInterval = 4

## Rules that control how directories are generated
a1.sinks.sink1.hdfs.round = true
## How many time units pass before a new directory is created
a1.sinks.sink1.hdfs.roundValue = 10
a1.sinks.sink1.hdfs.roundUnit = minute

# Use the local timestamp instead of one taken from the event header
a1.sinks.sink1.hdfs.useLocalTimeStamp = true
 
# Configure the channel component
a1.channels.channel1.type = memory
## Maximum number of events held in the channel
a1.channels.channel1.capacity = 500000
## Capacity reserved for Flume transactions: 600 events
a1.channels.channel1.transactionCapacity = 600

# Bind the source and sink to the channel
a1.sources.source1.channels = channel1
a1.sinks.sink1.channel = channel1
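
To exercise this filter, you can push a few test events into the avro source with Flume's client SDK (the flume-ng-sdk jar). The sketch below is only a hypothetical test harness, not part of the exercise: the class name AvroTestClient and the two payload strings are assumptions. With excludeEvents = false, only the event whose body matches ^y.* should end up under /flume on HDFS.

import java.nio.charset.StandardCharsets;

import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

// Hypothetical test client: sends two events to the avro source on 127.0.0.1:8888.
public class AvroTestClient {
    public static void main(String[] args) throws Exception {
        RpcClient client = RpcClientFactory.getDefaultInstance("127.0.0.1", 8888);
        try {
            // Matches ^y.* -> kept by the interceptor and written to HDFS
            client.append(EventBuilder.withBody("y.keep-me", StandardCharsets.UTF_8));
            // Does not match ^y.* -> dropped, because excludeEvents = false
            client.append(EventBuilder.withBody("x.drop-me", StandardCharsets.UTF_8));
        } finally {
            client.close();
        }
    }
}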
 

Level 2: A Custom Flume Interceptor

Flume1/flume.conf configuration file

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.


# The configuration file needs to define the sources,
# the channels and the sinks.
# Sources, channels and sinks are defined per agent,
# in this case called 'a1'


# Define source, channel, sink
# The agent is named a1


# Define and configure a spooling directory source
# Collect every file dropped into /opt/flume/data

# Configure channel
# Use a memory channel

# Define and configure an HDFS sink
# Write to hdfs://localhost:9000/flume/<filename prefix>/<date taken from the filename>
# (see the note after this config for how those two header values are extracted)
# File format: DataStream
# Roll by time, 3 s
a1.sources=source1  
a1.channels=channel1  
a1.sinks=sink1  
a1.sources.source1.type=spooldir  
a1.sources.source1.spoolDir=/opt/flume/data
a1.sources.source1.fileHeader=true  
a1.sources.source1.basenameHeader=true  
a1.sources.source1.interceptors=i1  
a1.sources.source1.interceptors.i1.type=com.yy.RegexExtractorExtInterceptor$Builder  
a1.sources.source1.interceptors.i1.regex=(.*)\\.(.*)\\.(.*)  
a1.sources.source1.interceptors.i1.extractorHeader=true  
a1.sources.source1.interceptors.i1.extractorHeaderKey=basename  
a1.sources.source1.interceptors.i1.serializers=s1 s2 s3  
a1.sources.source1.interceptors.i1.serializers.s1.name=one  
a1.sources.source1.interceptors.i1.serializers.s2.name=two  
a1.sources.source1.interceptors.i1.serializers.s3.name=three  
a1.sources.source1.channels=channel1  
a1.sinks.sink1.type=hdfs  
a1.sinks.sink1.channel=channel1  
a1.sinks.sink1.hdfs.path=hdfs://localhost:9000/flume/%{one}/%{three}  
a1.sinks.sink1.hdfs.round=true  
a1.sinks.sink1.hdfs.roundValue=10  
a1.sinks.sink1.hdfs.roundUnit=minute  
a1.sinks.sink1.hdfs.fileType=DataStream  
a1.sinks.sink1.hdfs.writeFormat=Text  
a1.sinks.sink1.hdfs.rollInterval=0  
a1.sinks.sink1.hdfs.rollSize=10240  
a1.sinks.sink1.hdfs.rollCount=0  
a1.sinks.sink1.hdfs.idleTimeout=60  
a1.channels.channel1.type=memory  
a1.channels.channel1.capacity=10000  
a1.channels.channel1.transactionCapacity=1000  
a1.channels.channel1.keep-alive=30  
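
How the pieces fit together: with basenameHeader = true the spooldir source puts each file's name into the basename header; the custom interceptor (extractorHeader = true, extractorHeaderKey = basename) matches that header against (.*)\.(.*)\.(.*) and writes the three capture groups into the headers one, two and three; the HDFS sink then substitutes %{one} and %{three} into hdfs.path. A minimal sketch of just the regex step, using a purely hypothetical file name web.log.2023-05-01:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrates how the interceptor's regex splits the basename header.
public class BasenameSplitDemo {
    public static void main(String[] args) {
        Matcher m = Pattern.compile("(.*)\\.(.*)\\.(.*)").matcher("web.log.2023-05-01");
        if (m.find()) {
            System.out.println("one   = " + m.group(1)); // web
            System.out.println("two   = " + m.group(2)); // log
            System.out.println("three = " + m.group(3)); // 2023-05-01
        }
    }
}

A file with that name would therefore land under hdfs://localhost:9000/flume/web/2023-05-01.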

Flume1/src/main/java/com/yy/RegexExtractorExtInterceptor.java source file

 

package com.yy;
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.StringUtils;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;
import org.apache.flume.interceptor.RegexExtractorInterceptorPassThroughSerializer;
import org.apache.flume.interceptor.RegexExtractorInterceptorSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;

public class RegexExtractorExtInterceptor implements Interceptor {  
  
    static final String REGEX = "regex";  
    static final String SERIALIZERS = "serializers";  
  
    // --- begin added code ---
  
    static final String EXTRACTOR_HEADER = "extractorHeader";  
    static final boolean DEFAULT_EXTRACTOR_HEADER = false;  
    static final String EXTRACTOR_HEADER_KEY = "extractorHeaderKey";  
  
    // --- end added code ---
  
    private static final Logger logger = LoggerFactory  
            .getLogger(RegexExtractorExtInterceptor.class);  
  
    private final Pattern regex;  
    private final List<NameAndSerializer> serializers;  
  
    // --- begin added code ---
  
    private final boolean extractorHeader;  
    private final String extractorHeaderKey;  
  
    // --- end added code ---
  
    private RegexExtractorExtInterceptor(Pattern regex,  
            List<NameAndSerializer> serializers, boolean extractorHeader,  
            String extractorHeaderKey) {  
        this.regex = regex;  
        this.serializers = serializers;  
        this.extractorHeader = extractorHeader;  
        this.extractorHeaderKey = extractorHeaderKey;  
    }  
  
    @Override  
    public void initialize() {  
        // NO-OP...  
    }  
  
    @Override  
    public void close() {  
        // NO-OP...  
    }  
  
    @Override  
    public Event intercept(Event event) {  
        String tmpStr;  
        if(extractorHeader)  
        {  
            tmpStr = event.getHeaders().get(extractorHeaderKey);  
        }  
        else  
        {  
            tmpStr=new String(event.getBody(),  
                    Charsets.UTF_8);  
        }  
          
        Matcher matcher = regex.matcher(tmpStr);  
        Map<String, String> headers = event.getHeaders();  
        if (matcher.find()) {  
            for (int group = 0, count = matcher.groupCount(); group < count; group++) {  
                int groupIndex = group + 1;  
                if (groupIndex > serializers.size()) {  
                    if (logger.isDebugEnabled()) {  
                        logger.debug(  
                                "Skipping group {} to {} due to missing serializer",  
                                group, count);  
                    }  
                    break;  
                }  
                NameAndSerializer serializer = serializers.get(group);  
                if (logger.isDebugEnabled()) {  
                    logger.debug("Serializing {} using {}",  
                            serializer.headerName, serializer.serializer);  
                }  
                headers.put(serializer.headerName, serializer.serializer  
                        .serialize(matcher.group(groupIndex)));  
            }  
        }  
        return event;  
    }  
  
    @Override  
    public List<Event> intercept(List<Event> events) {  
        List<Event> intercepted = Lists.newArrayListWithCapacity(events.size());  
        for (Event event : events) {  
            Event interceptedEvent = intercept(event);  
            if (interceptedEvent != null) {  
                intercepted.add(interceptedEvent);  
            }  
        }  
        return intercepted;  
    }  
  
    public static class Builder implements Interceptor.Builder {  
  
        private Pattern regex;  
        private List<NameAndSerializer> serializerList;  
  
        // --- begin added code ---
  
        private boolean extractorHeader;  
        private String extractorHeaderKey;  
  
        // --- end added code ---
  
        private final RegexExtractorInterceptorSerializer defaultSerializer = new RegexExtractorInterceptorPassThroughSerializer();  
  
        @Override  
        public void configure(Context context) {  
            String regexString = context.getString(REGEX);  
            Preconditions.checkArgument(!StringUtils.isEmpty(regexString),  
                    "Must supply a valid regex string");  
  
            regex = Pattern.compile(regexString);  
            regex.pattern();  
            regex.matcher("").groupCount();  
            configureSerializers(context);  
  
            // --- begin added code ---
            extractorHeader = context.getBoolean(EXTRACTOR_HEADER,  
                    DEFAULT_EXTRACTOR_HEADER);  
  
            if (extractorHeader) {  
                extractorHeaderKey = context.getString(EXTRACTOR_HEADER_KEY);  
                Preconditions.checkArgument(  
                        !StringUtils.isEmpty(extractorHeaderKey),  
                        "必须指定要抽取内容的header key");  
            }  
            // --- end added code ---
        }  
  
        private void configureSerializers(Context context) {  
            String serializerListStr = context.getString(SERIALIZERS);  
            Preconditions.checkArgument(  
                    !StringUtils.isEmpty(serializerListStr),  
                    "Must supply at least one name and serializer");  
  
            String[] serializerNames = serializerListStr.split("\\s+");  
  
            Context serializerContexts = new Context(  
                    context.getSubProperties(SERIALIZERS + "."));  
  
            serializerList = Lists  
                    .newArrayListWithCapacity(serializerNames.length);  
            for (String serializerName : serializerNames) {  
                Context serializerContext = new Context(  
                        serializerContexts.getSubProperties(serializerName  
                                + "."));  
                String type = serializerContext.getString("type", "DEFAULT");  
                String name = serializerContext.getString("name");  
                Preconditions.checkArgument(!StringUtils.isEmpty(name),  
                        "Supplied name cannot be empty.");  
  
                if ("DEFAULT".equals(type)) {  
                    serializerList.add(new NameAndSerializer(name,  
                            defaultSerializer));  
                } else {  
                    serializerList.add(new NameAndSerializer(name,  
                            getCustomSerializer(type, serializerContext)));  
                }  
            }  
        }  
  
        private RegexExtractorInterceptorSerializer getCustomSerializer(  
                String clazzName, Context context) {  
            try {  
                RegexExtractorInterceptorSerializer serializer = (RegexExtractorInterceptorSerializer) Class  
                        .forName(clazzName).newInstance();  
                serializer.configure(context);  
                return serializer;  
            } catch (Exception e) {  
                logger.error("Could not instantiate event serializer.", e);  
                Throwables.propagate(e);  
            }  
            return defaultSerializer;  
        }  
  
        @Override  
        public Interceptor build() {  
            Preconditions.checkArgument(regex != null,  
                    "Regex pattern was misconfigured");  
            Preconditions.checkArgument(serializerList.size() > 0,  
                    "Must supply a valid group match id list");  
            return new RegexExtractorExtInterceptor(regex, serializerList,  
                    extractorHeader, extractorHeaderKey);  
        }  
    }  
  
    static class NameAndSerializer {  
        private final String headerName;  
        private final RegexExtractorInterceptorSerializer serializer;  
  
        public NameAndSerializer(String headerName,  
                RegexExtractorInterceptorSerializer serializer) {  
            this.headerName = headerName;  
            this.serializer = serializer;  
        }  
    }  
}  
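
A quick way to sanity-check the interceptor outside of a running agent is to drive it directly with a Flume Context. The harness below is a sketch under assumptions: the class name InterceptorDemo, the event body and the basename value are all illustrative. It mirrors the settings from flume.conf above, runs intercept() on one event, and prints the headers the interceptor adds.

package com.yy;

import java.nio.charset.StandardCharsets;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.interceptor.Interceptor;

// Hypothetical harness that exercises RegexExtractorExtInterceptor without an agent.
public class InterceptorDemo {
    public static void main(String[] args) {
        Context ctx = new Context();
        ctx.put("regex", "(.*)\\.(.*)\\.(.*)");
        ctx.put("serializers", "s1 s2 s3");
        ctx.put("serializers.s1.name", "one");
        ctx.put("serializers.s2.name", "two");
        ctx.put("serializers.s3.name", "three");
        ctx.put("extractorHeader", "true");
        ctx.put("extractorHeaderKey", "basename");

        Interceptor.Builder builder = new RegexExtractorExtInterceptor.Builder();
        builder.configure(ctx);
        Interceptor interceptor = builder.build();

        Event event = EventBuilder.withBody("some log line", StandardCharsets.UTF_8);
        // The spooldir source would normally set this header (basenameHeader = true).
        event.getHeaders().put("basename", "web.log.2023-05-01");

        interceptor.intercept(event);
        // Expected headers: basename=web.log.2023-05-01, one=web, two=log, three=2023-05-01
        System.out.println(event.getHeaders());
    }
}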

 
