==================spout================================
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;
import java.util.List;
import java.util.Map;
import java.util.Random;
/**
* Created by ke on 2016/11/7.
*/
public class RandomWordSpout extends BaseRichSpout {

    // Word pool; words[i] is emitted together with its index i.
    private final String[] words = {"first", "one", "second", "two", "third", "three"};
    private SpoutOutputCollector collector;
    // Reuse a single Random instance instead of allocating a new one on
    // every nextTuple() call (the original re-created it each invocation).
    private final Random rand = new Random();

    /**
     * Declares the schema of the tuples this spout emits.
     *
     * @param declarer receives the field names ("word", "index")
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word", "index"));
    }

    /**
     * One-time initialization; called once before nextTuple().
     * Stores the collector used to emit tuples downstream.
     *
     * @param conf      topology configuration
     * @param context   task/topology context
     * @param collector collector used to emit tuples
     */
    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    /**
     * Core spout logic: repeatedly emits a randomly chosen word plus its
     * index in the pool, throttled to one tuple every 5 seconds.
     */
    @Override
    public void nextTuple() {
        int index = rand.nextInt(words.length);
        this.collector.emit(new Values(words[index], index));
        Utils.sleep(5000);
    }
}
======================================================
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
/**
* Created by ke on 2016/11/7.
*/
public class UpperBolt extends BaseBasicBolt {

    /**
     * Upper-cases the first field ("word") of each incoming tuple and emits it.
     * The input tuple's "index" field is intentionally not forwarded.
     * (The original body also read the same value via a raw getValues().get(0)
     * cast and fetched "index" into an unused local; both dead reads removed.)
     *
     * @param input     incoming tuple from the spout
     * @param collector collector used to emit the upper-cased word
     */
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        // getString(0) is the typed accessor; no cast needed.
        String word = input.getString(0);
        collector.emit(new Values(word.toUpperCase()));
    }

    /** Declares the single output field carrying the upper-cased word. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("upperword"));
    }
}
======================================================
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import org.apache.log4j.Logger;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;
/**
* Created by ke on 2016/11/7.
*/
public class SuffixBolt extends BaseBasicBolt {

    private FileWriter writer = null;
    private Logger logger = null;

    /**
     * One-time init: obtains the logger and opens a uniquely-named output file.
     * <p>
     * Bug fix: the original called {@code Logger.getLogger("file")} without
     * assigning the result, leaving {@code logger} null — every later
     * {@code logger.info(...)} (including the one inside this method) would
     * have thrown a NullPointerException.
     *
     * @param stormConf topology configuration
     * @param context   task/topology context
     * @throws RuntimeException if the output file cannot be created
     */
    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        super.prepare(stormConf, context);
        logger = Logger.getLogger("file");
        try {
            writer = new FileWriter("/home/hadoop/stormdemo/" + UUID.randomUUID());
            logger.info("writer init success:" + writer);
        } catch (IOException e) {
            // Log at error level with the cause attached, then fail the task.
            logger.error("SuffixBolt prepare error:" + e, e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Appends a fixed suffix to the incoming upper-cased word and writes the
     * result to the output file, one entry per line.
     *
     * @param input     tuple whose field 0 is the upper-cased word
     * @param collector unused — this is a terminal bolt
     */
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String upperword = input.getString(0);
        try {
            writer.write(upperword + "wuke is ok");
            writer.write("\n");
            writer.flush();
        } catch (IOException e) {
            logger.error("SuffixBolt executor error:" + e, e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Closes the output file on shutdown — the original leaked the
     * FileWriter by never closing it.
     */
    @Override
    public void cleanup() {
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                logger.error("SuffixBolt cleanup error:" + e, e);
            }
        }
    }

    /** Declares the output field name (no tuples are actually emitted). */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("suffixword"));
    }
}
====================================================
//storm拓扑结构
import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.StormTopology;
import backtype.storm.topology.TopologyBuilder;
import clojure.main;
/**
* Created by ke on 2016/11/7.
*/
public class TopoMain {

    /**
     * Wires the spout and bolts into a topology and submits it to the
     * Storm cluster under the name "demotopo".
     *
     * @param args unused command-line arguments
     * @throws Exception if topology submission fails
     */
    public static void main(String[] args) throws Exception {
        TopologyBuilder topologyBuilder = new TopologyBuilder();

        // Parallelism hint 4 = four executors run this component.
        // setNumTasks(8) sets 8 tasks total, so each executor runs 2 tasks;
        // without setNumTasks, one executor runs exactly one task.
        // Worker processes host executors, and executors run tasks.
        topologyBuilder.setSpout("randomspout", new RandomWordSpout(), 4).setNumTasks(8);
        topologyBuilder.setBolt("upperbolt", new UpperBolt(), 4).shuffleGrouping("randomspout");
        topologyBuilder.setBolt("suffixbolt", new SuffixBolt(), 4).shuffleGrouping("upperbolt");

        StormTopology topology = topologyBuilder.createTopology();

        Config config = new Config();
        // Run the job across 4 worker processes.
        config.setNumWorkers(4);
        config.setDebug(true);
        // 0 ackers: emitted tuples are not tracked for delivery acknowledgement.
        config.setNumAckers(0);

        StormSubmitter.submitTopology("demotopo", config, topology);
    }
}
===========================log4j.xml==============================
<?xml version="1.0" encoding="GB2312" ?>
<!DOCTYPE log4j:configuration SYSTEM "http://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/xml/doc-files/log4j.dtd">
<log4j:configuration debug="true">
<!-- 输出日志到控制台 ConsoleAppender -->
<appender name="console"
class="org.apache.log4j.ConsoleAppender">
<param name="Threshold" value="info"></param>
<!-- TTCCLayout has no ConversionPattern property; the bogus param
     (which log4j would flag as an unrecognized property) was removed. -->
<layout class="org.apache.log4j.TTCCLayout">
</layout>
</appender>
<!-- 输出日志到文件 每天一个文件 -->
<appender name="dailyRollingFile"
class="org.apache.log4j.DailyRollingFileAppender">
<param name="Threshold" value="info"></param>
<param name="ImmediateFlush" value="true"></param>
<param name="File" value="/home/hadoop/logs/dailyRollingFile.log"></param>
<param name="DatePattern" value="'.'yyyy-MM-dd'.log'"></param>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="[%d{yyyy-MM-dd HH:mm:ss} %-5p] [%t] {%c:%L}-%m%n"></param>
</layout>
</appender>
<!-- 输出日志到文件 文件大小到达指定尺寸的时候产生一个新的文件 -->
<appender name="railyFile"
class="org.apache.log4j.RollingFileAppender">
<param name="File" value="/home/hadoop/logs/railyFile.log"></param>
<param name="ImmediateFlush" value="true"/>
<param name="Threshold" value="info"></param>
<param name="Append" value="true"></param>
<param name="MaxFileSize" value="30KB"></param>
<param name="MaxBackupIndex" value="100"></param>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="[%d{yyyy-MM-dd HH:mm:ss} %-5p] [%t] {%c:%L}-%m%n"></param>
</layout>
</appender>
<!-- 输出日志到文件 -->
<appender name="file"
class="org.apache.log4j.FileAppender">
<param name="File" value="/home/hadoop/logs/file.log"></param>
<param name="Threshold" value="info"></param>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="[%d{yyyy-MM-dd HH:mm:ss} %-5p] [%t] {%c:%L}-%m%n"></param>
</layout>
</appender>
<!--
定义全局的日志输出级别,但是在输出目的地的配置中配置的具体输出级别优先级高于全局定义的优先级。
如果在railyFile中定义<param name="Threshold" value="info"></param>,那么将会把info以上级别的信息输出
-->
<root>
<priority value="debug" />
<appender-ref ref="console" />
<appender-ref ref="dailyRollingFile" />
<appender-ref ref="railyFile" />
<appender-ref ref="file" />
</root>
</log4j:configuration>