Everything is in place.
Key steps:
# Start the Flume agent
flume-ng agent \
--conf $FLUME_HOME/conf \
--conf-file $FLUME_HOME/conf/streaming2.conf \
--name agent1 \
-Dflume.root.logger=INFO,console
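The contents of streaming2.conf are not shown above. As a rough sketch only (the avro source fed by the log4j Flume appender, port 41414, the broker address, and the Flume 1.6-style Kafka sink property names are all assumptions to adapt to your environment), an agent1 that forwards incoming events to the streamingtopic Kafka topic could look like this:

# Hypothetical streaming2.conf: avro source -> memory channel -> Kafka sink
agent1.sources = avro-source
agent1.channels = logger-channel
agent1.sinks = kafka-sink

agent1.sources.avro-source.type = avro
agent1.sources.avro-source.bind = 0.0.0.0
agent1.sources.avro-source.port = 41414

agent1.channels.logger-channel.type = memory

agent1.sinks.kafka-sink.type = org.apache.flume.sink.kafka.KafkaSink
agent1.sinks.kafka-sink.topic = streamingtopic
agent1.sinks.kafka-sink.brokerList = localhost:9092
agent1.sinks.kafka-sink.requiredAcks = 1
agent1.sinks.kafka-sink.batchSize = 20

agent1.sources.avro-source.channels = logger-channel
agent1.sinks.kafka-sink.channel = logger-channel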
# Start ZooKeeper
cd /usr/local/kafka
bin/zookeeper-server-start.sh config/zookeeper.properties
# Start the Kafka broker
cd /usr/local/kafka
./bin/kafka-server-start.sh config/server.properties
# Create the Kafka topic (via ZooKeeper)
cd /usr/local/kafka
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic streamingtopic
# List Kafka topics
cd /usr/local/kafka
./bin/kafka-topics.sh --list --zookeeper localhost:2181
# Consume messages (via ZooKeeper)
cd /usr/local/kafka
./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic streamingtopic --from-beginning
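As an optional sanity check before wiring in Flume, you can publish a few test messages with the console producer (assuming the broker listens on the default localhost:9092) and watch them appear in the consumer above:

# Produce test messages (optional sanity check)
cd /usr/local/kafka
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic streamingtopic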
After packaging, place the jar in the user's home directory, then run:
spark-submit --class org.apache.spark.examples.streaming.KafkaReceiverWordCount --master local[*] /home/hadoop/example-1.0.jar localhost:2181 test streamingtopic 1
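The jar is expected to contain the KafkaReceiverWordCount class. A minimal sketch of what such a class might look like, assuming the receiver-based KafkaUtils.createStream API from spark-streaming-kafka-0-8 and the argument order <zkQuorum> <group> <topics> <numThreads> matching the command above:

import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Hypothetical sketch of KafkaReceiverWordCount (receiver-based Kafka input).
object KafkaReceiverWordCount {
  def main(args: Array[String]): Unit = {
    if (args.length != 4) {
      System.err.println("Usage: KafkaReceiverWordCount <zkQuorum> <group> <topics> <numThreads>")
      System.exit(1)
    }
    val Array(zkQuorum, group, topics, numThreads) = args

    val sparkConf = new SparkConf().setAppName("KafkaReceiverWordCount")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Map each topic to the number of receiver threads.
    val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap

    // The receiver returns (key, value) pairs; the log line is the value.
    val messages = KafkaUtils.createStream(ssc, zkQuorum, group, topicMap)

    messages.map(_._2)
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()

    ssc.start()
    ssc.awaitTermination()
  }
}

The receiver-based approach tracks consumed offsets in ZooKeeper, which is why the first argument is the ZooKeeper quorum (localhost:2181) rather than the broker list.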
Finally, start LoggerGenerator and KafkaStreamingApp from IDEA.
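LoggerGenerator is what feeds the pipeline. A hypothetical minimal version that emits one log4j message per second (it assumes log4j.properties is configured with the Flume log4j appender pointing at the avro source above):

import org.apache.log4j.Logger

// Hypothetical LoggerGenerator: writes one log line per second so the
// log4j Flume appender has a steady stream of events to forward.
object LoggerGenerator {
  private val logger = Logger.getLogger(getClass.getName)

  def main(args: Array[String]): Unit = {
    var index = 0
    while (true) {
      Thread.sleep(1000)
      index += 1
      logger.info("current value is: " + index)
    }
  }
}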