1.flume 启动并监听本机
bin/flume-ng agent --conf conf --conf-file conf/netcat-logger.conf --name a1 -Dflume.root.logger=INFO,console
2.flume监听集群启动命令
./bin/flume-ng agent --conf ./conf/ --conf-file ./conf/flume-conf.properties --name a1 -Dflume.root.logger=INFO,console
3.用 telnet 连接 flume 本机端口测试
telnet 192.168.100.101 44444
4.kafka 命令
启动
nohup /opt/kafka-1.1.1/bin/kafka-server-start.sh /opt/kafka-1.1.1/config/server.properties > /tmp/kafka_logs 2>&1 &
关闭
kafka-server-stop.sh
创建topic
/opt/kafka-1.1.1/bin/kafka-topics.sh --create --replication-factor 2 --partitions 2 --topic test --zookeeper 192.168.100.101:2181,192.168.100.102:2181,192.168.100.103:2181,192.168.100.104:2181
登录zookeeper客户端
zkCli.sh -server localhost:2181
登录zookeeper客户端后查看broker
ls /brokers
查看已有topic
kafka-topics.sh --list --zookeeper 192.168.100.101:2181,192.168.100.102:2181,192.168.100.103:2181,192.168.100.104:2181
启动一个生产者
kafka-console-producer.sh --broker-list 192.168.100.101:9092,192.168.100.102:9092,192.168.100.103:9092,192.168.100.104:9092 --topic test
启动一个消费者
kafka-console-consumer.sh --zookeeper 192.168.100.101:2181,192.168.100.102:2181,192.168.100.103:2181,192.168.100.104:2181 --topic test
出现的错误
kafka集群 log4j:ERROR Could not read configuration file from URL
在 /opt/kafka-1.1.1/bin/kafka-run-class.sh 里将下面的选项改为绝对路径
LOG4J_DIR="/opt/kafka-1.1.1/config/tools-log4j.properties"
5.mysql 赋权
grant select on *.* to 'root'@'%' identified by '123456' with grant option;
6.启动sqoop查看数据库
sqoop list-databases --connect jdbc:mysql://node001:3306/ --username root --password 123456
7.Mysql错误
The last packet sent successfully to the server was 0 milliseconds ago
修改/opt/my.cnf
bind-address=0.0.0.0
8.使用 sqoop 将 mysql 表导入 HDFS
sqoop import --connect jdbc:mysql://node001:3306/sqoop --username root --password 123456 --table dept -m 1 --target-dir /sqoop/dept
9.flink端口
192.168.100.101:8081