[hadoop@node1 software]$ tar xf sqoop-1.4.6-cdh5.7.0.tar.gz -C ../app/
[hadoop@node1 software]$ tar xf hbase-1.2.0-cdh5.7.0.tar.gz -C ../app/
==== ZOOKEEPER 安装配置===
[hadoop@node1 software]$ tar xf zookeeper-3.4.5-cdh5.7.0.tar.gz -C ../app/
[hadoop@node1 ~]$ cd /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/conf/
[hadoop@node1 conf]$ cp -rp zoo_sample.cfg zoo.cfg
修改 zoo.cfg
dataDir=/home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/data
dataLogDir=/home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/logs
clientPort=2181
mkdir -p /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/data
mkdir -p /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/logs
添加环境变量
[hadoop@node1 ~]$ cat /home/hadoop/.bash_profile |grep ZOOKEEPER_HOME
export ZOOKEEPER_HOME=/home/hadoop/app/zookeeper-3.4.5-cdh5.7.0
export PATH=$PATH:$ZOOKEEPER_HOME/bin
[hadoop@node1 ~]$ source /home/hadoop/.bash_profile
启动 zookeeper
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/../conf/zoo.cfg
Starting zookeeper ... /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkServer.sh: line 120: [: /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/data: binary operator expected
STARTED
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkServer.sh status
JMX enabled by default
Using config: /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/../conf/zoo.cfg
Mode: standalone
[hadoop@node1 ~]$ jps
3707 NodeManager
3605 ResourceManager
3159 NameNode
4079 QuorumPeerMain
3260 DataNode
4140 Jps
3427 SecondaryNameNode
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkCli.sh -server 192.168.137.251:2181
==== kafka 伪分布式配置==
[hadoop@node1 ~]$ tar xf /home/hadoop/software/kafka_2.10-0.9.0.1.tgz -C /home/hadoop/app/
[hadoop@node1 config]$ pwd
/home/hadoop/app/kafka_2.10-0.9.0.1/config
cp -rp server.properties server.properties.source
需要修改的地方
broker.id=0
log.dirs=/home/hadoop/app/kafka_2.10-0.9.0.1/data
zookeeper.connect=node1.oracle.com:2181/kafka0.9
在生产中一般不建议配置成如下不带 chroot 路径的方式（kafka 的节点会直接写到 zookeeper 根目录下，与其他应用的数据混在一起，不便于管理和清理）：
zookeeper.connect=node1.oracle.com:2181
伪分布式开启多个 kafka 服务的话，每个实例的配置文件需要修改如下几处，且各实例取值必须互不相同：
broker.id=0          # 每个 broker 实例必须使用唯一的 id，如 0/1/2
log.dirs=/home/hadoop/app/kafka_2.10-0.9.0.1/data   # 每个实例使用独立的数据目录，如 data0/data1/data2
port=9092            # 同一台机器上各实例需监听不同端口，如 9092/9093/9094
分布式的话 把节点1的配置 信息 scp 到其他分布式节点即可
启动kafka
[hadoop@node1 bin]$ pwd
/home/hadoop/app/kafka_2.10-0.9.0.1/bin
[hadoop@node1 bin]$ ./kafka-server-start.sh
USAGE: ./kafka-server-start.sh [-daemon] server.properties [--override property=value]*
前台启动 kafka
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-server-start.sh config/server.properties
[hadoop@node1 ~]$ jps
2822 NameNode
3269 ResourceManager
3372 NodeManager
3118 SecondaryNameNode
3710 QuorumPeerMain
3982 Kafka
2924 DataNode
4092 Jps
cp -rp server.properties server0.properties
cp -rp server.properties server1.properties
cp -rp server.properties server2.properties
bin/kafka-server-start.sh -daemon config/server0.properties
bin/kafka-server-start.sh -daemon config/server1.properties
bin/kafka-server-start.sh -daemon config/server2.properties
[hadoop@node1 kafka_2.10-0.9.0.1]$ jps -ml
3707 org.apache.hadoop.yarn.server.nodemanager.NodeManager
5401 sun.tools.jps.Jps -ml
3605 org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
3159 org.apache.hadoop.hdfs.server.namenode.NameNode
4079 org.apache.zookeeper.server.quorum.QuorumPeerMain /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/../conf/zoo.cfg
3260 org.apache.hadoop.hdfs.server.datanode.DataNode
3427 org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/zookeeper-server-start.sh config/zookeeper.properties 默认是前台启动的
如果希望后台启动的话 可以如下的方式
[hadoop@node1 kafka_2.10-0.9.0.1]$ nohup bin/zookeeper-server-start.sh config/zookeeper.properties &
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkCli.sh -server 192.168.137.251:2181
[zk: 192.168.137.251:2181(CONNECTED) 0] ls /
[kafka0.9, zookeeper]
[zk: 192.168.137.251:2181(CONNECTED) 1] ls /kafka0.9
[consumers, config, controller, isr_change_notification, admin, brokers, controller_epoch]
[zk: 192.168.137.251:2181(CONNECTED) 2] ls /kafka0.9
[consumers, config, isr_change_notification, admin, brokers, controller_epoch]
[zk: 192.168.137.251:2181(CONNECTED) 3]
[zk: 192.168.137.251:2181(CONNECTED) 3]
[zk: 192.168.137.251:2181(CONNECTED) 3] get /kafka0.9/brokers
null
cZxid = 0xb
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xb
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0x16
cversion = 3
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 3
[zk: 192.168.137.251:2181(CONNECTED) 4] get /kafka0.9/brokers
null
cZxid = 0xb
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xb
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0x16
cversion = 3
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 3
[zk: 192.168.137.251:2181(CONNECTED) 5] get /kafka0.9/brokers/
seqid topics ids
[zk: 192.168.137.251:2181(CONNECTED) 5] get /kafka0.9/brokers/ids
null
cZxid = 0xc
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xc
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0xef
cversion = 14
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 0
[zk: 192.168.137.251:2181(CONNECTED) 6] get /kafka0.9/brokers/topics
null
cZxid = 0xd
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xd
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0x8b
cversion = 1
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 1
[zk: 192.168.137.251:2181(CONNECTED) 7]
关闭 kafka 服务
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-server-stop.sh
[hadoop@node1 kafka_2.10-0.9.0.1]$ jps
2822 NameNode
3269 ResourceManager
4452 Jps
3372 NodeManager
3118 SecondaryNameNode
3710 QuorumPeerMain
2924 DataNode
=====创建 topic 及启动生产者和消费者测试=====
删除 topic（注意：此处使用的 zookeeper 端口 12181 与前文 zoo.cfg 中配置的 clientPort=2181 不一致，请按实际环境确认端口）
bin/kafka-topics.sh --zookeeper localhost:12181/kafka0.9 --delete --topic mykafka1
创建topic
bin/kafka-topics.sh --zookeeper localhost:12181/kafka0.9 --create --topic mykafka1 --partitions 3 --replication-factor 2
bin/kafka-topics.sh --zookeeper localhost:12181/kafka0.9 --describe --topic mykafka1
启动kafka生产者
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-console-producer.sh --topic mykafka1 --broker-list localhost:9092
oracle
mysql
info
abd
dba
dbc
oracle
mysql
greenplum
mongodb
mycat
spark
spark streaming
spark streaming
spark mlib
关闭程序的话直接 Ctrl +c 就行了
启动 kafka 消费者
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-console-consumer.sh --zookeeper localhost:12181/kafka0.9 --topic mykafka1
oracle
mysql
info
abd
dba
dbc
oracle
mysql
greenplum
mongodb
mycat
spark
spark streaming
spark streaming
spark mlib
^CProcessed a total of 14 messages
[hadoop@node1 software]$ tar xf sqoop-1.4.6-cdh5.7.0.tar.gz -C ../app/
[hadoop@node1 software]$ tar xf hbase-1.2.0-cdh5.7.0.tar.gz -C ../app/
==== ZOOKEEPER 安装配置===
[hadoop@node1 software]$ tar xf zookeeper-3.4.5-cdh5.7.0.tar.gz -C ../app/
[hadoop@node1 ~]$ cd /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/conf/
[hadoop@node1 conf]$ cp -rp zoo_sample.cfg zoo.cfg
修改 zoo.cfg
dataDir=/home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/data
dataLogDir=/home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/logs
clientPort=2181
mkdir -p /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/data
mkdir -p /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/logs
添加环境变量
[hadoop@node1 ~]$ cat /home/hadoop/.bash_profile |grep ZOOKEEPER_HOME
export ZOOKEEPER_HOME=/home/hadoop/app/zookeeper-3.4.5-cdh5.7.0
export PATH=$PATH:$ZOOKEEPER_HOME/bin
[hadoop@node1 ~]$ source /home/hadoop/.bash_profile
启动 zookeeper
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkServer.sh start
JMX enabled by default
Using config: /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/../conf/zoo.cfg
Starting zookeeper ... /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkServer.sh: line 120: [: /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/data: binary operator expected
STARTED
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkServer.sh status
JMX enabled by default
Using config: /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/../conf/zoo.cfg
Mode: standalone
[hadoop@node1 ~]$ jps
3707 NodeManager
3605 ResourceManager
3159 NameNode
4079 QuorumPeerMain
3260 DataNode
4140 Jps
3427 SecondaryNameNode
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkCli.sh -server 192.168.137.251:2181
==== kafka 伪分布式配置==
[hadoop@node1 ~]$ tar xf /home/hadoop/software/kafka_2.10-0.9.0.1.tgz -C /home/hadoop/app/
[hadoop@node1 config]$ pwd
/home/hadoop/app/kafka_2.10-0.9.0.1/config
cp -rp server.properties server.properties.source
需要修改的地方
broker.id=0
log.dirs=/home/hadoop/app/kafka_2.10-0.9.0.1/data
zookeeper.connect=node1.oracle.com:2181/kafka0.9
在生产中一般不建议配置成如下不带 chroot 路径的方式（kafka 的节点会直接写到 zookeeper 根目录下，与其他应用的数据混在一起，不便于管理和清理）：
zookeeper.connect=node1.oracle.com:2181
伪分布式开启多个 kafka 服务的话，每个实例的配置文件需要修改如下几处，且各实例取值必须互不相同：
broker.id=0          # 每个 broker 实例必须使用唯一的 id，如 0/1/2
log.dirs=/home/hadoop/app/kafka_2.10-0.9.0.1/data   # 每个实例使用独立的数据目录，如 data0/data1/data2
port=9092            # 同一台机器上各实例需监听不同端口，如 9092/9093/9094
分布式的话 把节点1的配置 信息 scp 到其他分布式节点即可
启动kafka
[hadoop@node1 bin]$ pwd
/home/hadoop/app/kafka_2.10-0.9.0.1/bin
[hadoop@node1 bin]$ ./kafka-server-start.sh
USAGE: ./kafka-server-start.sh [-daemon] server.properties [--override property=value]*
前台启动 kafka
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-server-start.sh config/server.properties
[hadoop@node1 ~]$ jps
2822 NameNode
3269 ResourceManager
3372 NodeManager
3118 SecondaryNameNode
3710 QuorumPeerMain
3982 Kafka
2924 DataNode
4092 Jps
cp -rp server.properties server0.properties
cp -rp server.properties server1.properties
cp -rp server.properties server2.properties
bin/kafka-server-start.sh -daemon config/server0.properties
bin/kafka-server-start.sh -daemon config/server1.properties
bin/kafka-server-start.sh -daemon config/server2.properties
[hadoop@node1 kafka_2.10-0.9.0.1]$ jps -ml
3707 org.apache.hadoop.yarn.server.nodemanager.NodeManager
5401 sun.tools.jps.Jps -ml
3605 org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
3159 org.apache.hadoop.hdfs.server.namenode.NameNode
4079 org.apache.zookeeper.server.quorum.QuorumPeerMain /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/../conf/zoo.cfg
3260 org.apache.hadoop.hdfs.server.datanode.DataNode
3427 org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/zookeeper-server-start.sh config/zookeeper.properties 默认是前台启动的
如果希望后台启动的话 可以如下的方式
[hadoop@node1 kafka_2.10-0.9.0.1]$ nohup bin/zookeeper-server-start.sh config/zookeeper.properties &
[hadoop@node1 ~]$ /home/hadoop/app/zookeeper-3.4.5-cdh5.7.0/bin/zkCli.sh -server 192.168.137.251:2181
[zk: 192.168.137.251:2181(CONNECTED) 0] ls /
[kafka0.9, zookeeper]
[zk: 192.168.137.251:2181(CONNECTED) 1] ls /kafka0.9
[consumers, config, controller, isr_change_notification, admin, brokers, controller_epoch]
[zk: 192.168.137.251:2181(CONNECTED) 2] ls /kafka0.9
[consumers, config, isr_change_notification, admin, brokers, controller_epoch]
[zk: 192.168.137.251:2181(CONNECTED) 3]
[zk: 192.168.137.251:2181(CONNECTED) 3]
[zk: 192.168.137.251:2181(CONNECTED) 3] get /kafka0.9/brokers
null
cZxid = 0xb
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xb
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0x16
cversion = 3
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 3
[zk: 192.168.137.251:2181(CONNECTED) 4] get /kafka0.9/brokers
null
cZxid = 0xb
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xb
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0x16
cversion = 3
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 3
[zk: 192.168.137.251:2181(CONNECTED) 5] get /kafka0.9/brokers/
seqid topics ids
[zk: 192.168.137.251:2181(CONNECTED) 5] get /kafka0.9/brokers/ids
null
cZxid = 0xc
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xc
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0xef
cversion = 14
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 0
[zk: 192.168.137.251:2181(CONNECTED) 6] get /kafka0.9/brokers/topics
null
cZxid = 0xd
ctime = Wed Feb 07 16:07:06 CST 2018
mZxid = 0xd
mtime = Wed Feb 07 16:07:06 CST 2018
pZxid = 0x8b
cversion = 1
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 1
[zk: 192.168.137.251:2181(CONNECTED) 7]
关闭 kafka 服务
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-server-stop.sh
[hadoop@node1 kafka_2.10-0.9.0.1]$ jps
2822 NameNode
3269 ResourceManager
4452 Jps
3372 NodeManager
3118 SecondaryNameNode
3710 QuorumPeerMain
2924 DataNode
=====创建 topic 及启动生产者和消费者测试=====
删除 topic（注意：此处使用的 zookeeper 端口 12181 与前文 zoo.cfg 中配置的 clientPort=2181 不一致，请按实际环境确认端口）
bin/kafka-topics.sh --zookeeper localhost:12181/kafka0.9 --delete --topic mykafka1
创建topic
bin/kafka-topics.sh --zookeeper localhost:12181/kafka0.9 --create --topic mykafka1 --partitions 3 --replication-factor 2
bin/kafka-topics.sh --zookeeper localhost:12181/kafka0.9 --describe --topic mykafka1
启动kafka生产者
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-console-producer.sh --topic mykafka1 --broker-list localhost:9092
oracle
mysql
info
abd
dba
dbc
oracle
mysql
greenplum
mongodb
mycat
spark
spark streaming
spark streaming
spark mlib
关闭程序的话直接 Ctrl +c 就行了
启动 kafka 消费者
[hadoop@node1 kafka_2.10-0.9.0.1]$ bin/kafka-console-consumer.sh --zookeeper localhost:12181/kafka0.9 --topic mykafka1
oracle
mysql
info
abd
dba
dbc
oracle
mysql
greenplum
mongodb
mycat
spark
spark streaming
spark streaming
spark mlib
^CProcessed a total of 14 messages