Kafka Installation Procedure


1. Check the environment on the 5 servers.
Found that there is no hadoop user and /etc/hosts has not been updated.
2. Download
[root@hadoopNN1 ~]# cd /data/hadoop/data1/usr/local/setupKafka/
vim newslaves # put the list of servers to deploy into this file, one per line
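Judging from the broker list used later in these notes, newslaves presumably holds the five broker IPs, one per line:
10.140.60.110
10.140.60.111
10.140.60.112
10.140.60.119
10.140.60.120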
scp houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/kafka_2.10-0.8.1.1.tgz .
tar -xzf kafka_2.10-0.8.1.1.tgz
vim kafka_2.10-0.8.1.1/config/server.properties
#modify the following settings
num.network.threads=10
num.io.threads=12
log.dirs=/data/hadoop/data1/kafka/log,/data/hadoop/data2/kafka/log,/data/hadoop/data3/kafka/log,/data/hadoop/data4/kafka/log,/data/hadoop/data5/kafka/log,/data/hadoop/data6/kafka/log,/data/hadoop/data7/kafka/log,/data/hadoop/data8/kafka/log,/data/hadoop/data9/kafka/log,/data/hadoop/data10/kafka/log,/data/hadoop/data11/kafka/log,/data/hadoop/data12/kafka/log
num.partitions=5
log.retention.hours=168
zookeeper.connect=10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818/kafka
rm -rf kafka_2.10-0.8.1.1/libs/zookeeper-3.3.4.jar

Create the /kafka chroot znode on the ZooKeeper nodes:
bin/zkCli.sh -server 10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818
create /kafka ''
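Still inside zkCli.sh, the new chroot can be checked before quitting:
ls /
get /kafka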
# The ZooKeeper cluster is version 3.4.6, so replace the bundled zookeeper jar (the 3.3.4 jar was removed above).
scp houzhizhen@10.58.182.158:/usr/local/hadoop/hadoop-2.5.2/share/hadoop/common/lib/zookeeper-3.4.6.jar kafka_2.10-0.8.1.1/libs/
cat newslaves | while read ip; do echo "${ip//./-}"; done > newslavehostname   # IP -> hostname, e.g. 10.140.60.110 -> 10-140-60-110
paste newslaves newslavehostname > newhosts
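newhosts then holds one tab-separated IP/hostname pair per line, for example:
10.140.60.110	10-140-60-110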
cat newhosts >>   /etc/hosts
./upgrade.sh distribute newslaves /etc/hosts /etc/hosts
./upgrade.sh common newslaves "echo 'sshd:10.140.60.110' >> /etc/hosts.allow"
./upgrade.sh common newslaves "echo 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqBhK0CBgDyJ/cOdZbcb3eEFeV+0btZqsnk5dbPDMEXZ5Cn4aTv/7c67WvkDkC2qnF9ggkCibl6X5U3plqEkw7rZZSOp0HWS0NoaGB54IzMLvj61GPnXlkms4pz00StHK38P0rL3dNcMMpMenXlA1nZnNzz2q2KtpSex/lEILA0L8TNZI/VK4QhWHYZvNfOXE2P1ng2JyV4l756w+R2bMQz2j/i0M/YdqscdtvIe8VUiCMRslj22T9k79OLc/SBwZSZwjdDK1MogPcXSiNzAsDYBn0+rU3vkUsYiEdhaFQO7UgPWd3DxRtWLVLtBMRdfX1Qndwb+0kGUMuFoRs588+Q== root@10-140-60-110' >> /root/.ssh/authorized_keys"
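upgrade.sh itself is not included in these notes; judging from how it is invoked, "distribute" copies a file or directory to every host in the list and "common" runs a command on every host. A minimal sketch under that assumption (the root-ssh details are guesses):
#!/bin/bash
# Hypothetical sketch of upgrade.sh (the real script is not shown in these notes).
# usage: upgrade.sh distribute <hostfile> <src> <dest>   - scp a file/dir to every host
#        upgrade.sh common     <hostfile> <command...>   - run a command on every host
action=$1; hostfile=$2; shift 2
while read host; do
    case "$action" in
        distribute) scp -r "$1" "root@$host:$2" ;;
        common)     ssh -n "root@$host" "$@" ;;   # -n keeps ssh from consuming the host list
    esac
done < "$hostfile"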

./upgrade.sh distribute newslaves jdk1.7.0_51.tar.gz /tmp
scp houzhizhen@10.58.182.158:/usr/local/houzhizhen/scala/scala-2.10.4.tgz .
./upgrade.sh distribute newslaves scala-2.10.4.tgz /tmp
./upgrade.sh distribute newslaves setupScala.sh /tmp
./upgrade.sh common newslaves "sh /tmp/setupScala.sh"


scp houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/scala/scala-2.8.0.final.tgz .
./upgrade.sh distribute newslaves scala-2.8.0.final.tgz /tmp
./upgrade.sh distribute newslaves setupScala.sh /tmp
./upgrade.sh common newslaves "sh /tmp/setupScala.sh"
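setupScala.sh is likewise not shown. A sketch of what it plausibly does with the tarball that was pushed to /tmp (the /usr/local layout and the /etc/profile edits are assumptions):
#!/bin/bash
# Hypothetical sketch of setupScala.sh (actual script not shown in these notes).
cd /usr/local
tar -xzf /tmp/scala-2.10.4.tgz                      # version is an assumption; 2.8.0 was pushed the same way
ln -sfn /usr/local/scala-2.10.4 /usr/local/scala
grep -q SCALA_HOME /etc/profile || cat >> /etc/profile <<'EOF'
export SCALA_HOME=/usr/local/scala
export PATH=$PATH:$SCALA_HOME/bin
EOF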
 
vim init.sh #remove ssh key

vim setup.sh #modify hadoop version


./upgrade.sh distribute newslaves init.sh /tmp
./upgrade.sh distribute newslaves setup.sh /tmp
./upgrade.sh common newslaves sh /tmp/init.sh
./upgrade.sh common newslaves sh /tmp/setup.sh
# On 10.140.60.110: su - hadoop, then run ssh-keygen to generate the hadoop user's key pair
./upgrade.sh common newslaves 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAzAKtags9hHg/FtLcZqLCqwZa6UFuNSliowQTbw3RwOBTeQG3VO/3bl65wJxSOoriZqMnsfsFhvdI5AiZ3/45zsVcKkK1vWqvyWF7n0De6RwjlRtoe7bBQd8Ohg5lu7sw28RmuAJeABYV5A8nch4XuzVOkNprGlGht3eFj6OSy4k4tfFQCh2bfcc6drdM9seZRuhIvR7zcljzhmCsZ8jTdfEQgIzmhYbLw2kOzAJcChUU5sBdt1Ei+IX9eEW5putPxpjQK8RSrRB3gDWRM5kgKMSu0qppAxDuM7NLn/7fDKinsVQjoC7dfaBy9OgfCM/JYltcwqCAoj57LHfNjRu+SQ== hadoop@10-140-60-110" >> /home/hadoop/.ssh/authorized_keys'
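To confirm the hadoop account can now reach the workers without a password (10.140.60.111 is one of the brokers listed later):
su - hadoop -c "ssh -o StrictHostKeyChecking=no 10.140.60.111 hostname"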

./upgrade.sh distribute newslaves kafka_2.10-0.8.1.1 /data/hadoop/data1/usr/local/
./upgrade.sh distribute newslaves setupKafka.sh /tmp
./upgrade.sh common newslaves "cd /tmp; sh setupKafka.sh"
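setupKafka.sh is not shown either. Given that the Kafka directory was distributed to /data/hadoop/data1/usr/local/ and later commands refer to /usr/local/kafka, a sketch of what it presumably does (the symlink, ownership and the log directories from log.dirs are assumptions):
#!/bin/bash
# Hypothetical sketch of setupKafka.sh (actual script not shown in these notes).
ln -sfn /data/hadoop/data1/usr/local/kafka_2.10-0.8.1.1 /usr/local/kafka
chown -R hadoop:hadoop /data/hadoop/data1/usr/local/kafka_2.10-0.8.1.1
mkdir -p /data/hadoop/data{1..12}/kafka/log          # matches log.dirs in server.properties
chown -R hadoop:hadoop /data/hadoop/data*/kafka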

./upgrade.sh distribute newslaves kafka_2.10-0.8.1.1/config/ser
# log in to every host listed in newslaves and set a unique broker.id (the distributed template has broker.id=0)
#vim /usr/local/kafka/config/server.properties
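Editing a dozen files by hand is error prone; one alternative sketch is to derive broker.id from the last octet of each host's IP (assumes root ssh from the control node):
cat newslaves | while read ip; do
    id=${ip##*.}    # last octet, e.g. 110 for 10.140.60.110
    ssh -n root@$ip "sed -i 's/^broker.id=.*/broker.id=$id/' /usr/local/kafka/config/server.properties"
done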

#check
./upgrade.sh common newslaves 'grep "broker.id" /usr/local/kafka/config/server.properties '

scp -r ../setupKafka 10-140-60-110:/data/hadoop/data1/usr/local/
ssh 10-140-60-110 "chown -R hadoop:hadoop /data/hadoop/data1/usr/local/setupKafka"

#ssh 10-140-60-110 ; su - hadoop; cd /data/hadoop/data1/usr/local/setupKafka;
./upgrade.sh common newslaves "mkdir -p /usr/local/kafka/logs"
./upgrade.sh common newslaves "/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties"
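kafka-server-start.sh runs in the foreground by default, so running it through upgrade.sh/ssh as above leaves each session attached to its broker; a hedged workaround is to background it and send output to the logs directory created above:
./upgrade.sh common newslaves "nohup /usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties > /usr/local/kafka/logs/server.out 2>&1 &"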

./upgrade.sh  distribute  newslaves kafka_2.10-0.8.1.1/config/server.properties /usr/local/kafka/config/server.properties

Now create a new topic with a replication factor of three:

bin/kafka-topics.sh --create --zookeeper 10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818/kafka --replication-factor 3 --partitions 1 --topic my-replicated-topic
bin/kafka-topics.sh --describe --zookeeper 10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818/kafka --topic my-replicated-topic
Topic:my-replicated-topic       PartitionCount:1        ReplicationFactor:3     Configs:
        Topic: my-replicated-topic      Partition: 0    Leader: 2       Replicas: 2,4,0 Isr: 2,4,0

bin/kafka-topics.sh --describe --zookeeper 10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818/kafka --topic test
bin/kafka-console-producer.sh --broker-list 10-140-60-110:9092,10-140-60-111:9092,10-140-60-112:9092,10-140-60-119:9092,10-140-60-120:9092 --topic my-replicated-topic

bin/kafka-console-consumer.sh --zookeeper 10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818/kafka --from-beginning --topic my-replicated-topic


vim /etc/profile
export KAFKA_ZOOKEEPER_URL=10-140-60-85:21818,10-140-60-86:21818,10-140-60-87:21818/kafka
export BROKER_LIST=10-140-60-110:9092,10-140-60-111:9092,10-140-60-112:9092,10-140-60-119:9092,10-140-60-120:9092
bin/kafka-topics.sh --describe --zookeeper ${KAFKA_ZOOKEEPER_URL} --topic my-replicated-topic
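After source /etc/profile, the producer and consumer commands above can reuse these variables, for example:
source /etc/profile
bin/kafka-console-producer.sh --broker-list ${BROKER_LIST} --topic my-replicated-topic
bin/kafka-console-consumer.sh --zookeeper ${KAFKA_ZOOKEEPER_URL} --from-beginning --topic my-replicated-topic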