1.检查系统环境,有python 2.6.6
2.复用已经安装的zookeeper集群
3.在主节点(nimbus)上执行以下安装步骤
4.安装zeromq
# Fetch, build, and install ZeroMQ 4.0.5 from source.
# Default configure prefix is /usr/local; 'make install' needs root.
scp houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/zeromq/zeromq-4.0.5.tar.gz .
tar -xzf zeromq-4.0.5.tar.gz
cd zeromq-4.0.5
./configure
make
make install
5.安装jzmq
# Fetch and build jzmq (Java bindings for ZeroMQ).
# ZeroMQ must already be installed (previous step) for configure to succeed.
scp -r houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/jzmq/jzmq .
cd jzmq
./autogen.sh
./configure
make
make install
6.安装storm
# Fetch Storm 0.9.3, unpack it, and edit its configuration (next section
# shows the settings to add to conf/storm.yaml).
scp -r houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/storm/apache-storm-0.9.3.tar.gz .
tar -xzf apache-storm-0.9.3.tar.gz
cd apache-storm-0.9.3
vim conf/storm.yaml
# Set the following settings in conf/storm.yaml.
# Reuse the existing ZooKeeper ensemble (hostnames, non-default client port).
storm.zookeeper.servers:
- "10-140-60-85"
- "10-140-60-86"
- "10-140-60-87"
storm.zookeeper.port: 21818
# Local state directory used by the nimbus/supervisor daemons.
storm.local.dir: /usr/local/storm/workdir
storm.cluster.mode: distributed
#storm.local.mode.zmq true
# Chroot path in ZooKeeper so Storm data is isolated from other users.
storm.zookeeper.root: /storm
storm.zookeeper.session.timeout: 60000
#storm.id
nimbus.host: 10-140-60-73
storm.log.dir: /usr/local/storm/logs
# One worker slot per port: 20 worker slots per supervisor node.
supervisor.slots.ports:
- 6700
- 6701
- 6702
- 6703
- 6704
- 6705
- 6706
- 6707
- 6708
- 6709
- 6710
- 6711
- 6712
- 6713
- 6714
- 6715
- 6716
- 6717
- 6718
- 6719
# Move the Storm install onto the data disk, expose it through a stable
# symlink, hand ownership to the hadoop user, and create the local work dir
# referenced by storm.local.dir.
mv apache-storm-0.9.3 /data/hadoop/data1/usr/local/
ln -s /data/hadoop/data1/usr/local/apache-storm-0.9.3 /usr/local/storm
chown -R hadoop:hadoop /data/hadoop/data1/usr/local/apache-storm-0.9.3
# FIX: original read "mkdir -p/usr/local/storm/workdir" (no space after -p),
# which fails instead of creating the directory.
mkdir -p /usr/local/storm/workdir
7.部署其它节点
vim newslaves 文件内容如下:
10.140.60.74
10.140.60.75
10.140.60.76
10.140.60.77
10.140.60.78
10.140.60.79
10.140.60.80
10.140.60.83
10.140.60.84
把zeromq-4.0.5.tar.gz分发到各节点的/tmp目录下并安装。
# Push the ZeroMQ tarball to /tmp on every node, build and install it there,
# then remove both the build tree and the tarball.
./upgrade.sh distribute newslaves zeromq-4.0.5.tar.gz /tmp
./upgrade.sh common newslaves "cd /tmp;tar -xzf zeromq-4.0.5.tar.gz; cd zeromq-4.0.5; ./configure; make; make install; cd ..; rm -rf zeromq-4.0.5"
./upgrade.sh common newslaves "cd /tmp;rm -rf zeromq-4.0.5.tar.gz"
# Distribute and install jzmq on every node.
./upgrade.sh distribute newslaves jzmq /data/hadoop/data1/usr/local/
./upgrade.sh common newslaves "cd /data/hadoop/data1/usr/local/jzmq;./autogen.sh;./configure; make; make install"
# Distribute Storm and set environment variables on every node.
./upgrade.sh distribute newslaves /data/hadoop/data1/usr/local/apache-storm-0.9.3 /data/hadoop/data1/usr/local/
./upgrade.sh common newslaves "rm -rf /usr/local/storm;ln -s /data/hadoop/data1/usr/local/apache-storm-0.9.3 /usr/local/storm; chown -R hadoop:hadoop /data/hadoop/data1/usr/local/apache-storm-0.9.3 "
./upgrade.sh common newslaves "echo 'export STORM_HOME=/usr/local/storm' >> /etc/profile"
# FIX: escape the dollar signs so ${PATH} and ${STORM_HOME} are written
# literally into the remote /etc/profile and expanded there when the profile
# is sourced; the original expanded them in the LOCAL shell at submit time,
# baking this machine's PATH (and an empty STORM_HOME) into every node.
./upgrade.sh common newslaves "echo 'export PATH=\${PATH}:\${STORM_HOME}/bin' >> /etc/profile"
./upgrade.sh distribute newslaves /etc/hosts /etc/hosts
启动supervisor程序
# Start the supervisor daemon on every node. FIX: the original "&;exit" is a
# bash syntax error ('&' already terminates the command, so ';' follows an
# empty statement); nohup plus the redirect also keeps the daemon alive after
# the remote shell exits instead of dying with the session.
./upgrade.sh common newslaves "nohup /usr/local/storm/bin/storm supervisor > /dev/null 2>&1 & exit"
停止supervisor程序
# Push the stop script to every node and run it.
./upgrade.sh distribute newslaves bin/stop-supervisor.sh /usr/local/storm/bin/
./upgrade.sh common newslaves "/usr/local/storm/bin/stop-supervisor.sh"
# Fallback: kill any remaining supervisor JVMs by pid, then list what's left.
# FIX: escape $( ), $1, and $pid so the pid lookup runs on each REMOTE host;
# the original's backticks and $1 expanded in the local shell before the
# command was ever sent, and the $(( )) wrapper treated pid text as arithmetic.
./upgrade.sh common newslaves "for pid in \$(jps | grep supervisor | awk '{print \$1}'); do kill \$pid; done; jps"
# Report (echo) a kill command for each supervisor pid found by the helper.
# FIX: the original wrote $(`sh bin/getSupervisorPID.sh`), which runs the
# script and then EXECUTES its output as a second command; a single command
# substitution over the script's stdout is what was intended.
for pid in $(sh bin/getSupervisorPID.sh)
do
echo kill $pid;
done
# Scratch check: a command substitution over '' yields no words, so the loop
# body never runs (bash also prints a "command not found" for '' on stderr).
for pid in $('')
do
echo $pid
done
2.复用已经安装的zookeeper集群
3.在主节点(nimbus)上执行以下安装步骤
4.安装zeromq
# Fetch, build, and install ZeroMQ 4.0.5 from source.
# Default configure prefix is /usr/local; 'make install' needs root.
scp houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/zeromq/zeromq-4.0.5.tar.gz .
tar -xzf zeromq-4.0.5.tar.gz
cd zeromq-4.0.5
./configure
make
make install
5.安装jzmq
# Fetch and build jzmq (Java bindings for ZeroMQ).
# ZeroMQ must already be installed (previous step) for configure to succeed.
scp -r houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/jzmq/jzmq .
cd jzmq
./autogen.sh
./configure
make
make install
6.安装storm
# Fetch Storm 0.9.3, unpack it, and edit its configuration (next section
# shows the settings to add to conf/storm.yaml).
scp -r houzhizhen@10.58.182.158:/home/houzhizhen/usr/local/storm/apache-storm-0.9.3.tar.gz .
tar -xzf apache-storm-0.9.3.tar.gz
cd apache-storm-0.9.3
vim conf/storm.yaml
# Set the following settings in conf/storm.yaml.
# Reuse the existing ZooKeeper ensemble (hostnames, non-default client port).
storm.zookeeper.servers:
- "10-140-60-85"
- "10-140-60-86"
- "10-140-60-87"
storm.zookeeper.port: 21818
# Local state directory used by the nimbus/supervisor daemons.
storm.local.dir: /usr/local/storm/workdir
storm.cluster.mode: distributed
#storm.local.mode.zmq true
# Chroot path in ZooKeeper so Storm data is isolated from other users.
storm.zookeeper.root: /storm
storm.zookeeper.session.timeout: 60000
#storm.id
nimbus.host: 10-140-60-73
storm.log.dir: /usr/local/storm/logs
# One worker slot per port: 20 worker slots per supervisor node.
supervisor.slots.ports:
- 6700
- 6701
- 6702
- 6703
- 6704
- 6705
- 6706
- 6707
- 6708
- 6709
- 6710
- 6711
- 6712
- 6713
- 6714
- 6715
- 6716
- 6717
- 6718
- 6719
# Move the Storm install onto the data disk, expose it through a stable
# symlink, hand ownership to the hadoop user, and create the local work dir
# referenced by storm.local.dir.
mv apache-storm-0.9.3 /data/hadoop/data1/usr/local/
ln -s /data/hadoop/data1/usr/local/apache-storm-0.9.3 /usr/local/storm
chown -R hadoop:hadoop /data/hadoop/data1/usr/local/apache-storm-0.9.3
# FIX: original read "mkdir -p/usr/local/storm/workdir" (no space after -p),
# which fails instead of creating the directory.
mkdir -p /usr/local/storm/workdir
7.部署其它节点
vim newslaves 文件内容如下:
10.140.60.74
10.140.60.75
10.140.60.76
10.140.60.77
10.140.60.78
10.140.60.79
10.140.60.80
10.140.60.83
10.140.60.84
把zeromq-4.0.5.tar.gz分发到各节点的/tmp目录下并安装。
# Push the ZeroMQ tarball to /tmp on every node, build and install it there,
# then remove both the build tree and the tarball.
./upgrade.sh distribute newslaves zeromq-4.0.5.tar.gz /tmp
./upgrade.sh common newslaves "cd /tmp;tar -xzf zeromq-4.0.5.tar.gz; cd zeromq-4.0.5; ./configure; make; make install; cd ..; rm -rf zeromq-4.0.5"
./upgrade.sh common newslaves "cd /tmp;rm -rf zeromq-4.0.5.tar.gz"
# Distribute and install jzmq on every node.
./upgrade.sh distribute newslaves jzmq /data/hadoop/data1/usr/local/
./upgrade.sh common newslaves "cd /data/hadoop/data1/usr/local/jzmq;./autogen.sh;./configure; make; make install"
# Distribute Storm and set environment variables on every node.
./upgrade.sh distribute newslaves /data/hadoop/data1/usr/local/apache-storm-0.9.3 /data/hadoop/data1/usr/local/
./upgrade.sh common newslaves "rm -rf /usr/local/storm;ln -s /data/hadoop/data1/usr/local/apache-storm-0.9.3 /usr/local/storm; chown -R hadoop:hadoop /data/hadoop/data1/usr/local/apache-storm-0.9.3 "
./upgrade.sh common newslaves "echo 'export STORM_HOME=/usr/local/storm' >> /etc/profile"
# FIX: escape the dollar signs so ${PATH} and ${STORM_HOME} are written
# literally into the remote /etc/profile and expanded there when the profile
# is sourced; the original expanded them in the LOCAL shell at submit time,
# baking this machine's PATH (and an empty STORM_HOME) into every node.
./upgrade.sh common newslaves "echo 'export PATH=\${PATH}:\${STORM_HOME}/bin' >> /etc/profile"
./upgrade.sh distribute newslaves /etc/hosts /etc/hosts
启动supervisor程序
# Start the supervisor daemon on every node. FIX: the original "&;exit" is a
# bash syntax error ('&' already terminates the command, so ';' follows an
# empty statement); nohup plus the redirect also keeps the daemon alive after
# the remote shell exits instead of dying with the session.
./upgrade.sh common newslaves "nohup /usr/local/storm/bin/storm supervisor > /dev/null 2>&1 & exit"
停止supervisor程序
# Push the stop script to every node and run it.
./upgrade.sh distribute newslaves bin/stop-supervisor.sh /usr/local/storm/bin/
./upgrade.sh common newslaves "/usr/local/storm/bin/stop-supervisor.sh"
# Fallback: kill any remaining supervisor JVMs by pid, then list what's left.
# FIX: escape $( ), $1, and $pid so the pid lookup runs on each REMOTE host;
# the original's backticks and $1 expanded in the local shell before the
# command was ever sent, and the $(( )) wrapper treated pid text as arithmetic.
./upgrade.sh common newslaves "for pid in \$(jps | grep supervisor | awk '{print \$1}'); do kill \$pid; done; jps"
# Report (echo) a kill command for each supervisor pid found by the helper.
# FIX: the original wrote $(`sh bin/getSupervisorPID.sh`), which runs the
# script and then EXECUTES its output as a second command; a single command
# substitution over the script's stdout is what was intended.
for pid in $(sh bin/getSupervisorPID.sh)
do
echo kill $pid;
done
# Scratch check: a command substitution over '' yields no words, so the loop
# body never runs (bash also prints a "command not found" for '' on stderr).
for pid in $('')
do
echo $pid
done