One-click shell deployment of a Hadoop cluster on three nodes (JDK pre-installed)
1. Prepare three machines that already have the JDK installed
172.16.149.150 cdh-master
172.16.149.151 cdh-worker1
172.16.149.152 cdh-worker2
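The script below pushes files and runs commands on the workers over SSH as root, so passwordless SSH from cdh-master to both workers has to be set up first. A minimal sketch, run once on cdh-master (the hostnames assume the /etc/hosts mapping above):
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa    # generate a key pair if none exists yet
for host in cdh-worker1 cdh-worker2; do
    ssh-copy-id root@$host                  # push the public key; asks for the root password once
done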
2. Write the deployment script
#!/bin/bash
# Target installation root on every node
ipath=/opt/bigdata
echo "安装hadoop集群"
installpath=$(cd `dirname $0`; pwd)
filepath=${installpath}/files
echo "解压hadoop压缩包"
tar -xvf ${filepath}/hadoop.tar.gz > /dev/null 2>&1
cd hadoop-* && hadoopname=`pwd | awk -F '/' '{print $NF}'`
echo "hadoop版本:${hadoopname}"
echo "移动解压包到指定hadoop路径"
cd ${installpath} && cp -rf ${hadoopname}/ ${ipath}/hadoop/
echo "配置hadoop环境变量"
echo "export HADOOP_HOME=${ipath}/hadoop/${hadoopname}" >> ~/.bash_profile
echo 'export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH' >> ~/.bash_profile
echo "刷新环境变量"
source ~/.bash_profile > /dev/null 2>&1
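# Note: sourcing here only updates this script's own shell, which is all the
# later hadoop/start-all.sh calls need; interactive shells pick up the new
# variables at the next login (or after running "source ~/.bash_profile").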
echo "解压包分发到其他节点"
for i in {1..2}
do
    ssh root@cdh-worker$i "mkdir -p ${ipath}/hadoop"   # make sure the target directory exists
    scp -r ${ipath}/hadoop/${hadoopname} root@cdh-worker$i:${ipath}/hadoop
done
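# (Note: scp -r recopies the full distribution on every run; rsync -a would
# transfer only the changes, but scp keeps the script dependency-free.)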
echo "启动Hadoop 。。。 先执行关闭操作"
${ipath}/hadoop/${hadoopname}/sbin/stop-all.sh
# Recreate empty NameNode and DataNode directories on the master
rm -rf ${ipath}/hadoop/dfs
mkdir -p ${ipath}/hadoop/dfs/name ${ipath}/hadoop/dfs/data
rm -rf ${ipath}/hadoop/tmp   # clear the Hadoop temp directory as well
# Mirror the cleanup on the workers
for i in {1..2}
do
    ssh root@cdh-worker$i "rm -rf ${ipath}/hadoop/dfs ${ipath}/hadoop/yarn"
    ssh root@cdh-worker$i "mkdir -p ${ipath}/hadoop/dfs/name ${ipath}/hadoop/dfs/data"
done
echo "初始化namenode"
hadoop namenode -format
echo "启动start-all.sh"
${ipath}/hadoop/${hadoopname}/sbin/start-all.sh
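3. Run the script and verify
A quick way to confirm the cluster came up is to check the Java daemons on each node with jps. A minimal sketch, assuming the script above was saved as install_hadoop.sh (a name chosen here for illustration) next to the files/ directory, and that the tarball ships a typical master/worker configuration:
chmod +x install_hadoop.sh && ./install_hadoop.sh
jps                                          # on cdh-master: NameNode, SecondaryNameNode, ResourceManager
for host in cdh-worker1 cdh-worker2; do
    ssh root@$host "jps"                     # on the workers: DataNode, NodeManager
done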