Ambari Uninstall Shell Script

#!/bin/bash
# Program: uninstall Ambari and the HDP stack automatically
# History:
# 2014/01/13 - Ivan - 2862099249@qq.com - First release
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH

#Get all cluster hostnames. Note: in /etc/hosts, each IP and hostname
#must be separated by a single space, or the cut below will not work
hostList=$(tail -n +3 /etc/hosts | cut -d ' ' -f 2)
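#For reference, the parsing above assumes a /etc/hosts layout like the
#hypothetical one below: tail -n +3 skips the two localhost lines, and
#each remaining line is "IP<single space>hostname":
#  127.0.0.1 localhost localhost.localdomain
#  ::1 localhost localhost.localdomain
#  192.168.1.10 master
#  192.168.1.11 slave1
#  192.168.1.12 slave2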
yumReposDir=/etc/yum.repos.d/
alterNativesDir=/etc/alternatives/
pingCount=5
logPre=TDP

read -p "Please input your master hostname: " master
master=${master:-"master"}
ssh $master "ambari-server stop"
#Reset the Ambari database
ssh $master "ambari-server reset"

for host in $hostList
do
#Check connectivity to the host via ping's exit status (counting only
#'Unreachable' lines would miss plain timeouts); skip unreachable hosts
if ! ping -c $pingCount -q $host > /dev/null 2>&1; then
echo "$logPre======>$host is unreachable, please check the '/etc/hosts' file"
continue
fi

echo "$logPre======>$host deleting... \n"
#1) Delete hdp.repo, HDP.repo, HDP-UTILS.repo and ambari.repo
ssh $host "rm -rf $yumReposDir/hdp.repo"
ssh $host "rm -rf $yumReposDir/HDP*"
ssh $host "rm -rf $yumReposDir/ambari.repo"

#Remove the HDP-related packages in a single yum transaction per host
ssh $host "yum remove -y sqoop.noarch lzo-devel.x86_64 hadoop-libhdfs.x86_64 rrdtool.x86_64 \
hbase.noarch pig.noarch lzo.x86_64 ambari-log4j.noarch oozie.noarch oozie-client.noarch \
gweb.noarch snappy-devel.x86_64 hcatalog.noarch python-rrdtool.x86_64 nagios.x86_64 \
webhcat-tar-pig.noarch snappy.x86_64 libconfuse.x86_64 webhcat-tar-hive.noarch \
ganglia-gmetad.x86_64 extjs.noarch hive.noarch hadoop-lzo.x86_64 hadoop-lzo-native.x86_64 \
hadoop-native.x86_64 hadoop-pipes.x86_64 nagios-plugins.x86_64 hadoop.x86_64 \
zookeeper.noarch hadoop-sbin.x86_64 ganglia-gmond.x86_64 libganglia.x86_64 \
perl-rrdtool.x86_64 epel-release.noarch compat-readline5* fping.x86_64 \
perl-Crypt-DES.x86_64 exim.x86_64 ganglia-web.noarch perl-Digest-HMAC.noarch \
perl-Digest-SHA1.x86_64 bigtop-jsvc.x86_64"

#Remove the alternatives symlinks. Each ssh invocation starts a fresh
#shell, so the cd and rm must run in the same remote command; otherwise
#the relative paths would be deleted from the login directory instead
ssh $host "cd $alterNativesDir && rm -rf hadoop-etc zookeeper-conf hbase-conf hadoop-log \
hadoop-lib hadoop-default oozie-conf hcatalog-conf hive-conf hadoop-man sqoop-conf hadoop-confone"

#Delete the service users (and their home directories)
ssh $host "userdel -rf nagios"
ssh $host "userdel -rf hive"
ssh $host "userdel -rf ambari-qa"
ssh $host "userdel -rf hbase"
ssh $host "userdel -rf oozie"
ssh $host "userdel -rf hcat"
ssh $host "userdel -rf mapred"
ssh $host "userdel -rf hdfs"
ssh $host "userdel -rf rrdcached"
ssh $host "userdel -rf zookeeper"
ssh $host "userdel -rf sqoop"
ssh $host "userdel -rf puppet"
ssh $host "userdel -rf flume"
ssh $host "userdel -rf tez"
ssh $host "userdel -rf yarn"
ssh $host "userdel -rf storm"
ssh $host "userdel -rf knox"
ssh $host "userdel -rf kafka"
ssh $host "userdel -rf falcon"
ssh $host "userdel -rf hcat"
ssh $host "userdel -rf atlas"
ssh $host "userdel -rf mahout"
ssh $host "userdel -rf spark"


#Delete the leftover directories
ssh $host "rm -rf /hadoop"
ssh $host "rm -rf /etc/hadoop"
ssh $host "rm -rf /etc/hbase"
ssh $host "rm -rf /etc/hcatalog"
ssh $host "rm -rf /etc/hive"
ssh $host "rm -rf /etc/ganglia"
ssh $host "rm -rf /etc/nagios"
ssh $host "rm -rf /etc/oozie"
ssh $host "rm -rf /etc/sqoop"
ssh $host "rm -rf /etc/zookeeper"
ssh $host "rm -rf /etc/kafka"
ssh $host "rm -rf /etc/falcon"
ssh $host "rm -rf /etc/yarn"
ssh $host "rm -rf /etc/spark"
ssh $host "rm -rf /etc/flume"
ssh $host "rm -rf /etc/mapred"
ssh $host "rm -rf /etc/ambari-qa"
ssh $host "rm -rf /etc/tez"


ssh $host "rm -rf /var/run/hadoop"
ssh $host "rm -rf /var/run/hbase"
ssh $host "rm -rf /var/run/hive"
ssh $host "rm -rf /var/run/ganglia"
ssh $host "rm -rf /var/run/nagios"
ssh $host "rm -rf /var/run/oozie"
ssh $host "rm -rf /var/run/zookeeper"
ssh $host "rm -rf /var/run/ambari-metrics-monitor"
ssh $host "rm -rf /var/run/ambari-server"
ssh $host "rm -rf /var/run/hadoop-mapreduce"
ssh $host "rm -rf /var/run/hadoop-yarn"
ssh $host "rm -rf /var/run/spark"



ssh $host "rm -rf /var/log/hadoop"
ssh $host "rm -rf /var/log/hbase"
ssh $host "rm -rf /var/log/hive"
ssh $host "rm -rf /var/log/nagios"
ssh $host "rm -rf /var/log/oozie"
ssh $host "rm -rf /var/log/zookeeper"
ssh $host "rm -rf /var/log/hadoop-mapreduce"
ssh $host "rm -rf /var/log/hadoop-yarn"
ssh $host "rm -rf /var/log/spark"
ssh $host "rm -rf /var/nagios"

ssh $host "rm -rf /usr/lib/hadoop"
ssh $host "rm -rf /usr/lib/hbase"
ssh $host "rm -rf /usr/lib/hcatalog"
ssh $host "rm -rf /usr/lib/hive"
ssh $host "rm -rf /usr/lib/oozie"
ssh $host "rm -rf /usr/lib/sqoop"
ssh $host "rm -rf /usr/lib/zookeeper"
ssh $host "rm -rf /var/lib/hive"
ssh $host "rm -rf /var/lib/ganglia"
ssh $host "rm -rf /var/lib/oozie"
ssh $host "rm -rf /var/lib/zookeeper"
ssh $host "rm -rf /var/lib/hadoop-hdfs"
ssh $host "rm -rf /var/lib/hadoop-mapreduce"
ssh $host "rm -rf /var/lib/hadoop-yarn"

ssh $host "rm -rf /var/tmp/oozie"
ssh $host "rm -rf /tmp/hive"
ssh $host "rm -rf /tmp/nagios"
ssh $host "rm -rf /tmp/ambari-qa"
ssh $host "rm -rf /tmp/sqoop-ambari-qa"

ssh $host "rm -rf /hadoop/oozie"
ssh $host "rm -rf /hadoop/zookeeper"
ssh $host "rm -rf /hadoop/mapred"
ssh $host "rm -rf /hadoop/hdfs"
ssh $host "rm -rf /tmp/hadoop-hive"
ssh $host "rm -rf /tmp/hadoop-nagios"
ssh $host "rm -rf /tmp/hadoop-hcat"
ssh $host "rm -rf /tmp/hadoop-ambari-qa"
ssh $host "rm -rf /tmp/hsperfdata_hbase"
ssh $host "rm -rf /tmp/hsperfdata_hive"
ssh $host "rm -rf /tmp/hsperfdata_nagios"
ssh $host "rm -rf /tmp/hsperfdata_oozie"
ssh $host "rm -rf /tmp/hsperfdata_zookeeper"
ssh $host "rm -rf /tmp/hsperfdata_mapred"
ssh $host "rm -rf /tmp/hsperfdata_hdfs"
ssh $host "rm -rf /tmp/hsperfdata_hcat"
ssh $host "rm -rf /tmp/hsperfdata_ambari-qa"
ssh $host "rm -rf /tmp/hsperfdata_admin"
ssh $host "rm -rf /tmp/hsperfdata_spark"
#Remove the Ambari packages and leftover files
ssh $host "yum remove -y ambari-*"
ssh $host "yum remove -y postgresql"
ssh $host "rm -rf /var/lib/ambari*"
ssh $host "rm -rf /var/log/ambari*"
ssh $host "rm -rf /etc/ambari*"

echo "$logPre======>$host is done! \n"
done
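To run the script, something like the following should work (the file name and hostnames here are hypothetical; the machine running it needs passwordless SSH as root to every host listed in /etc/hosts):

chmod +x uninstall_ambari.sh
./uninstall_ambari.sh

Afterwards, a host can be spot-checked for leftovers, e.g.:

ssh slave1 "yum list installed | grep -iE 'ambari|hdp'"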