Hadoop Pseudo-Distributed One-Click Deployment (for CentOS)

Script 1

vi env.sh

#!/bin/bash
# Set the hostname
hostnamectl set-hostname hadoop
# Map the host's IP to the hostname (the name must match the "hadoop" used by the other scripts)
echo "192.168.200.100 hadoop" >> /etc/hosts
# Stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
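
A quick sanity check after running env.sh might look like this (a minimal sketch; exact output varies by CentOS release):

hostname                         # should print "hadoop"
grep hadoop /etc/hosts           # the IP-to-name mapping should be present
systemctl is-enabled firewalld   # should print "disabled"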

Script 2

vi ssh.sh

#!/bin/bash

# Hosts to configure for passwordless SSH login
servers=("hadoop")

# Generate a key pair, unless one already exists from a previous run
[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ""

# Copy the public key to every host in the list
for server in "${servers[@]}"; do
    if ssh-copy-id -i ~/.ssh/id_rsa.pub "$server"; then
        echo "SSH key copied to $server successfully."
    else
        echo "Failed to copy SSH key to $server."
    fi
done

# Verify that passwordless login works
for server in "${servers[@]}"; do
    ssh "$server" "hostname"
done
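
Note that ssh-copy-id still prompts once to accept the host key and once for the root password. The host-key prompt can be pre-empted with ssh-keyscan if a non-interactive run matters (an optional sketch, assuming the name "hadoop" resolves via /etc/hosts):

# Pre-accept the host key so the first SSH connection does not prompt
mkdir -p ~/.ssh && chmod 700 ~/.ssh
ssh-keyscan -H hadoop >> ~/.ssh/known_hosts 2>/dev/null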

Script 3

vi jdk.sh

#!/bin/bash

# Path of the JDK tarball to install
FILE_PATH=/root/jdk-8u162-linux-x64.tar.gz

# Environment variables for the current shell (the hosts themselves get these via /etc/profile below)
export JAVA_HOME=/usr/local/jdk1.8.0_162
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

# Host list
hosts=("hadoop")

# Install the JDK on each host
for host in "${hosts[@]}"; do
    echo "Installing JDK 1.8 on $host"

    # Run the installation on the remote host over ssh
    ssh -n "$host" "
        tar -zxvf $FILE_PATH -C /usr/local;
        echo 'export JAVA_HOME=/usr/local/jdk1.8.0_162' >> /etc/profile;
        echo 'export PATH=/usr/local/jdk1.8.0_162/bin:\$PATH' >> /etc/profile;
        echo 'export CLASSPATH=/usr/local/jdk1.8.0_162/lib/dt.jar:/usr/local/jdk1.8.0_162/lib/tools.jar' >> /etc/profile;
        source /etc/profile;
        java -version;
    "

    # Verify the installation (use the full path: non-interactive ssh sessions do not source /etc/profile)
    if ssh -n "$host" "/usr/local/jdk1.8.0_162/bin/java -version 2>&1" | grep -q "java version"; then
        echo "JDK 1.8 installed on $host"
    else
        echo "JDK 1.8 installation on $host failed"
    fi
done

echo "Finished installing JDK 1.8 on all hosts"

Script 4

vi hadoop.sh

#!/bin/bash

# Hadoop version and paths
HADOOP_VERSION="hadoop-3.1.3"
HADOOP_TAR_GZ="hadoop-3.1.3.tar.gz"
HADOOP_HOME="/usr/local/$HADOOP_VERSION"
HADOOP_TMP_DIR="$HADOOP_HOME/tmp"

# Abort if the Hadoop tarball is missing
if [ ! -f "/root/$HADOOP_TAR_GZ" ]; then
  echo "Error: /root/$HADOOP_TAR_GZ not found."
  exit 1
fi

# Unpack the Hadoop tarball
tar -zxvf "/root/$HADOOP_TAR_GZ" -C /usr/local/

# Create the temporary directory
mkdir -p "$HADOOP_TMP_DIR"

# Set HADOOP_HOME and PATH
echo "export HADOOP_HOME=$HADOOP_HOME" >> /etc/profile
echo "export PATH=\$PATH:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin" >> /etc/profile
source /etc/profile

# Configure hadoop-env.sh (JAVA_HOME and the run-as-root daemon users)
cat >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh" << EOF
export JAVA_HOME=/usr/local/jdk1.8.0_162
export HADOOP_TMP_DIR=$HADOOP_TMP_DIR
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
EOF

# Write core-site.xml
cat > "$HADOOP_HOME/etc/hadoop/core-site.xml" << EOF
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>$HADOOP_TMP_DIR</value>
        <description>A base for other temporary directories.</description>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop:9000</value>
    </property>
</configuration>
EOF

# Write hdfs-site.xml
cat > "$HADOOP_HOME/etc/hadoop/hdfs-site.xml" << EOF
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file://$HADOOP_HOME/hadoopdata/namenode</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file://$HADOOP_HOME/hadoopdata/datanode</value>
    </property>
</configuration>
EOF

# Write yarn-site.xml
cat > "$HADOOP_HOME/etc/hadoop/yarn-site.xml" << EOF
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
EOF

# Write mapred-site.xml
cat > "$HADOOP_HOME/etc/hadoop/mapred-site.xml" << EOF
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
EOF

# Write the workers file
echo "hadoop" > "$HADOOP_HOME/etc/hadoop/workers"

# Format HDFS
$HADOOP_HOME/bin/hdfs namenode -format
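# Note: on a re-run, if the namenode directory already exists, the format
# command above prompts for confirmation; add -force to skip the prompt.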

# Start the Hadoop daemons
$HADOOP_HOME/sbin/start-dfs.sh
$HADOOP_HOME/sbin/start-yarn.sh

# Print the Hadoop version
$HADOOP_HOME/bin/hadoop version

# Report HDFS status
$HADOOP_HOME/bin/hdfs dfsadmin -report
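
Once both start scripts return, jps on the node should show the five pseudo-distributed daemons: NameNode, DataNode, SecondaryNameNode, ResourceManager, and NodeManager, and the dfsadmin report above should list one live datanode.

To make the deployment genuinely one-click, the four scripts can be chained by a small wrapper; a minimal sketch, assuming all four sit in the same directory (run.sh is a name chosen here, not part of the scripts above):

vi run.sh

#!/bin/bash
# Run the four deployment scripts in order, stopping at the first failure
set -e
bash env.sh
bash ssh.sh
bash jdk.sh
bash hadoop.sh
# Pick up the PATH entries written to /etc/profile, then list the Java daemons
source /etc/profile
jps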
