Chapter overview
- Use ubuntu:16.04 as the base image.
- Install Hadoop inside the base image and commit the result as a new Hadoop image.
- Start multiple Hadoop containers (one master, two slaves).
- Verify that the cluster works.
- Use the Hadoop cluster.
Base image
Step 1: pull the base Docker image.
# Pull the image (prefix with sudo if your user is not in the docker group)
docker pull ubuntu:16.04
# Start a container
docker run -it --name hdp ubuntu:16.04
Installing Hadoop
All of the following commands are run inside the container:
## Replace the apt sources (Aliyun mirror)
echo "deb http://mirrors.aliyun.com/ubuntu/ xenial main
deb-src http://mirrors.aliyun.com/ubuntu/ xenial main
deb http://mirrors.aliyun.com/ubuntu/ xenial-updates main
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates main
deb http://mirrors.aliyun.com/ubuntu/ xenial universe
deb-src http://mirrors.aliyun.com/ubuntu/ xenial universe
deb http://mirrors.aliyun.com/ubuntu/ xenial-updates universe
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates universe
deb http://mirrors.aliyun.com/ubuntu/ xenial-security main
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-security main
deb http://mirrors.aliyun.com/ubuntu/ xenial-security universe
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-security universe" > /etc/apt/sources.list
## Install the required packages
apt update
apt install -y openjdk-8-jdk scala vim net-tools openssh-server openssh-client
## Set up passwordless SSH
cd ~
ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
cat .ssh/id_rsa.pub >> .ssh/authorized_keys
service ssh start
# Verify that the local ssh login works, then type exit to leave the test session
ssh 127.0.0.1
## Start sshd automatically in every new interactive shell
echo "service ssh start" >> ~/.bashrc
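Optional: start-all.sh will later ssh into slave1 and slave2, and the first connection to each new host normally prompts to confirm the host key. For this disposable lab setup you can suppress that prompt (not something to do on a production host):
# Accept new host keys without an interactive prompt
echo "Host *
    StrictHostKeyChecking no" > ~/.ssh/config
chmod 600 ~/.ssh/config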
## Install Hadoop
### Download on the host: https://mirror.bit.edu.cn/apache/hadoop/common/hadoop-3.2.1/hadoop-3.2.1.tar.gz
### Copy it into the container (run on the host): docker cp hadoop-3.2.1.tar.gz hdp:/root
tar -zxvf hadoop-3.2.1.tar.gz -C /usr/local
cd /usr/local
mv hadoop-3.2.1 hadoop
## Set the environment variables (the quoted 'EOF' keeps the $ variables literal,
## so they are expanded when /etc/profile is sourced, not when it is written)
cat >> /etc/profile <<'EOF'
#java
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
#hadoop
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export HDFS_DATANODE_USER=root
export HDFS_DATANODE_SECURE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export HDFS_NAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
EOF
source /etc/profile
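A quick sanity check that the variables took effect; both commands should print version information:
# Should report an openjdk 1.8.x version
java -version
# Should report Hadoop 3.2.1
hadoop version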
Configuring Hadoop
## Append to /usr/local/hadoop/etc/hadoop/hadoop-env.sh
echo "
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
" >> /usr/local/hadoop/etc/hadoop/hadoop-env.sh
## core-site.xml under /usr/local/hadoop/etc/hadoop
echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/root/hadoop/tmp</value>
</property>
</configuration>
" > /usr/local/hadoop/etc/hadoop/core-site.xml
## hdfs-site.xml under /usr/local/hadoop/etc/hadoop
echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/root/hadoop/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/root/hadoop/hdfs/data</value>
</property>
</configuration>
" > /usr/local/hadoop/etc/hadoop/hdfs-site.xml
## mapred-site.xml under /usr/local/hadoop/etc/hadoop
echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.application.classpath</name>
<value>
/usr/local/hadoop/etc/hadoop,
/usr/local/hadoop/share/hadoop/common/*,
/usr/local/hadoop/share/hadoop/common/lib/*,
/usr/local/hadoop/share/hadoop/hdfs/*,
/usr/local/hadoop/share/hadoop/hdfs/lib/*,
/usr/local/hadoop/share/hadoop/mapreduce/*,
/usr/local/hadoop/share/hadoop/mapreduce/lib/*,
/usr/local/hadoop/share/hadoop/yarn/*,
/usr/local/hadoop/share/hadoop/yarn/lib/*
</value>
</property>
</configuration>
" > /usr/local/hadoop/etc/hadoop/mapred-site.xml
## yarn-site.xml under /usr/local/hadoop/etc/hadoop
echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
" > /usr/local/hadoop/etc/hadoop/yarn-site.xml
## workers under /usr/local/hadoop/etc/hadoop (one hostname per line, no blank lines)
echo "slave1
slave2" > /usr/local/hadoop/etc/hadoop/workers
## Format the NameNode (run once)
$HADOOP_HOME/bin/hdfs namenode -format
# The format log should end with something like:
# /************************************************************
# SHUTDOWN_MSG: Shutting down NameNode at befc49964aae/172.17.0.2
# ************************************************************/
Saving the image
# "bef" is a unique prefix of the container ID (befc49964aae in the log above); use your own
docker commit -m "hadoop" bef ubuntu:hadoop
# sha256:5c9093ecff959c7dcf880f416cb4bab2d222bb4908c721ce40638abbec628e23
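Verify that the new image exists:
# Should list the ubuntu repository with the hadoop tag
docker images ubuntu:hadoop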
Building the cluster
## Create a Docker network so the containers can reach each other by hostname
docker network create hadoop
## Start one master and two slaves (run each command in its own terminal; --rm removes a container when it exits)
docker run -it --rm --network hadoop -h master --name master -p 9870:9870 -p 8088:8088 ubuntu:hadoop /bin/bash
docker run -it --rm --network hadoop -h slave1 --name slave1 ubuntu:hadoop /bin/bash
docker run -it --rm --network hadoop -h slave2 --name slave2 ubuntu:hadoop /bin/bash
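Before starting Hadoop, it is worth confirming from inside the master container that both slaves resolve on the hadoop network and accept passwordless ssh (sshd is started by the ~/.bashrc line added earlier):
# Each command should print the slave's hostname without asking for a password
ssh slave1 hostname
ssh slave2 hostname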
### Inside the master container, start Hadoop
cd /usr/local/hadoop/sbin/ && ./start-all.sh
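Check that the daemons came up with jps (shipped with the JDK). Roughly, the master should list NameNode, SecondaryNameNode, and ResourceManager, while each slave should list DataNode and NodeManager:
jps
# For a cluster-wide view, ask HDFS for a report; with both slaves up it should show two live datanodes
/usr/local/hadoop/bin/hdfs dfsadmin -report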
Accessing the cluster
# NameNode web UI
localhost:9870
# YARN ResourceManager web UI
localhost:8088
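If the ports were published correctly, both UIs should respond from the host. A quick check from a host terminal, assuming curl is installed there:
# Expect 200 from the NameNode UI
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:9870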
Example
## Run the following from /usr/local/hadoop/bin on the master
cat ../README.txt > ../file.txt
./hadoop fs -mkdir /input
./hadoop fs -put ../file.txt /input
./hadoop fs -ls /input
./hadoop jar ../share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.1.jar wordcount /input /output
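When the job finishes, the results land in /output; for this single-reducer example the counts typically end up in part-r-00000:
./hadoop fs -ls /output
./hadoop fs -cat /output/part-r-00000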