# 1. Linux system base configuration
Environment: CentOS 6.5, Hadoop 2.6.5, JDK 1.8
1. Set the IP address and hostname
1) Configure a static IP for the server
vi /etc/sysconfig/network-scripts/ifcfg-eth0
Set the contents as follows:
DEVICE=eth0
#HWADDR=00:0C:29:49:11:17
TYPE=Ethernet
#UUID=467ba7da-3fcb-463c-9dd7-edbb930d708b
ONBOOT=yes
NM_CONTROLLED=yes
BOOTPROTO=static
IPADDR=192.168.111.111 # your own IP on this subnet
NETMASK=255.255.255.0
GATEWAY=192.168.111.2 # the first three octets must match the IP
DNS1=114.114.114.114 # any reachable DNS server works
2) service network restart # restart the network service
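To confirm the new address took effect, a quick check (not part of the original steps):
ifconfig eth0 # the "inet addr" line should show 192.168.111.111
ping -c 1 114.114.114.114 # verifies outbound connectivity through the gateway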
3) Disable the firewall
service iptables stop
chkconfig iptables off # prevent it from starting at boot
Also disable SELinux:
vi /etc/selinux/config
SELINUX=disabled
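Note that SELINUX=disabled only takes effect after a reboot; to turn SELinux off immediately for the current session (an extra step, not in the original notes):
setenforce 0 # switch to permissive mode right away
getenforce # should print Permissive (or Disabled after a reboot)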
4) Set up host mappings
vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.111.111 node1
192.168.111.112 node2
192.168.111.113 node3
192.168.111.114 node4
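A quick way to check that a mapping resolves (hostnames taken from the table above):
ping -c 1 node2 # should resolve to 192.168.111.112; the ping itself only succeeds once node2 is up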
5) Time synchronization
yum -y install ntp
vi /etc/ntp.conf
server ntp1.aliyun.com # add this line to use Aliyun's NTP server
service ntpd start
chkconfig ntpd on # start at boot
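To verify that ntpd is actually syncing (a sanity check, not in the original notes):
ntpq -p # the configured server should appear in the peer list; a leading * marks the one currently selected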
6) Install the JDK
Download the JDK RPM: jdk-8u202-linux-x64.rpm
rpm -i jdk-8u202-linux-x64.rpm
It installs under /usr/java/.
* Some software only recognizes the path /usr/java/default
vi /etc/profile
export JAVA_HOME=/usr/java/default
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile # or equivalently: . /etc/profile
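Confirm the JDK is on the PATH (a quick check, not in the original notes):
java -version # should report java version "1.8.0_202"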
7) Set up passwordless SSH login
ssh localhost # 1) confirms passwordless login is not yet set up; 2) as a side effect, creates /root/.ssh
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa #生成密钥
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
If host A wants to log in to host B without a password:
On A:
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
On B (after copying A's id_dsa.pub over):
cat id_dsa.pub >> ~/.ssh/authorized_keys # append A's public key
Conclusion: once B holds A's public key, A can log in to B without a password.
In short: give your public key to the other host, and the other host appends it to its list of authorized keys.
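Applied to this cluster, a minimal sketch for letting node1 reach node2 without a password (the /tmp staging path is an assumption, not from the original notes):
scp ~/.ssh/id_dsa.pub node2:/tmp/node1.pub # prompts for node2's password one last time
ssh node2 'mkdir -p ~/.ssh && cat /tmp/node1.pub >> ~/.ssh/authorized_keys && chmod 700 ~/.ssh && chmod 600 ~/.ssh/authorized_keys'
ssh node2 # should now log in without a password prompt
Repeat for node3 and node4.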
# 2. Hadoop configuration
Plan the installation path:
mkdir /opt/bigdata
tar xf hadoop-2.6.5.tar.gz
mv hadoop-2.6.5 /opt/bigdata/
pwd
/opt/bigdata/hadoop-2.6.5
vi /etc/profile
export JAVA_HOME=/usr/java/default
export HADOOP_HOME=/opt/bigdata/hadoop-2.6.5
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
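Check that the Hadoop binaries are now on the PATH (a quick check, not in the original notes):
hadoop version # should report Hadoop 2.6.5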
Configure Hadoop's roles:
cd $HADOOP_HOME/etc/hadoop
JAVA_HOME must be set inside hadoop-env.sh; otherwise daemons started over ssh cannot find it (non-interactive ssh sessions do not source /etc/profile):
vi hadoop-env.sh
export JAVA_HOME=/usr/java/default
Specify where the NameNode (NN) role starts (the property block goes inside the <configuration> element; the hostname matches the /etc/hosts table above):
vi core-site.xml
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://node1:9000</value>
</property>
Set the HDFS replication factor to 1 and the storage directories:
vi hdfs-site.xml
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<property>
    <name>dfs.namenode.name.dir</name>
    <value>/var/bigdata/hadoop/local/dfs/name</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>/var/bigdata/hadoop/local/dfs/data</value>
</property>
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>node1:50090</value>
</property>
<property>
    <name>dfs.namenode.checkpoint.dir</name>
    <value>/var/bigdata/hadoop/local/dfs/secondary</value>
</property>
Specify where the DataNode (DN) role starts:
vi slaves
node1
3. Initialize & start:
hdfs namenode -format
# Creates the name directory, initializes an empty fsimage,
# and writes a VERSION file containing the cluster ID (CID)
start-dfs.sh
On the first start, the datanode and secondary namenode roles initialize and create their own data directories.
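To confirm the daemons came up (a quick check, not in the original notes):
jps # on node1 this should list NameNode, DataNode, and SecondaryNameNode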
http://node1:50070
To open the web UI by hostname from Windows, edit C:\Windows\System32\drivers\etc\hosts to match the cluster's /etc/hosts:
192.168.111.111 node1
192.168.111.112 node2
192.168.111.113 node3
192.168.111.114 node4
4. Simple usage:
hdfs dfs -mkdir /bigdata
hdfs dfs -mkdir -p /user/root
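For example, upload a file and list it (using the Hadoop tarball from the earlier step as a sample; any local file works):
hdfs dfs -put hadoop-2.6.5.tar.gz /user/root
hdfs dfs -ls /user/root # the uploaded file should appear here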