【root下】
cd 桌面
cd Software
tar xzvf jdk-...... -C /opt
tar xzvf hadoop-...... -C /opt
tar xzvf zookeeper-...... -C /opt
tar xzvf apache-flume-...... -C /opt
tar xzvf apache-hive-..... -C /opt
tar xzvf hbase-.... -C /opt
tar xzvf sqoop-...... -C /opt
cd /opt
mv jdk-... java
mv hadoop-... hadoop
mv zookeeper-.... zookeeper
mv apache-flume-... flume
mv apache-hive-.... hive
mv hbase-.... hbase
mv sqoop-...... sqoop
vim /etc/profile
export JAVA_HOME=/opt/java
export HADOOP_HOME=/opt/hadoop
export ZOOKEEPER_HOME=/opt/zookeeper
export FLUME_HOME=/opt/flume
export HIVE_HOME=/opt/hive
export HBASE_HOME=/opt/hbase
export SQOOP_HOME=/opt/sqoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export PATH=$PATH:$ZOOKEEPER_HOME/bin:$FLUME_HOME/bin:$HIVE_HOME/bin:$HBASE_HOME/bin:$SQOOP_HOME/bin
export CLASSPATH=$CLASSPATH:$SQOOP_HOME/lib
【Hadoop下】
#在Hadoop下
source /etc/profile
(配置网络)##配置三台
cd /etc/sysconfig/network-scripts
rm ifcfg-eth0
vim /etc/hosts
192.168.120.101 master
192.168.120.102 slave1
192.168.120.103 slave2
(配置免密)##配置三台
ssh-keygen -t rsa -P ''
ssh-copy-id master
ssh-copy-id slave1
ssh-copy-id slave2
(配置集群)
cd /opt/hadoop/etc/hadoop
vim core-site.xml
fs.defaultFS
hdfs://master:9000
hadoop.tmp.dir
/opt/hadoop/tmp
io.file.buffer.size
131072
vim hdfs-site.xml
dfs.namenode.name.dir
/opt/hadoop/dfs/name
dfs.datanode.data.dir
/opt/hadoop/dfs/data
dfs.replication
3
dfs.blocksize
268435456
vim yarn-site.xml
yarn.resourcemanager.address master:8032
yarn.resourcemanager.scheduler.address master:8030
yarn.resourcemanager.resource-tracker.address master:8031
yarn.resourcemanager.admin.address master:8033
yarn.resourcemanager.webapp.address master:8088
yarn.nodemanager.aux-services mapreduce_shuffle
yarn.nodemanager.aux-services.mapreduce.shuffle.class org.apache.hadoop.mapred.ShuffleHandler
vim mapred-site.xml
mapreduce.framework.name
yarn
vim hadoop-env.sh
export JAVA_HOME=/opt/java
chown -R hadoop:hadoop /opt/hadoop
(格式化namenode)
hdfs namenode -format
scp -r /opt slave1:/
scp -r /opt slave2:/
(启动集群)
start-all.sh
【关闭防火墙】
systemctl stop firewalld
systemctl status firewalld
systemctl disable firewalld
【zookeeper配置】
cd /opt/zookeeper
mkdir data
mkdir logs
cd data
vim myid
1
cd ..
cd conf
cp zoo_sample.cfg zoo.cfg
vim zoo.cfg
dataDir=/opt/zookeeper/data
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
chown -R hadoop:hadoop /opt/zookeeper
scp -r /opt/zookeeper slave1:/opt
scp -r /opt/zookeeper slave2:/opt
进入slave1、2 将myid中的1分别改为2和3
(启动zookeeper)
zkServer.sh start #三台都
(查看zookeeper状态)
zkServer.sh status #三台
【flume配置】
cd /opt
chown -R hadoop:hadoop flume
cd /opt/flume/conf
cp flume-env.sh.template flume-env.sh
vim flume-env.sh
export JAVA_HOME=/opt/java
flume-ng version #查看flume是否安装成功
cd /opt/flume/conf
vim simple-hdfs-flume.conf
a1.sources=r1
a1.sinks=k1
a1.channels=c1
a1.sources.r1.type=spooldir
a1.sources.r1.spoolDir=/opt/hadoop/logs/
a1.sources.r1.fileHeader=true
a1.sinks.k1.type=hdfs
a1.sinks.k1.hdfs.path=hdfs://master:9000/flume
a1.sinks.k1.hdfs.rollSize=1048760
a1.sinks.k1.hdfs.rollCount=0
a1.sinks.k1.hdfs.rollInterval=900
a1.sinks.k1.hdfs.useLocalTimeStamp=true
a1.channels.c1.type=file
a1.channels.c1.capacity=1000
a1.channels.c1.transactionCapacity=100
a1.sources.r1.channels=c1
a1.sinks.k1.channel=c1
flume-ng agent --conf-file simple-hdfs-flume.conf --name a1
hdfs dfs -ls /flume
【安装MYSQL和Hive】
chown -R hadoop:hadoop /opt/hive
cd /Software
unzip mysql-5.7.1.....
yum remove mariadb-lib
rpm -qa | grep mariadb ##查看已安装的mariaDB安装包
rpm -e --nodeps mariadb-libs-... ##卸载mariaDB安装包
rpm -ivh mysql-community-common-......
rpm -ivh mysql-community-libs-......
rpm -ivh mysql-community-client-......
rpm -ivh mysql-community-server-......
systemctl status mysqld ##查看MySQL数据库的状态
vim /var/log/mysqld.log ##在mysqld.log文件中查看MySQL的初始密码
/password
mysql -u root -p
粘贴刚复制的密码,就能进入mysql
[修改密码为Password123$]
mysql_secure_installation
Enter password for user root:
1、输入mysqld.log文件中查询到的密码
2、new password : Password123$
3、 Re-enter new password: Password123$
4、 y 5、y 6、n 7、y 8、y 9、y
mysql -u root -p
grant all privileges on *.* to root@'master' identified by 'Password123$';
grant all privileges on *.* to root@'%' identified by 'Password123$';
flush privileges; ##刷新授权
exit
##在Hadoop下将hive文件下的hive-default.xml.template文件复制并且改名为hive-site.xml
cp hive-default.xml.template hive-site.xml
vim hive-site.xml
javax.jdo.option.ConnectionURL
jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&useSSL=false
javax.jdo.option.ConnectionPassword
Password123$
hive.metastore.schema.verification
false
javax.jdo.option.ConnectionDriverName
com.mysql.jdbc.Driver
javax.jdo.option.ConnectionUserName
root
hive.querylog.location
/opt/hive/tmp
hive.exec.local.scratchdir
/opt/hive/tmp
hive.downloaded.resources.dir
/opt/hive/tmp/resources
hive.server2.logging.operation.log.location
/opt/hive/tmp/operation_log
mkdir /opt/hive/tmp
##root下
cp ~/mysql-connector-java-5.1.47.jar /opt/hive/lib
chown -R hadoop:hadoop /opt/hive/lib/mysql-connector-java-5.1.47.jar
schematool -initSchema -dbType mysql
##若成功则提示schematool completed,否则要删除MySQL中的hive数据库后重新执行schematool -initSchema -dbType mysql
mysql -u root -p
show databases;
drop database hive;
exit
##执行hive
hive
【配置HBase】
cd /opt/hbase/conf
vim hbase-env.sh
#找到以下两个
export JAVA_HOME=/opt/java
export HBASE_MANAGES_ZK=false
vim hbase-site.xml
<name>hbase.rootdir</name>
<value>hdfs://master:9000/hbase</value>
<name>hbase.master.info.port</name>
<value>60010</value>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
<name>zookeeper.session.timeout</name>
<value>120000</value>
<name>hbase.zookeeper.quorum</name>
<value>master,slave1,slave2</value>
<name>hbase.tmp.dir</name>
<value>/opt/hbase/tmp</value>
<name>hbase.cluster.distributed</name>
<value>true</value>
vim regionservers
#删除localhost,改为
slave1
slave2
mkdir /opt/hbase/tmp
scp -r /opt/hbase slave1:/opt
scp -r /opt/hbase slave2:/opt
chown -R hadoop:hadoop /opt/hbase
start-hbase.sh
如果启动后没有则需要在slave1和slave2中同步时间
(vim /etc/ntp.conf)
restrict 192.168.120.0 mask 255.255.255.0 nomodify notrap
server 127.127.1.0
fudge 127.127.1.0 stratum 10
systemctl restart ntpd #在master中的root下
ntpdate master #在slave1与2下的root用户中
【sqoop配置】
tar xzvf sqoop-...... -C /opt
cd /opt
mv sqoop-...... sqoop
cd sqoop/conf
cp sqoop-env-template.sh sqoop-env.sh
vim sqoop-env.sh
export HADOOP_COMMON_HOME=/opt/hadoop
export HADOOP_MAPRED_HOME=/opt/hadoop
export HBASE_HOME=/opt/hbase
export HIVE_HOME=/opt/hive
vim /etc/profile
export SQOOP_HOME=/opt/sqoop
export PATH=$PATH:$SQOOP_HOME/bin
export CALSSPATH=$CLASSPATH:$SQOOP_HOME/lib
source /etc/profile
cp software/mysql-connect......jar /opt/sqoop/lib
start-all.sh
##测试sqoop是否能正常连接MySQL数据库
sqoop list-databases --connect jdbc:mysql://master:3306/ --username root -P
cp /opt/hive/lib/hive-common-......jar /opt/sqoop/lib/
uname -a #查看Linux操作系统信息
fdisk -l #查看硬盘信息(在root下)
swapon -s #查看所有交换分区
df -h #查看文件系统占比
ip a #查看IP地址(与ifconfig相似)
netstat -lntp #查看所有监听端口
netstat -antp #查看所有已经建立的链接
top #实时显示进程状态(按q退出)
ps aux | grep mysqld #查看mysql的进程
cat /proc/cpuinfo #查看CPU信息