Installing Big Data Tools with a Script

Note: this script installs the JDK, Hadoop, Docker, MySQL, Hive, Scala, Spark, ZooKeeper, Kafka, Flume, HBase, and MongoDB. canal and Maxwell are planned but not installed yet.

The installation script follows. Put the installation packages in /opt/install, create /opt/bigdata as the installation directory, place the script itself in /root/bin, and give it execute permission.
Note: the hostname (love) and IP address (192.168.145.131) used throughout the script must be replaced with your own machine's.
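A minimal setup sketch, assuming the script below is saved as install_bigdata.sh (the filename is hypothetical):

mkdir -p /opt/install /opt/bigdata /root/bin
cp install_bigdata.sh /root/bin/
chmod +x /root/bin/install_bigdata.sh
/root/bin/install_bigdata.sh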

#!/bin/bash

# Stop and disable the firewall, then restart the network service
systemctl stop firewalld
systemctl disable firewalld
systemctl restart network.service

# Install the JDK
tar -zxvf /opt/install/jdk-8u111-linux-x64.tar.gz -C /opt/bigdata/

mv /opt/bigdata/jdk1.8.0_111/ /opt/bigdata/jdk180

touch /etc/profile.d/env.sh

echo 'export JAVA_HOME=/opt/bigdata/jdk180' >> /etc/profile.d/env.sh
echo 'export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile.d/env.sh
echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh
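# Optional sanity check: the JDK should now be on PATH (expect a 1.8.0_111 version string).
java -version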

# Install Hadoop
tar -zxvf /opt/install/hadoop-2.6.0-cdh5.14.2.tar.gz -C /opt/bigdata/

rm -rf /opt/bigdata/hadoop260   # remove any previous install, regardless of current directory

mv /opt/bigdata/hadoop-2.6.0-cdh5.14.2/ /opt/bigdata/hadoop260

mv /opt/bigdata/hadoop260/etc/hadoop/mapred-site.xml.template /opt/bigdata/hadoop260/etc/hadoop/mapred-site.xml

echo 'export JAVA_HOME=/opt/bigdata/jdk180' >> /opt/bigdata/hadoop260/etc/hadoop/hadoop-env.sh

echo '<configuration>
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://192.168.145.131:9000</value>
</property>
<property>
   <name>hadoop.tmp.dir</name>
   <value>/opt/bigdata/hadoop260/hadoop2</value>
</property>
<property>
   <name>hadoop.proxyuser.root.hosts</name>
   <value>*</value>
 </property>
<property>
   <name>hadoop.proxyuser.root.groups</name>
   <value>*</value>
 </property>
</configuration>' > /opt/bigdata/hadoop260/etc/hadoop/core-site.xml

echo '<configuration>
<property>
  <name>dfs.replication</name>
  <value>1</value>
</property>
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>love:50090</value>
</property>
</configuration>
' > /opt/bigdata/hadoop260/etc/hadoop/hdfs-site.xml

echo '<configuration>
<property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
</property>
<property>
        <name>mapreduce.jobhistory.address</name>
        <value>love:10020</value>
</property>
<property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>love:19888</value>
</property>
</configuration>
' > /opt/bigdata/hadoop260/etc/hadoop/mapred-site.xml

echo '<configuration>
<!-- How reducers fetch data -->
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- Address of the YARN ResourceManager -->
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>love</value>
</property>
<!-- Enable log aggregation -->
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>
<!-- Retain aggregated logs for 7 days (604800 seconds) -->
<property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
</property>
</configuration>
' > /opt/bigdata/hadoop260/etc/hadoop/yarn-site.xml

echo 'localhost' > /opt/bigdata/hadoop260/etc/hadoop/slaves


echo 'export HADOOP_HOME=/opt/bigdata/hadoop260' >> /etc/profile.d/env.sh
echo 'export HADOOP_MAPRED_HOME=$HADOOP_HOME' >> /etc/profile.d/env.sh
echo 'export HADOOP_COMMON_HOME=$HADOOP_HOME' >> /etc/profile.d/env.sh
echo 'export HADOOP_HDFS_HOME=$HADOOP_HOME' >> /etc/profile.d/env.sh
echo 'export YARN_HOME=$HADOOP_HOME' >> /etc/profile.d/env.sh
echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native' >> /etc/profile.d/env.sh
echo 'export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib"' >> /etc/profile.d/env.sh
echo 'export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh

hadoop namenode -format
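# namenode -format wipes HDFS metadata, so it should only run on a first install.
# Optional check once HDFS is running (start-all.sh is invoked in the Hive section below):
# hdfs dfsadmin -report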


# Install Docker and run MySQL 5.7 in a container
sudo yum update -y

sudo yum install -y yum-utils device-mapper-persistent-data lvm2 
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 
sudo yum install -y docker-ce
sudo systemctl enable docker
sudo systemctl start docker
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://wgbl53iz.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
docker pull mysql:5.7 
sudo docker run -p 3306:3306 --name mysql -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7
docker start mysql
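# Optional sanity check: the container should show as Up; MySQL needs ~30s to
# initialize before it accepts connections.
docker ps --filter name=mysql
# docker exec mysql mysql -uroot -p123456 -e 'SELECT VERSION();'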


# Install Hive
tar -zxvf /opt/install/hive-1.1.0-cdh5.14.2.tar.gz -C /opt/bigdata/
mv /opt/bigdata/hive-1.1.0-cdh5.14.2/ /opt/bigdata/hive110
cp /opt/install/mysql-connector-java-5.1.27-bin.jar /opt/bigdata/hive110/lib/

touch /opt/bigdata/hive110/conf/hive-site.xml


echo '<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://192.168.145.131:3306/hive110?createDatabaseIfNotExist=true</value>
    </property>
<property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
    </property>
<property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>root</value>
    </property>
<property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>123456</value>
    </property>
<property>
    <name>hive.server2.thrift.client.user</name>
    <value>root</value>
    <description>Username to use against thrift client</description>
  </property>
  <property>
    <name>hive.server2.thrift.client.password</name>
    <value>123456</value>
    <description>Password to use against thrift client</description>
  </property>
</configuration>
' > /opt/bigdata/hive110/conf/hive-site.xml



echo 'export HIVE_HOME=/opt/bigdata/hive110' >> /etc/profile.d/env.sh
echo 'export PATH=$PATH:$HIVE_HOME/bin' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh

start-all.sh

schematool -dbType mysql -initSchema

hadoop fs -chown -R hive:hive /tmp
hadoop fs -chmod -R 777 /tmp
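# Optional sanity check: the metastore schema should now exist in MySQL.
hive -e 'show databases;'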

# Install Scala
tar -zxvf /opt/install/scala-2.12.10.tgz -C /opt/bigdata/

echo 'export SCALA_HOME=/opt/bigdata/scala-2.12.10' >> /etc/profile.d/env.sh
echo 'export PATH=$PATH:$SCALA_HOME/bin' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh
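# Optional sanity check:
scala -version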
      

# Install Spark
tar -zxvf /opt/install/spark-2.4.5-bin-hadoop2.6.tgz -C /opt/bigdata/
mv /opt/bigdata/spark-2.4.5-bin-hadoop2.6/ /opt/bigdata/spark245

mv /opt/bigdata/spark245/conf/spark-env.sh.template /opt/bigdata/spark245/conf/spark-env.sh

echo '
export SCALA_HOME=/opt/bigdata/scala-2.12.10
export JAVA_HOME=/opt/bigdata/jdk180
export HADOOP_HOME=/opt/bigdata/hadoop260
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
SPARK_MASTER_IP=Master
SPARK_LOCAL_DIRS=/opt/bigdata/spark245
SPARK_DRIVER_MEMORY=512M
' >> /opt/bigdata/spark245/conf/spark-env.sh
	
echo 'export SPARK_HOME=/opt/bigdata/spark245
export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin
' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh
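# Optional sanity check:
spark-submit --version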


# Install ZooKeeper
tar -zxvf /opt/install/zookeeper-3.4.5-cdh5.14.2.tar.gz -C /opt/bigdata/
mv /opt/bigdata/zookeeper-3.4.5-cdh5.14.2/ /opt/bigdata/zk345
mv /opt/bigdata/zk345/conf/zoo_sample.cfg /opt/bigdata/zk345/conf/zoo.cfg
echo 'dataDir=/opt/bigdata/zk345
dataLogDir=/opt/bigdata/zk345
' >> /opt/bigdata/zk345/conf/zoo.cfg

echo 'export ZOOKEEPER_HOME=/opt/bigdata/zk345
export PATH=$PATH:$ZOOKEEPER_HOME/bin
' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh
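# Optionally start ZooKeeper here; the original script leaves all services stopped.
# zkServer.sh start
# zkServer.sh status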


# Install Kafka
tar -zxvf /opt/install/kafka_2.11-2.0.0.tgz -C /opt/bigdata/
mv /opt/bigdata/kafka_2.11-2.0.0/ /opt/bigdata/kafka211

# Back up the stock config before appending overrides.
cp /opt/bigdata/kafka211/config/server.properties /opt/bigdata/kafka211/config/server.caicai

# Append overrides; for duplicate keys the last value wins, so these take
# precedence over the defaults already in the file. Note zookeeper.connect
# points at ZooKeeper's client port 2181 (9092 is Kafka's own listener port).
echo 'delete.topic.enable=true
auto.create.topics.enable=false
log.dirs=/opt/bigdata/kafka211/kafka-logs
advertised.listeners=PLAINTEXT://192.168.145.131:9092
zookeeper.connect=192.168.145.131:2181
' >> /opt/bigdata/kafka211/config/server.properties

echo 'export KAFKA_HOME=/opt/bigdata/kafka211
export PATH=$PATH:$KAFKA_HOME/bin
' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh
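# Optionally start the broker and smoke-test it (ZooKeeper must be running first):
# kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
# kafka-topics.sh --create --zookeeper 192.168.145.131:2181 --replication-factor 1 --partitions 1 --topic test
# kafka-topics.sh --list --zookeeper 192.168.145.131:2181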


# Install Flume
tar -zxvf /opt/install/flume-ng-1.6.0-cdh5.14.0.tar.gz -C /opt/bigdata/
mv /opt/bigdata/apache-flume-1.6.0-cdh5.14.0-bin/ /opt/bigdata/flume160
mv /opt/bigdata/flume160/conf/flume-env.sh.template /opt/bigdata/flume160/conf/flume-env.sh

echo '
## Set JAVA_HOME
export JAVA_HOME=/opt/bigdata/jdk180
## Configure JAVA_OPTS
export JAVA_OPTS="-Xms100m -Xmx10000m -Dcom.sun.management.jmxremote"
' >> /opt/bigdata/flume160/conf/flume-env.sh

echo 'export FLUME_HOME=/opt/bigdata/flume160
export PATH=$FLUME_HOME/bin:$PATH' >> /etc/profile.d/env.sh

source /etc/profile.d/env.sh
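# Optional sanity check:
flume-ng version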


# Install HBase
tar -zxvf /opt/install/hbase-1.2.0-cdh5.14.2.tar.gz -C /opt/bigdata/
mv /opt/bigdata/hbase-1.2.0-cdh5.14.2/ /opt/bigdata/hbase120

echo 'export JAVA_HOME=/opt/bigdata/jdk180
export HBASE_MANAGES_ZK=false' >> /opt/bigdata/hbase120/conf/hbase-env.sh

echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->

<configuration>
<property>
         <name>hbase.rootdir</name>
         <value>hdfs://192.168.145.131:9000/hbase120</value>
</property>
<property>
         <name>hbase.cluster.distributed</name>
         <value>true</value>
</property>
<property>
         <name>hbase.zookeeper.property.dataDir</name>
         <value>/opt/bigdata/hbase120</value>
</property>
<property>
         <name>hbase.zookeeper.property.clientPort</name>
        <value>2181</value>
</property>
</configuration>

' > /opt/bigdata/hbase120/conf/hbase-site.xml

echo 'export HBASE_HOME=/opt/bigdata/hbase120
export PATH=$PATH:$HBASE_HOME/bin' >> /etc/profile.d/env.sh
source /etc/profile.d/env.sh

echo 'dataDir=/opt/bigdata/hbase120
server.0=192.168.145.131:2287:3387' >> /opt/bigdata/zk345/conf/zoo.cfg
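# Note: the dataDir line above overrides the one set earlier (later keys win in
# zoo.cfg), so ZooKeeper's data directory ends up as /opt/bigdata/hbase120.
# Optionally start HBase once HDFS and ZooKeeper are up:
# start-hbase.sh
# hbase shell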


# Install MongoDB
curl -O https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.0.6.tgz
mv mongodb-linux-x86_64-3.0.6.tgz /opt/install
tar -zxvf /opt/install/mongodb-linux-x86_64-3.0.6.tgz -C /opt/bigdata/
mv /opt/bigdata/mongodb-linux-x86_64-3.0.6/ /opt/bigdata/mongodb306
echo 'export MONGODB_HOME=/opt/bigdata/mongodb306
export PATH=$PATH:$MONGODB_HOME/bin
' >> /etc/profile.d/env.sh
source /etc/profile.d/env.sh
mkdir -p /data/db
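# Optionally start mongod against the default dbpath created above:
# mongod --dbpath /data/db --fork --logpath /opt/bigdata/mongodb306/mongod.log
# mongo --eval 'db.version()'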
