Notes for May 13

Passwordless SSH login

ifconfig

vi /etc/hosts
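
The hosts file maps each node's IP address to its hostname; a sketch with placeholder addresses (use the real IPs shown by ifconfig above):

192.168.1.10   master

192.168.1.11   slave1

192.168.1.12   slave2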

scp /etc/hosts slave1:/etc/

scp /etc/hosts slave2:/etc/

ssh-keygen -t rsa

ssh-copy-id master

ssh-copy-id slave1

ssh-copy-id slave2
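
After the keys are copied, logging in to each node should no longer ask for a password; a quick check:

ssh slave1

exit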

JDK extraction and installation

cd /opt/software

mkdir /opt/module

tar -zxvf jdk-…… -C /opt/module    (Tab-complete the archive file name)

cd /opt/module

mv jdk-……/ jdk

ls    (check that the directory was renamed)

vi /etc/profile

export JAVA_HOME=/opt/module/jdk

export PATH=$JAVA_HOME/bin:$PATH

source /etc/profile

java -version

Create the installation directory on slave1 and slave2

slave1: mkdir /opt/module

slave2: mkdir /opt/module

scp -r /opt/module/jdk slave1:/opt/module/jdk

scp -r /opt/module/jdk slave2:/opt/module/jdk

slave1: vi /etc/profile

        export JAVA_HOME=/opt/module/jdk

        export PATH=$JAVA_HOME/bin:$PATH

        source /etc/profile

        java -version

slave2: vi /etc/profile

        export JAVA_HOME=/opt/module/jdk

        export PATH=$JAVA_HOME/bin:$PATH

        source /etc/profile

        java -version

Hadoop configuration

cd /opt/software

tar -zxvf /opt/software/hadoop-…… -C /opt/module

cd /opt/module

mv hadoop-……/ hadoop

vi /etc/profile

export HADOOP_HOME=/opt/module/hadoop

export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH

source /etc/profile

hadoop version

cd /opt/module/hadoop/etc/hadoop

vim core-site.xml    (add the following <property> blocks inside the existing <configuration> element; the same applies to the other *-site.xml files below)

<property>

    <name>fs.defaultFS</name>

    <value>hdfs://master:9000</value>

</property>

<property>

    <name>hadoop.tmp.dir</name>

    <value>/opt/module/hadoop/data</value>

</property>

vim hdfs-site.xml

<property>

    <name>dfs.namenode.http-address</name>

    <value>master:50070</value>

</property>

<property>

    <name>dfs.replication</name>

    <value>1</value>

</property>

vim yarn-site.xml

<property>

    <name>yarn.nodemanager.aux-services</name>

    <value>mapreduce_shuffle</value>

</property>

<property>

    <name>yarn.resourcemanager.hostname</name>

    <value>master</value>

</property>

cp mapred-site.xml.template mapred-site.xml

vi mapred-site.xml

<property>

    <name>mapreduce.framework.name</name>

    <value>yarn</value>

</property>

vim workers

master

slave1

slave2

vi hadoop-env.sh

export JAVA_HOME=/opt/module/jdk

vi mapred-env.sh

export JAVA_HOME=/opt/module/jdk

vi yarn-env.sh

export JAVA_HOME=/opt/module/jdk

scp -r /opt/module/hadoop/ slave1:/opt/module/hadoop/

scp -r /opt/module/hadoop/ slave2:/opt/module/hadoop/

hdfs namenode -format

start-all.sh
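
If everything is configured correctly, jps on master should now list the HDFS and YARN daemons (NameNode, SecondaryNameNode, ResourceManager, plus DataNode and NodeManager since master is also listed in workers), and each slave should show DataNode and NodeManager:

jps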

Flume configuration

cd /opt/software

mkdir /opt/module

cd /opt/module

tar -zxvf /opt/software/apache-flume-…… -C /opt/module

mv /opt/module/apache-……/ flume

vim /etc/profile

export FLUME_HOME=/opt/module/flume

export PATH=$FLUME_HOME/bin:$PATH

source /etc/profile

rm -rf /opt/module/flume/lib/guava-……

vim /opt/module/flume/conf/log4j.properties

flume.log.dir=/opt/module/flume/logs

cd /opt/module/hadoop/sbin

./start-all.sh

cd /opt/module/flume/bin

flume-ng agent --conf conf/ --name a1 --conf-file /opt/module/flume/conf/log4j.properties -Dflume.root.logger=INFO,console
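
The notes do not show the agent definition that --name a1 refers to; --conf-file normally points to the agent's own properties file rather than log4j.properties. A minimal sketch, assuming a hypothetical file /opt/module/flume/conf/netcat-logger.properties with a netcat source and a logger sink:

a1.sources = r1
a1.sinks = k1
a1.channels = c1

a1.sources.r1.type = netcat
a1.sources.r1.bind = localhost
a1.sources.r1.port = 44444

a1.sinks.k1.type = logger

a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

With that file in place, the agent would be started with --conf-file /opt/module/flume/conf/netcat-logger.properties instead.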

Spark configuration    (note: this section uses /data/bigdata, /opt/jdk and /opt/hadoop-2.7.3 paths from a different environment; adjust them to match the actual installation)

cd /data/bigdata

tar -xvf /data/software/spark-2.3.2-bin-hadoop2.7.tgz

mv spark-2.3.2-bin-hadoop2.7 spark-2.3.2

cd

vim   /etc/profile

export SPARK_HOME=/data/bigdata/spark-2.3.2

export PATH=$SPARK_HOME/bin:$PATH

source  /etc/profile

cd /data/bigdata/spark-2.3.2/conf

cp spark-env.sh.template    spark-env.sh

vim spark-env.sh

export JAVA_HOME=/opt/jdk

export HADOOP_CONF_DIR=/opt/hadoop-2.7.3/etc/hadoop

export SPARK_DIST_CLASSPATH=$(/opt/hadoop-2.7.3/bin/hadoop  classpath)
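
The SPARK_DIST_CLASSPATH line makes Spark pick up the jars and configuration of the existing Hadoop installation rather than only its bundled ones (it is required for "without-hadoop" Spark builds and harmless otherwise). To see what it expands to, assuming the Hadoop path above is correct:

/opt/hadoop-2.7.3/bin/hadoop classpath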

cd  /data/bigdata

scp  -r  spark-2.3.2  root@slave1:/data/bigdata

scp  -r  spark-2.3.2  root@slave2:/data/bigdata

cd /data/bigdata/spark-2.3.2/conf

cp  slaves.template   slaves

vim  slaves

Remove localhost and list one worker host per line:

master

slave1

slave2

cd   /data/bigdata/spark-2.3.2

./sbin/start-all.sh

jps

Expected processes: Master and Worker (plus Jps itself)
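
Beyond jps, the standalone cluster can be verified by submitting a sample job; a sketch assuming the default examples jar shipped with Spark 2.3.2 and the default master port 7077:

cd /data/bigdata/spark-2.3.2

./bin/spark-submit --class org.apache.spark.examples.SparkPi --master spark://master:7077 examples/jars/spark-examples_2.11-2.3.2.jar 10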

Flink configuration

mkdir   /opt/module

tar -xvf /opt/software/flink-1.14.6-bin-scala_2.12.tgz -C /opt/module

mv  /opt/module/flink-1.14.6/    /opt/module/flink

vi  /etc/profile

export FLINK_HOME=/opt/module/flink

export PATH=$FLINK_HOME/bin:$PATH

source /etc/profile

vim   /opt/module/flink/conf/flink-conf.yaml

Set: jobmanager.rpc.address: master

vim /opt/module/flink/conf/workers

master

slave1

slave2

scp -r /opt/module/flink slave1:/opt/module

scp -r /opt/module/flink slave2:/opt/module

cd /opt/module/flink

./bin/start-cluster.sh

cd /opt/module/flink/bin

./flink  run  -m   master:8081  /opt/module/flink/examples/batch/WordCount.jar
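
Assuming the JobManager web UI is on the default port 8081 (the same address passed to -m above), the submitted job can also be checked from the command line:

./flink list -m master:8081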

Spark example: read the order_info table from MySQL and write it to a Hive partitioned table

import java.util.Properties

import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

def main(args: Array[String]): Unit = {
  // Run the HDFS/Hive operations as the root user
  System.setProperty("HADOOP_USER_NAME", "root")

  val sparkSession: SparkSession = SparkSession.builder()
    .master("local[*]")
    .config("dfs.client.use.datanode.hostname", "true")
    .config("hive.exec.dynamic.partition.mode", "nonstrict")
    .config("spark.sql.warehouse.dir", "hdfs://192.168.23.94:9820/user/hive/warehouse/")
    .appName("spark read mysql")
    .enableHiveSupport()
    .getOrCreate()

  val MYSQLDBURL: String = "jdbc:mysql://192.168.23.94:3306/ds_pub?useUnicode=true&characterEncoding=utf-8"

  val properties: Properties = new Properties()
  properties.put("user", "root")
  properties.put("password", "123456")
  properties.put("driver", "com.mysql.jdbc.Driver")

  // Read the MySQL table and add a static partition column
  val readMySQLDF: DataFrame = sparkSession.read.jdbc(MYSQLDBURL, "order_info", properties)
  val readMySQLDF2: DataFrame = readMySQLDF.withColumn("etl_date", lit("20230401"))

  // Overwrite the Hive table ods.order_info_par, partitioned by etl_date
  readMySQLDF2.write
    .mode(SaveMode.Overwrite)
    .format("hive")
    .partitionBy("etl_date")
    .saveAsTable("ods.order_info_par")

  sparkSession.close()
}
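
saveAsTable("ods.order_info_par") assumes the ods database already exists in Hive; if it does not, it can be created first, for example from the Hive CLI (assuming hive is on the PATH):

hive -e "CREATE DATABASE IF NOT EXISTS ods"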

HBase configuration

vi hbase-env.sh

export JAVA_HOME=/opt/jdk

export HBASE_CLASSPATH=/opt/module/hadoop-2.7.3/etc/hadoop

export HBASE_MANAGES_ZK=false

vi hbase-site.xml

<configuration>

<property>

    <name>hbase.rootdir</name>

    <value>hdfs://master:9000/hbase</value>

</property>

<property>

    <name>hbase.cluster.distributed</name>

    <value>true</value>

</property>

<property>

    <name>hbase.zookeeper.quorum</name>

    <value>master:2181,slave1:2181,slave2:2181</value>

</property>

</configuration>
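
The notes stop here for HBase; a likely next step (an assumption, not shown above) is to list the region servers, copy the installation to the slaves, and start the cluster, assuming HBase was unpacked to /opt/module/hbase:

vi regionservers    (one host per line: master, slave1, slave2)

scp -r /opt/module/hbase slave1:/opt/module

scp -r /opt/module/hbase slave2:/opt/module

start-hbase.sh    (ZooKeeper must already be running on master/slave1/slave2, since HBASE_MANAGES_ZK=false)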

ClickHouse configuration

tar -zxvf clickhouse-client-2…… -C /opt/module    (likewise for the clickhouse-server package, whose config.xml is edited below)

Enable remote access by editing the server config and un-commenting the listen_host entry

vim /opt/module/clickhouse-server-…/config.xml

Set: <tcp_port>9001</tcp_port>

Uncomment: <listen_host>::</listen_host>

Start the server:

cd /opt/module/clickhouse

clickhouse start

Check status:

clickhouse status
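
Once the server is up, the connection can be verified with the client, using the non-default TCP port configured above:

clickhouse-client --port 9001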
