【DolphinScheduler】Deployment Notes


ZooKeeper must be installed and running before deploying DolphinScheduler.
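A quick way to confirm ZooKeeper is actually up on each node is sketched below (assumes zkServer.sh is on the PATH; the ruok probe additionally assumes the four-letter-word commands are enabled, which newer ZooKeeper releases require you to whitelist):

# Check the local ZooKeeper service status
zkServer.sh status

# Or probe a node directly over the client port
echo ruok | nc 192.168.xx.xx 2181    # a healthy node answers "imok"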


Download

https://mirrors.tuna.tsinghua.edu.cn/apache/dolphinscheduler/1.3.6/apache-dolphinscheduler-1.3.6-bin.tar.gz

Download page:
https://www.apache.org/dyn/closer.lua/dolphinscheduler/1.3.6/apache-dolphinscheduler-1.3.6-bin.tar.gz
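A sketch of fetching and unpacking the binary package (the mirror URL is the one above; the directory name is what the 1.3.6 tarball should unpack to):

# Download and unpack the binary release
wget https://mirrors.tuna.tsinghua.edu.cn/apache/dolphinscheduler/1.3.6/apache-dolphinscheduler-1.3.6-bin.tar.gz
tar -zxvf apache-dolphinscheduler-1.3.6-bin.tar.gz
cd apache-dolphinscheduler-1.3.6-bin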

Create the deployment user and configure sudo on every node

useradd dolphinscheduler
echo "dolphinscheduler123" | passwd --stdin dolphinscheduler

# Configure passwordless sudo for the deployment user
echo 'dolphinscheduler  ALL=(ALL)  NOPASSWD: ALL' >> /etc/sudoers
sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
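To confirm the passwordless sudo setup took effect, a quick optional check along these lines can be run on each node:

# Run a no-op through sudo as the deployment user;
# it should succeed without prompting for a password
su - dolphinscheduler -c "sudo -n true" && echo "passwordless sudo OK"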

Database initialization

Log in to the database and create the user and password:
postgres=# create user dolphinscheduler with password 'dolphinscheduler';
CREATE ROLE
Create the database:
postgres=# CREATE DATABASE dolphinscheduler TEMPLATE template0 ENCODING UTF8 LC_COLLATE 'zh_CN.utf8' LC_CTYPE 'zh_CN.utf8' owner dolphinscheduler;
CREATE DATABASE
Grant all privileges:
postgres=# grant all on database dolphinscheduler to dolphinscheduler;
GRANT
postgres=# 
postgres=# quit


[postgres@master ~]$ psql -h 192.168.1.30 -p 5432 -d dolphinscheduler -U dolphinscheduler -w dolphinscheduler
psql: warning: extra command-line argument "dolphinscheduler" ignored
psql (13.2)
SSL connection (protocol: TLSv1.2, cipher: ECDHE-RSA-AES128-GCM-SHA256, bits: 128, compression: off)
Type "help" for help.

dolphinscheduler=> 

vi conf/datasource.properties

# postgresql (this deployment uses the PostgreSQL database created above)
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://192.168.1.30:5432/dolphinscheduler
spring.datasource.username=dolphinscheduler
spring.datasource.password=dolphinscheduler

# mysql (alternative; replace xxx with the real host, user and password)
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://xxx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true
#spring.datasource.username=xxx
#spring.datasource.password=xxx
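The PostgreSQL JDBC driver ships in the binary package, so the postgres settings above work out of the box; the MySQL connector is not bundled (licensing), so switching to MySQL means dropping mysql-connector-java into lib/ yourself. A quick check of what is actually present:

# List the JDBC driver jars shipped under lib/
ls lib/ | grep -iE "postgresql|mysql"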

Create the system tables (important!)
sh script/create-dolphinscheduler.sh
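Once the script finishes, the schema can be spot-checked from psql using the connection details above (the exact table count varies by version, so this is only a sanity check):

# Count the tables created in the dolphinscheduler database
psql -h 192.168.1.30 -p 5432 -d dolphinscheduler -U dolphinscheduler \
     -c "select count(*) from information_schema.tables where table_schema = 'public';"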

Edit the configuration

conf/env/dolphinscheduler_env.sh

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export DATAX_HOME=/opt/soft/datax

export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_HOME/bin:$PATH
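Only the components actually used need valid paths here. A small optional sanity check that the file sources cleanly and java resolves through JAVA_HOME:

# Source the environment file and verify the java path it defines
source conf/env/dolphinscheduler_env.sh && $JAVA_HOME/bin/java -version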

conf/config/install_config.conf

# choose mysql or postgresql
dbtype="postgresql"

# Database connection address and port
dbhost="192.168.xx.xx:5432"

# database name
dbname="dolphinscheduler"

# database username
username="xxx"

# database password
# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
password="xxx"

#Zookeeper cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"

# Note: the target installation path for dolphinscheduler; do not set it to the same directory as the current path (pwd)
installPath="/opt/soft/dolphinscheduler"

# deployment user
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"

# alert config, using QQ mail as an example
# mail protocol
mailProtocol="SMTP"

# mail server host
mailServerHost="smtp.qq.com"

# mail server port
# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
mailServerPort="25"

# mail sender
mailSender="xxx@qq.com"

# mail user
mailUser="xxx@qq.com"

# mail sender password
# note: The mail.passwd is email service authorization code, not the email login password.
mailPassword="xxx"

# Whether the TLS mail protocol is supported: true means supported, false means not supported
starttlsEnable="true"

# Whether the SSL mail protocol is supported: true means supported, false means not supported
# note: only one of TLS and SSL can be set to true.
sslEnable="false"

# note: sslTrust is the same as mailServerHost
sslTrust="smtp.qq.com"


# resource storage type: HDFS, S3, NONE
resourceStorageType="HDFS"

# If resourceStorageType = HDFS, and your Hadoop Cluster NameNode has HA enabled, you need to put core-site.xml and hdfs-site.xml in the installPath/conf directory. In this example, it is placed under /opt/soft/dolphinscheduler/conf, and configure the namenode cluster name; if the NameNode is not HA, modify it to a specific IP or host name.
# if S3, write the S3 address, for example: s3a://dolphinscheduler
# Note: for S3, be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"


# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"

# if resourcemanager HA enable or not use resourcemanager, please skip this value setting; If resourcemanager is single, you only need to replace yarnIp1 to actual resourcemanager hostname.
singleYarnIp="yarnIp1"

# resource storage path on HDFS/S3; resource files will be stored under this path. Make sure the directory exists on HDFS and has read/write permissions. /dolphinscheduler is recommended
resourceUploadPath="/dolphinscheduler"

# who have permissions to create directory under HDFS/S3 root path
# Note: if kerberos is enabled, please config hdfsRootUser=
hdfsRootUser="hdfs"



# install hosts
# Note: list of hostnames to install DolphinScheduler on. For a pseudo-distributed setup, just write that single hostname
ips="ds1,ds2,ds3,ds4"

# ssh port, default 22
# Note: if ssh port is not default, modify here
sshPort="22"

# run master machine
# Note: list of hosts hostname for deploying master
masters="ds1,ds2"

# run worker machine
# note: need to write the worker group name of each worker, the default value is "default"
workers="ds3:default,ds4:default"

# run alert machine
# note: list of machine hostnames for deploying alert server
alertServer="ds2"

# run api machine
# note: list of machine hostnames for deploying api server
apiServers="ds1"
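Because defaultFS above points at an HA nameservice (hdfs://mycluster:8020), core-site.xml and hdfs-site.xml also have to be available to DolphinScheduler, as the resourceStorageType comment notes. A sketch, assuming the Hadoop config directory set in dolphinscheduler_env.sh:

# Copy the HDFS client configs so the "mycluster" nameservice can be resolved
cp /opt/soft/hadoop/etc/hadoop/core-site.xml conf/
cp /opt/soft/hadoop/etc/hadoop/hdfs-site.xml conf/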

Create a symlink for the java command on every node (required)

sudo ln -s /home/jdk1.8.0_181/bin/java /usr/bin/java
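Since every node needs the symlink, it can be pushed out in one pass over the host list from install_config.conf (a sketch; assumes SSH access to each host and that the JDK lives at the same path everywhere):

# Create the java symlink on all nodes listed in install_config.conf
for h in ds1 ds2 ds3 ds4; do
    ssh $h "sudo ln -s /home/jdk1.8.0_181/bin/java /usr/bin/java"
done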

Install
sh install.sh
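install.sh distributes the package to the hosts configured in install_config.conf and starts the services. A rough way to confirm everything came up is to run jps on each node and look for the expected processes (names as I recall them for 1.3.x; which ones appear depends on the node's role):

# List the Java processes started by the installer on each node
jps
# Expected per role: MasterServer, WorkerServer, LoggerServer,
# ApiApplicationServer, AlertServer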

Access the API server (web UI)


?? What on earth are the default username and password?

admin
dolphinscheduler123
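For 1.3.x the web UI is served by the api server, by default at http://<apiServer host>:12345/dolphinscheduler (ds1 in this install_config.conf). A hedged curl sketch of the login call, assuming that default port and the 1.3.x login endpoint:

# Log in against the api server with the default admin account
curl -d "userName=admin&userPassword=dolphinscheduler123" \
     "http://ds1:12345/dolphinscheduler/login"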

