Setting up systemd auto-start daemons

zookeeper
vi /usr/lib/systemd/system/zookeeper.service

[Unit]
Description=zookeeper
After=network.target

[Service]
Type=forking
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
#PIDFile=/opt/zookeeper/data/zookeeper_server.pid
Restart=always
RestartSec=0s
ExecStart=/opt/apache-zookeeper-3.6.2-bin/bin/zkServer.sh start
ExecStop=/opt/apache-zookeeper-3.6.2-bin/bin/zkServer.sh stop
ExecReload=/opt/apache-zookeeper-3.6.2-bin/bin/zkServer.sh restart
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
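
After writing the unit file, reload systemd, then enable and start the service; the same commands apply to every unit defined below:

systemctl daemon-reload
systemctl enable zookeeper
systemctl start zookeeper
systemctl status zookeeper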

kafka
vi /usr/lib/systemd/system/kafka.service
[Unit]
Description=kafka
Requires=network.target  zookeeper.service
After=network.target  zookeeper.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
#TimeoutSec=120
#SuccessExitStatus=143
User=root
Group=root
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
#Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/java/jdk1.8.0_181-cloudera/bin"
ExecStart=/opt/kafka_2.12-2.7.0/bin/kafka-server-start.sh -daemon /opt/kafka_2.12-2.7.0/config/server.properties
ExecStop=/opt/kafka_2.12-2.7.0/bin/kafka-server-stop.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
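
To confirm the broker is up (assuming it listens on the default port 9092 on this host), listing topics against it is a quick check:

/opt/kafka_2.12-2.7.0/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list
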
hadoop

namenode

vi /usr/lib/systemd/system/namenode.service
[Unit]
Description=hadoop
After=network.target

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startnn.sh
ExecStop=/opt/start/stopnn.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
vi /opt/start/startnn.sh
#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh start namenode
hdfs --daemon start namenode

vi /opt/start/stopnn.sh

#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh stop namenode
hdfs --daemon stop namenode
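
systemd will only execute these helper scripts if they are executable, so after creating them (and the similar ones below) mark them as such:

chmod +x /opt/start/*.sh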

datanode

vi /usr/lib/systemd/system/datanode.service
[Unit]
Description=hadoop
After=network.target namenode.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startdn.sh
ExecStop=/opt/start/stopdn.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
vi /opt/start/startdn.sh
#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh start datanode
hdfs --daemon start datanode

vi /opt/start/stopdn.sh

#!/bin/bash
source /etc/profile
#/opt/hadoop-3.0.3/sbin/hadoop-daemon.sh stop datanode
hdfs --daemon stop datanode
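
Once the namenode and datanode units are both active, the DataNode should show up in an HDFS report (run as the HDFS superuser):

hdfs dfsadmin -report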

resourcemanager

vi /usr/lib/systemd/system/resourcemanager.service
[Unit]
Description=hadoop
After=network.target datanode.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startrm.sh
ExecStop=/opt/start/stoprm.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
vi /opt/start/startrm.sh
#!/bin/bash
source /etc/profile
yarn --daemon start resourcemanager
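
A matching stop script for the ExecStop line above, following the same pattern as the other helper scripts, would look like this:

vi /opt/start/stoprm.sh
#!/bin/bash
source /etc/profile
yarn --daemon stop resourcemanager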

nodemanager: same as resourcemanager, only the start command changes (a full sketch follows this list):

yarn --daemon start nodemanager

proxyserver: same as resourcemanager:

yarn --daemon start proxyserver

historyserver: same as resourcemanager:

mapred --daemon start historyserver
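
As a concrete example of the "same as resourcemanager" pattern, a nodemanager unit and its helper scripts might look like the following (the script names startnm.sh/stopnm.sh and the ordering after resourcemanager.service are assumptions that simply continue the conventions used above):

vi /usr/lib/systemd/system/nodemanager.service
[Unit]
Description=hadoop
After=network.target resourcemanager.service

[Service]
Type=forking
TimeoutSec=120
User=root
ExecStart=/opt/start/startnm.sh
ExecStop=/opt/start/stopnm.sh

[Install]
WantedBy=multi-user.target

vi /opt/start/startnm.sh
#!/bin/bash
source /etc/profile
yarn --daemon start nodemanager

vi /opt/start/stopnm.sh
#!/bin/bash
source /etc/profile
yarn --daemon stop nodemanager
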
spark
vi /usr/lib/systemd/system/spark.service
[Unit]
Description=spark
Requires=network.target
After=network.target

[Service]
#Restart=always
#RestartSec=1
Type=forking
#TimeoutSec=120
#SuccessExitStatus=143
User=root
Group=root
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
ExecStart=/opt/spark-2.3.1-bin-hadoop2.7/sbin/start-all.sh
ExecStop=/opt/spark-2.3.1-bin-hadoop2.7/sbin/stop-all.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
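
start-all.sh launches the standalone Master and Worker daemons and then returns, which is why Type=forking is used here; if the JDK's jps is on the PATH, the processes can be confirmed with:

jps | grep -E 'Master|Worker'
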
flink
vi /usr/lib/systemd/system/flink.service
[Unit]
Description=flink-1.12.1 service
#After=syslog.target network.target zookeeper.service
After=network.target

[Service]
Restart=always
RestartSec=1
Type=forking
TimeoutSec=120

User=root
Environment=JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera

ExecStart=/opt/flink-1.12.1/bin/start-cluster.sh
ExecStop=/opt/flink-1.12.1/bin/stop-cluster.sh

[Install]
WantedBy=multi-user.target
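
Once the unit is active, the Flink CLI (which reads the JobManager address from conf/flink-conf.yaml) can serve as a quick check; an empty job list means the cluster answered:

/opt/flink-1.12.1/bin/flink list
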
hive
vi /usr/lib/systemd/system/hive.service
[Unit]
Description=hive
After=network.target nodemanager.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/starthive.sh
ExecStop=/opt/start/stophive.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
vi /opt/start/starthive.sh
#!/bin/bash
source /etc/profile
nohup hive --service hiveserver2 > /opt/start/hive.log &
vi /opt/start/stophive.sh
#!/bin/bash
/opt/start/kill.sh hiveserver2
vi /opt/start/kill.sh
#!/bin/sh
# Kill processes matching the given name
if [ $# -lt 1 ]
then
  echo "Missing argument: process_name"
  exit 1
fi

PROCESS=`ps -ef|grep "$1"|grep -v grep|grep -v PPID|awk '{ print $2}'`
for i in $PROCESS
do
  echo "Kill the $1 process [ $i ]"
  kill -9 $i
done
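
With hive.service running, HiveServer2 can be smoke-tested with beeline (10000 is the default hive.server2.thrift.port; adjust if it was changed):

beeline -u jdbc:hive2://localhost:10000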

metastore

vi /opt/start/startmetastore.sh
#!/bin/bash
source /etc/profile
nohup hive  --service metastore > /opt/start/hivemetastore.log &
vi /opt/start/stopmetastore.sh
#!/bin/bash
/opt/start/kill.sh metastore
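
The metastore listens on the thrift port configured in hive.metastore.uris (9083 by default), which can be checked once the start script has run:

ss -ltnp | grep 9083
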
vi /usr/lib/systemd/system/metastore.service
[Unit]
Description=hive
After=network.target nodemanager.service

[Service]
#Restart=always
#RestartSec=1
Type=forking
TimeoutSec=120
#SuccessExitStatus=143
User=root
ExecStart=/opt/start/startmetastore.sh
ExecStop=/opt/start/stopmetastore.sh
#PrivateTmp=true

[Install]
WantedBy=multi-user.target
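
Finally, assuming units were created for every component above (including the nodemanager/proxyserver/historyserver variants), everything can be enabled at boot and started in one go:

systemctl daemon-reload
systemctl enable --now zookeeper kafka namenode datanode resourcemanager nodemanager proxyserver historyserver spark flink metastore hive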