Project Deployment Scripts


Docker project deployment scripts

Video cloud project application deployment

prod-dn (Dingnan) environment deployment

Dockerfile_video_structurization_api
FROM java:8
VOLUME /tmp
COPY video_structurization_api.jar video_structurization_api.jar
EXPOSE 10003
ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","-Duser.timezone=GMT+8","/video_structurization_api.jar","--spring.profiles.active=prod-dn","--schedule.job=true"]
Dockerfile_grpc-pic-server
FROM java:8
VOLUME /tmp
COPY grpc-pic-server.jar grpc-pic-server.jar
EXPOSE 10012
EXPOSE 6001
ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","-Duser.timezone=GMT+8","/grpc-pic-server.jar","--spring.profiles.active=prod-dn"]
deploy.sh

Can be run directly on a host with internet access (also exports each image as a tar archive).

#!/bin/bash
image_names=("video_structurization_api" "grpc-pic-server")

for name in "${image_names[@]}"
do
    echo "Stopping container $name"
    docker stop "$name"
    echo "Removing container $name"
    docker rm "$name"
    echo "Removing image jgh/$name"
    docker rmi "jgh/$name"
    echo "Building $name"
    docker build -t "jgh/$name" . -f "Dockerfile_$name"
    echo "Exporting $name.tar"
    docker save "jgh/$name:latest" -o "$name.tar"
    echo ""
done

echo "正在创建容器"
docker run -d -p 10003:10003 --restart always \
--name video_structurization_api --privileged=true \
--privileged=true \
-v /proc:/host/proc \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /etc/sysconfig/docker:/etc/sysconfig/docker \
-v /usr/bin/docker:/usr/bin/docker \
-v /data/yyt/java-project/rtsp-video/:/data/yyt/java-project/rtsp-video/ \
-v /data/yyt/java-project/video_structurization_api/:/data/yyt/java-project/video_structurization_api/ \
-v /data/yyt/java-project/logs/video_structurization/video_structurization_api/:/data/yyt/java-project/logs/video_structurization/video_structurization_api/ \
jgh/video_structurization_api

docker run -d -p 10012:10012 -p 6001:6001 --restart always \
--name grpc-pic-server \
-v /data/yyt/java-project/logs/video_structurization/grpc-pic-server/:/data/yyt/java-project/logs/video_structurization/grpc-pic-server/ \
jgh/grpc-pic-server

echo "部署完成"
deploy-er.sh

The image tar archives must be prepared in advance (e.g. by deploy.sh above) before running this script.
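
A minimal sketch of shipping everything to the offline host first, assuming the target directory /data/yyt/deploy (a hypothetical path) already exists there:

scp video_structurization_api.tar grpc-pic-server.tar deploy-er.sh root@<target-host>:/data/yyt/deploy/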

#!/bin/bash
image_names=("video_structurization_api" "grpc-pic-server")

#------------- check that the tar archives exist
discount=0
disfile=""
for myFile in "${image_names[@]}"
do
    if [ ! -f "$myFile.tar" ]; then
        echo "$myFile.tar ------> file not found"
        let discount++
        disfile=${disfile}" "${myFile}.tar
    fi
done

if [ $discount -gt 0 ];then
    echo "Please provide the missing file(s):$disfile"
    exit 1
fi

echo "tar archive check passed"

for name in "${image_names[@]}"
do
    echo "Stopping container $name"
    docker stop "$name"
    echo "Removing container $name"
    docker rm "$name"
    echo "Removing image jgh/$name"
    docker rmi "jgh/$name"
    echo "Loading image from $name.tar"
    docker load -i "$name.tar"
    echo ""
done

echo "正在创建容器"
docker run -d -p 10003:10003 --restart always \
--name video_structurization_api --privileged=true \
--privileged=true \
-v /proc:/host/proc \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /etc/sysconfig/docker:/etc/sysconfig/docker \
-v /usr/bin/docker:/usr/bin/docker \
-v /data/yyt/java-project/rtsp-video/:/data/yyt/java-project/rtsp-video/ \
-v /data/yyt/java-project/video_structurization_api/:/data/yyt/java-project/video_structurization_api/ \
-v /data/yyt/java-project/logs/video_structurization/video_structurization_api/:/data/yyt/java-project/logs/video_structurization/video_structurization_api/ \
jgh/video_structurization_api

docker run -d -p 10012:10012 -p 6001:6001 --restart always \
--name grpc-pic-server \
-v /data/yyt/java-project/logs/video_structurization/grpc-pic-server/:/data/yyt/java-project/logs/video_structurization/grpc-pic-server/ \
jgh/grpc-pic-server

echo "部署完成"

Big data environment setup scripts

xsync.sh

#!/bin/bash
# Get the number of arguments; exit immediately if none were given
pcount=$#
if((pcount==0)); then
	echo no args;
	exit;
fi

# Get the file name
p1=$1
fname=`basename $p1`
echo fname=$fname

# Resolve the parent directory to an absolute path
pdir=`cd -P $(dirname $p1); pwd`
echo pdir=$pdir

# Loop over the target hosts
for host in slave1 slave2 slave3
do
    echo --------------------- $host ----------------
    rsync -av $pdir/$fname $host:$pdir
done
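
Typical usage, for example distributing the Kafka broker config to all slaves (assuming the script is on the PATH):

xsync.sh /home/yyt/bigdata/kafka/config/server.properties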

xcall.sh

#!/bin/bash

for i in master slave1 slave2 slave3
do
	echo ----------- $i --------------
	ssh $i "$*"
done
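
For example, listing the Java processes on every node:

xcall.sh jps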

kafka.sh

#! /bin/bash

kafka_home=/home/yyt/bigdata/kafka

case $1 in
"start"){
        for i in slave1 slave2 slave3
        do
                echo " --------启动 $i Kafka-------"
                
                ssh $i "source /etc/profile ; $kafka_home/bin/kafka-server-start.sh -daemon $kafka_home/config/server.properties "
        done
};;
"stop"){
        for i in slave1 slave2 slave3
        do
                echo " --------停止 $i Kafka-------"
                ssh $i "source /etc/profile ; $kafka_home/bin/kafka-server-stop.sh stop"
        done
};;
esac
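
Start the brokers and then verify with xcall.sh; a Kafka process should appear on slave1-slave3:

kafka.sh start
xcall.sh jps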

flume1.sh (log collection side)

#! /bin/bash

case $1 in
"start"){
    for i in slave1 slave2
    do
        echo " --------启动 $i 采集Flume-------"
        ssh $i "source /etc/profile ; nohup /home/yyt/bigdata/flume/bin/flume-ng agent --conf-file /home/yyt/bigdata/flume/conf/file-flume-kafka.conf --name a1 -Dflume.root.logger=INFO,LOGFILE >/dev/null 2>&1 &"
    done
};; 
"stop"){
    for i in slave1 slave2
    do
        echo " --------停止 $i 采集Flume-------"
        ssh $i "ps -ef | grep file-flume-kafka | grep -v grep |awk '{print \$2}' | xargs kill"
    done

};;
esac

flume2.sh (Kafka consumer side)

#! /bin/bash

case $1 in
"start"){
        for i in slave3
        do
                echo " --------启动 $i 消费flume-------"
                ssh $i " source /etc/profile ; nohup /home/yyt/bigdata/flume/bin/flume-ng agent --conf-file /home/yyt/bigdata/flume/conf/kafka-flume-hdfs.conf --name a1 -Dflume.root.logger=INFO,LOGFILE >/home/yyt/bigdata/flume/log.txt   2>&1 &"
        done
};;
"stop"){
        for i in slave3
        do
                echo " --------停止 $i 消费flume-------"
                ssh $i "ps -ef | grep kafka-flume-hdfs | grep -v grep |awk '{print \$2}' | xargs kill -9"
        done

};;
esac
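
A quick way to confirm that both Flume agents are actually running, reusing xcall.sh from above:

xcall.sh "ps -ef | grep flume-ng | grep -v grep"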

cluster.sh

#! /bin/bash

case $1 in
"start"){
 echo " -------- 启动 集群 -------"

 echo " -------- 启动 hadoop集群 -------"
 start-dfs.sh 
 start-yarn.sh
 
 #启动 Kafka采集集群
 #Kafka的启动需要一定时间,此时间根据用户计算机的性能而定,可适当调整 
 kafka.sh start
 sleep 6s;
 
 #启动 flume生成集群
 flume1.sh start
 
 #启动 flume消费集群
 flume2.sh start
};;
"stop"){
 echo " -------- 停止 集群 -------"

 echo " -------- 停止 hadoop集群 -------"
 stop-dfs.sh 
 stop-yarn.sh
 
 #停止 Kafka采集集群
 kafka.sh stop
 sleep 6s;
 
 #关闭 flume生成集群
 flume1.sh stop
 
 #关闭 flume生成集群
 flume2.sh stop
};;
esac
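
Bringing the whole pipeline up and checking it (a minimal sketch; the scripts above are assumed to be on the PATH):

cluster.sh start
xcall.sh jps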

Start and stop scripts for jar-based deployments

start.sh

oa.start.sh

#!/bin/sh

JAR_NAMES=("oa-web-admin.jar" "oa-api.jar" "workflow-web-api.jar")
PRO_NAMES=("oaweb" "oaapi" "workflow")                                           
PORTS=(9090 9091 9096);
HOME="/home/yyt/java-project/oa/jar"

for((i=0;i<${#PORTS[@]};i++))
do
	echo "--------${JAR_NAMES[i]} starting--------------"
	P_ID=`netstat -anp|grep ${PORTS[i]}|awk '{printf $7}'|cut -d/ -f1`
	if [ -n "$P_ID" ]; then
		echo "server already started, pid:$P_ID"
		continue
	fi
	# change to the directory containing the jars
	cd $HOME
	# note: -D options placed after -jar are passed to the application rather than the JVM
	nohup java -jar $HOME/${JAR_NAMES[i]} --server.port=${PORTS[i]} --spring.profiles.active=${PRO_NAMES[i]} -Dspring.config.location=application-${PRO_NAMES[i]}.yml > /dev/null 2>&1 &

	P_ID=`netstat -anp|grep ${PORTS[i]}|awk '{printf $7}'|cut -d/ -f1`
	while [ -z "$P_ID" ];do
		P_ID=`netstat -anp|grep ${PORTS[i]}|awk '{printf $7}'|cut -d/ -f1`
	done
	echo "--------${JAR_NAMES[i]} started on port:${PORTS[i]} pid:$P_ID--------------"
done
#!/bin/bash
echo 'start emall-admin... waiting 5s'
nohup java -jar ./emall-admin/emall-admin.jar \
--server.port=8085 \
--spring.profiles.active=prod \
>/dev/null 2>&1  &

sleep 5

stop.sh

#!/bin/bash

# Replace this with your own application; the rest of the script does not need to change
APP_NAME_TOILET_API=toilet-api.jar

# check whether the process is running
is_exist(){
 pid=`ps -ef|grep $1 |grep -v grep|awk '{print $2}' `
 # returns 1 if the process does not exist, 0 if it does
 if [ -z "${pid}" ]; then
 return 1
 else
 return 0
 fi
}

# stop function
stop(){
 is_exist $1
 if [ $? -eq "0" ]; then
 kill -9 $pid
 else
 echo "$1 is not running"
 fi
}

echo "stop ${APP_NAME_TOILET_API} ..."
stop ${APP_NAME_TOILET_API}
echo 'stop success...'

oa-stop.sh

#!/bin/sh

JAR_NAMES=("oa-web-admin.jar" "oa-api.jar" "workflow-web-api.jar")                                           
PORTS=(9090 9091 9096);

for((i=0;i<${#PORTS[@]};i++))
do
	echo "--------${JAR_NAMES[i]} stopping--------------"
	P_ID=`netstat -anp|grep ${PORTS[i]}|awk '{printf $7}'|cut -d/ -f1`
	if [ -z "$P_ID" ]; then
		echo "===${JAR_NAMES[i]} process not exists or stop success"
		continue
	fi
	kill -9 $P_ID
	echo "${JAR_NAMES[i]} killed success"
done

docker-compose.yml

version: "3.3"
services:
  redis:
    image: redis
    ports:
      - "6379:6379"
    command: redis-server /etc/redis/redis.conf
    volumes:
      - /data/redis/data:/data
      - /etc/localtime:/etc/localtime:ro
      - /data/redis/conf/redis.conf:/etc/redis/redis.conf
    restart: always


  rabbitmq:
    image: rabbitmq:3-management
    ports:
      - "15672:15672"
      - "5672:5672"
    environment:
      RABBITMQ_DEFAULT_USER: admin
      RABBITMQ_DEFAULT_PASS: 123456
    volumes:
      - /data/rabbitmq/data:/data
      - /etc/localtime:/etc/localtime:ro
    restart: always

  elasticsearch:
    image: elasticsearch:6.4.3
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elasticsearch/plugins:/usr/share/elasticsearch/plugins
      - /data/elasticsearch/config:/usr/share/elasticsearch/config
      - /etc/localtime:/etc/localtime:ro
    restart: always
    environment:
      discovery.type: single-node
      # "memory" is not an Elasticsearch setting; cap the JVM heap via ES_JAVA_OPTS instead
      ES_JAVA_OPTS: "-Xms256m -Xmx256m"

  emall-admin:
    build:
      context: emall-admin
      dockerfile: Dockerfile
    ports:
      - "8085:8085"
    volumes:
      - /data/logs/emall-admin/:/logs/emall-yymt/emall-admin-api/
    restart: always
    environment:
      - spring.profiles.active=docker
    depends_on:
      - redis
      - elasticsearch
      - rabbitmq

  emall-portal:
    build:
      context: emall-portal
      dockerfile: Dockerfile
    ports:
      - "8083:8083"
    volumes:
      - /data/logs/emall-portal/:/logs/emall-yymt/emall-portal-api/
    restart: always
    environment:
      - spring.profiles.active=docker
    depends_on:
      - redis
      - elasticsearch
      - rabbitmq

  emall-search:
    build:
      context: emall-search
      dockerfile: Dockerfile
    ports:
      - "8088:8088"
    volumes:
      - /data/logs/emall-search/:/logs/emall-yymt/emall-search-api/
    restart: always
    environment:
      - spring.profiles.active=docker
    depends_on:
      - redis
      - elasticsearch
      - rabbitmq
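
Typical workflow with this compose file, run from the directory that contains docker-compose.yml (the emall-* services are built from their Dockerfiles):

docker-compose up -d --build
docker-compose ps
docker-compose logs -f emall-admin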

Redis installation and configuration

Refer to a Redis installation and configuration guide.
Change the requirepass password in the config file.
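
A quick check that the password took effect (a minimal sketch; your_password stands for whatever value you set):

redis-cli -a your_password ping    # should return PONG
redis-cli ping                     # without -a this should now fail with NOAUTH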

Run as a Windows service:
1. Install: redis-server.exe --service-install redis.windows.conf --loglevel verbose
2. The service does not start automatically after installation; start it manually:
Start the service: redis-server.exe --service-start
Stop the service: redis-server.exe --service-stop

Killing a process on Windows:

netstat -ano | findstr 1031
Find the pid of the process listening on port 1031, then:
taskkill /pid 34504 /f

netstat -ano | findstr 7474
Stopping a process on Windows (reference):
https://jingyan.baidu.com/article/19020a0a120677529d284228.html
tasklist | more
tskill <pid or process name>
taskkill /f /im winword.exe /t

Three ways to add a startup item on Linux:
https://www.cnblogs.com/joeone/p/10568993.html
Set the required environment variables inside the script so it takes effect when run at boot.

vim /etc/init.d/autostart.sh

#!/bin/sh
#chkconfig: 2345 80 90
#description:auto_run
./java-project/zaxq/startup.sh start all
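
After saving the script it still has to be made executable and registered; a sketch assuming a CentOS-style system with chkconfig, which is what the #chkconfig header above implies:

chmod +x /etc/init.d/autostart.sh
chkconfig --add autostart.sh
chkconfig autostart.sh on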

startup.sh

#!/bin/sh

export JAVA_HOME=/usr/java/jdk1.8.0_162
export PATH=$JAVA_HOME/bin:$PATH

export BASEDATA=basedata-api.jar
export FIRECTRL=firectrl-api.jar
export TAG=zaxq-tag-api.jar

export BASEDATA_port=1031
export FIRECTRL_port=1028
export TAG_port=18991 



startBasedata(){
	echo "--------"$BASEDATA" begin starting--------------"
		nohup java -Dfile.encoding=utf-8 -jar /java-project/zaxq/$BASEDATA --server.port=$BASEDATA_port --spring.profiles.active=comm,prod >/dev/null 2>&1 &
        BASEDATA_pid=`lsof -i:$BASEDATA_port|grep "LISTEN"|awk '{print $2}'`
        until [ -n "$BASEDATA_pid" ]
            do
              BASEDATA_pid=`lsof -i:$BASEDATA_port|grep "LISTEN"|awk '{print $2}'`  
            done
        echo "BASEDATA pid is $BASEDATA_pid" 
        echo "--------"$BASEDATA" start success--------------"
}

startFirectrl(){
	 echo "---------"$FIRECTRL" begin starting---------------"
		nohup java -Dfile.encoding=utf-8 -jar /java-project/zaxq/$FIRECTRL --server.port=$FIRECTRL_port --spring.profiles.active=comm,prod >/dev/null 2>&1 &
        FIRECTRL_pid=`lsof -i:$FIRECTRL_port|grep "LISTEN"|awk '{print $2}'` 
        until [ -n "$FIRECTRL_pid" ]
            do
              FIRECTRL_pid=`lsof -i:$FIRECTRL_port|grep "LISTEN"|awk '{print $2}'`  
            done
        echo "FIRECTRL pid is $FIRECTRL_pid"     
        echo "---------"$FIRECTRL" start success-----------"
}


startTag(){
	 echo "--------"$TAG" begin starting---------------"
		nohup java -Dfile.encoding=utf-8 -jar /java-project/zaxq/$TAG --server.port=$TAG_port --spring.profiles.active=prod >/dev/null 2>&1 &
        TAG_pid=`lsof -i:$TAG_port|grep "LISTEN"|awk '{print $2}'`
        until [ -n "$TAG_pid" ]
            do
              TAG_pid=`lsof -i:$TAG_port|grep "LISTEN"|awk '{print $2}'`
            done
        echo "TAG pid is $TAG_pid"
        echo "---------"$TAG" start success-----------"
}
 
stopBasedata(){
	P_ID=`ps -ef | grep -w $BASEDATA | grep -v "grep" | awk '{print $2}'`
        if [ "$P_ID" == "" ]; then
            echo "==="$BASEDATA" process not exists or stop success"
        else
            kill -9 $P_ID
            echo ""$BASEDATA" killed success"
        fi

}

stopFirectrl(){
	P_ID=`ps -ef | grep -w $FIRECTRL | grep -v "grep" | awk '{print $2}'`
        if [ "$P_ID" == "" ]; then
            echo "==="$FIRECTRL" process not exists or stop success"
        else
            kill -9 $P_ID
            echo ""$FIRECTRL" killed success"
       
        fi
}



stopTag(){
	P_ID=`ps -ef | grep -w $TAG | grep -v "grep" | awk '{print $2}'`
        if [ "$P_ID" == "" ]; then
            echo "==="$TAG" process not exists or stop success"
        else
            kill -9 $P_ID
            echo ""$TAG" killed success"
       
        fi
}


case "$1" in
 
start)
	case "$2" in
		
		basedata-api|BASEDATA)
			## start basedata-api
			startBasedata
			;;

		firectrl-api|FIRECTRL)
			## start firectrl-api
			startFirectrl
			;;

		zaxq-tag-api|TAG)
			## start zaxq-tag-api
			startTag
			;;

		""|all)
			## empty or "all": start everything (autostart.sh calls "start all")
			startBasedata
			startFirectrl
			startTag
			;;
    esac
	;;
 
 stop)
	case "$2" in
		
		basedata-api|BASEDATA)
			stopBasedata
			;;
			
		firectrl-api|FIRECTRL)	
			stopFirectrl
			;;
		zaxq-tag-api|TAG)
			stopTag
			;;
		""|"")
			stopBasedata
			stopFirectrl
			stopTag
			;;
	esac
	;;
 
restart)
        $0 stop $2
        sleep 2
        $0 start $2
        echo "===restart $2 success==="
        ;;   
esac	
exit 0

startup.sh, optimized version

#!/bin/sh

export JAVA_HOME=/usr/java/jdk1.8.0_162
export PATH=$JAVA_HOME/bin:$PATH

start_service(){
	echo "--------$1 begin starting, params: $3--------------"
		# $3 is left unquoted on purpose so the jar path and its arguments word-split
		nohup java -Dfile.encoding=utf-8 -jar $3 >/dev/null 2>&1 &
        pid=`lsof -i:$2|grep "LISTEN"|awk '{print $2}'`
        until [ -n "$pid" ]
            do
              pid=`lsof -i:$2|grep "LISTEN"|awk '{print $2}'`
            done
        echo $1" pid is $pid"
        echo "--------"$1" start success--------------"
}

stop_service(){
	P_ID=`ps -ef | grep -w $1 | grep -v "grep" | awk '{print $2}'`
        if [ "$P_ID" == "" ]; then
            echo "==="$1" process not exists or stop success"
        else
            kill -9 $P_ID
            echo ""$1" killed success"
        fi

}

# Restart a single app:  ./startup.sh restart oa_api_jar
# Restart everything:    ./startup.sh restart

HOME="/home/yymt/java-project/oa"

export oa_api_jar=oa-api.jar
export oa_api_port=9092
export oa_api_param=$HOME/$oa_api_jar" --server.port="$oa_api_port" --spring.profiles.active=dev --spring.redis.password=aaa111 "


case "$1" in
# ------------------- start

start)
	case "$2" in

		oa_api_jar)
			## start the named app
			start_service $oa_api_jar $oa_api_port "$oa_api_param"
			;;

		"")
			## no app given: start everything
			start_service $oa_api_jar $oa_api_port "$oa_api_param"
			;;
        esac
	;;

# ------------------- stop
 stop)
	case "$2" in

		oa_api_jar)
			stop_service $oa_api_jar $oa_api_port
			;;

		"")
			stop_service $oa_api_jar $oa_api_port
			;;
	esac
	;;
 
restart)
        $0 stop $2
        sleep 2
        $0 start $2
        echo "===restart $2 success==="
        ;;   
esac	
exit 0
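
Usage of the optimized script, matching the comments above:

./startup.sh start oa_api_jar     # start a single app
./startup.sh stop                 # stop everything
./startup.sh restart oa_api_jar   # restart a single app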
