Linux 系统日常运维 Shell 脚本合集

一、系统初始化脚本(init.sh)

#!/bin/bash
 
#操作系统安装
#执行方法: ./init.sh 主机名
 
 
# $1: hostname to assign to this machine (required).
os_hostname=$1
# Ensure the base data directory exists before anything else uses it.
if [ ! -d /alidata ]
then 
  mkdir -p /alidata
fi
 
# Docker data-root used later by docker_install().
docker_dir=/alidata/docker
 
# Abort when no hostname argument was given.
# NOTE(review): exits 0 on error, so callers cannot detect the failure; the
# unquoted `[ ! ${os_hostname} ]` also misbehaves if the argument contains
# spaces -- consider `if [ -z "${os_hostname}" ]; then ... exit 1`.
if [ ! ${os_hostname}  ];then
	echo "主机名不能为空"
  exit 0
fi

# Upgrade to the latest mainline kernel (kernel-ml) from the ELRepo
# repository, make it the default GRUB entry, and reboot to activate it.
# NOTE(review): reboots unconditionally -- run this last / interactively.
function update_kernel(){
	rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
	yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
	yum  --enablerepo="elrepo-kernel"  -y install kernel-ml.x86_64
	# Entry 0 is the newest kernel right after installation.
	grub2-set-default 0	
	grub2-mkconfig -o /boot/grub2/grub.cfg
	reboot
}

# Host initialization: set hostname, disable SELinux/swap/firewalld, raise
# process/file limits, preload IPVS + containerd kernel modules, and apply
# sysctl tuning for container/Kubernetes workloads.
# Globals: os_hostname (read).
function os_init(){
	hostnamectl set-hostname ${os_hostname}
	#append the hostname to the end of the first line of /etc/hosts
	sed -i "1s/\$/ $(hostname)/" /etc/hosts
	#disable SELinux now (setenforce) and persistently (config file)
	setenforce 0
	sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
	
	#disable swap now and comment out its fstab entries
	swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab

	systemctl stop firewalld
	systemctl disable firewalld
	#raise max-process and max-open-files limits for all users
	ulimit -SHn 102400
	sed -i "/^\* soft nofile/d" /etc/security/limits.conf
	sed -i "/^\* hard nofile/d" /etc/security/limits.conf
	cat >> /etc/security/limits.conf << EOF
* soft nofile 102400
* hard nofile 102400
* soft nproc  102400
* hard nproc  102400
EOF

	#kernel modules to load at boot for IPVS-based load balancing
	cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
 
 
 
	#modules required for container networking (overlayfs + bridge filtering)
	cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF


  #sysctl tuning; the Chinese comments below are written into os.conf as-is.
  # NOTE(review): the heredoc delimiter is unquoted, so ${pid} in the
  # comment near vm.max_map_count expands (to empty) at write time -- confirm
  # that is acceptable.
  cat > /etc/sysctl.d/os.conf << EOF
# 最大限度使用物理内存
vm.swappiness = 0
# 决定检查一次相邻层记录的有效性的周期。当相邻层记录失效时,将在给它发送数据前,再解析一次。缺省值是60秒。
net.ipv4.neigh.default.gc_stale_time = 120
# see details in https://help.aliyun.com/knowledge_detail/39428.html
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
# see details in https://help.aliyun.com/knowledge_detail/41334.html
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# 容器要想访问外部网络,需要本地系统的转发支持
net.ipv4.ip_forward = 1
# 访问业务域名时而会出现无法访问或连接超时的情况
# refer to https://www.ziji.work/kubernetes/kubernetes_cannot_accesspod_port.html
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 0
# bridge-nf 使得 netfilter 可以对 Linux 网桥上的 IPv4/ARP/IPv6 包过滤。
# 比如,设置net.bridge.bridge-nf-call-iptables=1后,二层的网桥在转发包时也会被 iptables 的 FORWARD 规则所过滤。
# refer to https://www.qikqiak.com/k8strain/k8s-basic/install/
# 是否在 iptables 链中过滤 IPv4 包
net.bridge.bridge-nf-call-iptables = 1
# 是否在 ip6tables 链中过滤 IPv6 包
net.bridge.bridge-nf-call-ip6tables = 1
# 是否在 arptables 的 FORWARD 中过滤网桥的 ARP 包
net.bridge.bridge-nf-call-arptables = 1
# 定义了系统中每一个端口最大的监听队列的长度,这是个全局的参数,默认值为128
net.core.somaxconn = 32768
# 服务器在访问量很大时,出现网络连接丢包的问题
# 比较现代的系统(Ubuntu 16+, CentOS 7+)里,64 位,16G 内存的机器,
# max 通常默认为 524288,
# bucket 为 131072(在sunrpc.conf文件中修改)。
# 随着内存大小翻倍这 2 个值也翻倍。
# refer to https://testerhome.com/topics/15824
net.netfilter.nf_conntrack_max = 524288
# 单个进程可分配的最大文件数
fs.nr_open = 6553600
# Linux系统级别限制所有用户进程能打开的文件描述符总数
fs.file-max = 6553600
# 每个进程内存拥有的VMA(虚拟内存区域)的数量。虚拟内存区域是一个连续的虚拟地址空间区域。在进程的生命
# 周期中,每当程序尝试在内存中映射文件,链接到共享内存段,或者分配堆空间的时候,这些区域将被创建。
# 进程加载的动态库、分配的内存、mmap的内存都会增加VMA的数量。通常一个进程会有小于1K个VMA,如果进程有
# 特殊逻辑,可能会超过该限制。
# 调优这个值将限制进程可拥有VMA的数量。限制一个进程拥有VMA的总数可能导致应用程序出错,因为当进程达到
# 了VMA上线但又只能释放少量的内存给其他的内核进程使用时,操作系统会抛出内存不足的错误。如果你的操作系
# 统在NORMAL区域仅占用少量的内存,那么调低这个值可以帮助释放内存给内核用。
# refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
# 可以使用命令 cat /proc/${pid}/maps 来查看指定进程拥有的VMA。
vm.max_map_count = 655360
# 修复ipvs模式下长连接timeout问题 小于900即可
# refer to https://github.com/moby/moby/issues/31208
# ipvsadm -l --timout
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
# refer to https://github.com/Azure/aks-engine/blob/d6f4929a659241ea33d8fd4d9fc86d0e27b0cb07/parts/k8s/cloud-init/artifacts/sysctl-d-60-CIS.conf
# refer to https://github.com/kubernetes/kubernetes/blob/75d45bdfc9eeda15fb550e00da662c12d7d37985/pkg/kubelet/cm/container_manager_linux.go#L359-L397
vm.overcommit_memory = 1
kernel.panic = 10
kernel.panic_on_oops = 1
# refer to https://github.com/Azure/AKS/issues/772
fs.inotify.max_user_watches = 1048576
# 指定每个真实用户 ID 可以创建的 inotify 实例数量上限
# 指定 inotify 实例可以排队事件数量的上限
fs.inotify.max_user_instances = 1048576
fs.inotify.max_queued_events = 1048576
EOF

#apply the new sysctl settings immediately
sysctl -p /etc/sysctl.d/os.conf
}
 
# Replace all yum repositories with the Aliyun base + EPEL mirrors, install
# common tooling, and synchronize time against ntp.aliyun.com via chrony.
function yum_repo_install(){
	#remove the existing yum repo definitions
	rm -rf /etc/yum.repos.d/*
	#install the Aliyun base repo and the EPEL repo
	curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
	curl -o /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
	yum makecache fast
	#install commonly used base packages
	yum install -y wget vim net-tools lrzsz tree chrony zip unzip bash-completion bash-completion-extras bzip2
	#enable the CentOS 7 time-sync service in the Shanghai timezone
	timedatectl set-timezone Asia/Shanghai
	systemctl start chronyd
	systemctl enable chronyd
	#comment out the default NTP servers and add the Aliyun one
	sed -i -e '/^server/s/^/#/'  -e '1a server ntp.aliyun.com iburst' /etc/chrony.conf
	systemctl restart chronyd	
}
 
# Install docker-ce from the Aliyun mirror (skipped when already present),
# write daemon.json (registry mirror, json-file log rotation, data-root)
# plus a systemd override exposing the local TCP socket, then enable and
# start the daemon.
# Globals: docker_dir (read; defaulted when empty).
function docker_install(){
	# grep -q: silent presence test instead of inspecting $? afterwards.
	if ! rpm -qa | grep -q docker-ce
	then
		curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
		yum makecache fast
		#install the Docker community edition
		yum -y install docker-ce
		if [ "${docker_dir}" == "" ]; 
		then
			docker_dir=/alidata/docker
		fi
		mkdir -p $docker_dir /etc/docker /etc/systemd/system/docker.service.d/
		cat << EOF > /etc/docker/daemon.json
{"registry-mirrors": ["https://a14c78qe.mirror.aliyuncs.com"],
"log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "5"
    },
"data-root": "${docker_dir}",
"live-restore": false
}
EOF
	
		# BUG FIX: \$DOCKER_NETWORK_OPTIONS is now escaped so the literal
		# variable reference reaches the unit file for systemd to expand.
		# The original interpolated it at script run time, where it is unset,
		# so the option silently vanished from the override.
		cat << EOF > /etc/systemd/system/docker.service.d/override.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd  -H 127.0.0.1:2375 -H unix:///var/run/docker.sock \$DOCKER_NETWORK_OPTIONS
EOF
 
		systemctl daemon-reload
		systemctl enable docker
		systemctl start docker
		systemctl status docker
	else
	  echo "docker-ce already installed"
	fi
	
}
 
 
 
#os_init
#update_kernel
#yum_repo_install
#docker_install

二、docker安装中间件(middleware_install)

#!/bin/bash

#中间件docker安装
#执行方法: ./middleware_install.sh
##update activemq version from 5.14.3 to 5.17.1
##update rocketmq version from 4.9.2 to 4.9.4,ADD dashboard login password


#IP=$1
# First IPv4 address of this host; baked into the generated docker scripts.
IP=$(hostname -I|cut -d" " -f 1)
# Directory where the generated per-middleware install scripts are written.
scripts_dir=/alidata/middleware
# Per-middleware data directories (bind-mounted into the containers).
redis_dir=/alidata/app/redis
zookeeper_dir=/alidata/app/zookeeper
kafka_dir=/alidata/app/kafka
minio_dir=/alidata/app/minio
tdengine_dir=/alidata/app/tdengine
elasticsearch_dir=/alidata/app/elasticsearch
rocketmq_dir=/alidata/app/rockmqdata

nacos_dir=/alidata/app/nacos

#(disabled) IP-argument validation from the original $1-based version
#if [ ! $IP  ];then
#	echo "IP不能为空"
#	exit 0
#fi
#create the scripts directory when missing
if [ ! -d "$scripts_dir" ];then
    mkdir $scripts_dir
fi

#redis
# Write a tuned redis.conf, then generate and run redis.sh to (re)create a
# Redis 6.2.6 container (password auth, 4 GB maxmemory, AOF enabled).
# Globals: redis_dir, scripts_dir (expanded when the heredocs are written).
redis_install(){
	mkdir -p $redis_dir/config
	#write the Redis configuration file consumed by the container
	cat > $redis_dir/config/redis.conf << EOF
notify-keyspace-events Ex
requirepass "Pinming1024"
maxmemory 4gb
client-output-buffer-limit pubsub 33554432 8388608 60
appendfsync no
no-appendfsync-on-rewrite yes
save 900 1 
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
dir "/data"
aof-rewrite-incremental-fsync yes
maxmemory-policy volatile-lru
lazyfree-lazy-server-del yes
lazyfree-lazy-expire yes
lazyfree-lazy-eviction no
dynamic-hz yes
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-compress-depth 0
list-max-ziplist-size -2
set-max-intset-entries 512
slowlog-log-slower-than 2000
slowlog-max-len 1024
stream-node-max-bytes 4096
stream-node-max-entries 100
timeout 0
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

EOF

	cd $scripts_dir
	cat > redis.sh  << EOF
docker rm -f redis 
docker run --name redis -d \\
--restart always -p 6379:6379 \\
-v $redis_dir/data:/data \\
-v $redis_dir/config/redis.conf:/etc/redis/redis.conf \\
swr.cn-east-3.myhuaweicloud.com/syh/redis:6.2.6 \\
redis-server /etc/redis/redis.conf --appendonly yes

EOF
	#run the generated script
	sh redis.sh
}

#zookeeper
# Generate and run zookeeper.sh to (re)create a standalone ZooKeeper 3.6
# container with data/datalog volumes and 72h autopurge.
# Globals: zookeeper_dir, scripts_dir (expanded when the heredoc is written).
zookeeper_install(){
    # BUG FIX: -p added -- the bare `mkdir` failed when the directory already
    # existed (re-runs) or when /alidata/app was missing, unlike every other
    # installer in this script.
    mkdir -p $zookeeper_dir
	cd $scripts_dir
	cat > zookeeper.sh  << EOF
docker rm -f zookeeper3.6
docker run -d \\
--name zookeeper3.6 --restart always \\
-p 2181:2181 -p 12888:2888 -p 13888:3888 \\
-v $zookeeper_dir/data:/data \\
-v $zookeeper_dir/datalog:/datalog \\
-e "ZOO_AUTOPURGE_PURGEINTERVAL=72" \\
-e "ZOO_AUTOPURGE_SNAPRETAINCOUNT=10" \\
swr.cn-east-3.myhuaweicloud.com/syh/zookeeper:3.6.3

EOF
#run the generated script
	sh zookeeper.sh
}

#kafka
# Generate and run kafka.sh for a single-broker Kafka 2.7.1 container
# registered under the /kafka chroot of the local ZooKeeper.
# Globals: kafka_dir, scripts_dir, IP (expanded when the heredoc is written).
# NOTE(review): the lower-case names (KAFKA_advertised_listeners, ...) rely
# on the image's env-to-property mapping -- verify they are honored.
kafka_install(){
	#create the data directory
	mkdir -p $kafka_dir
	cd $scripts_dir
	cat > kafka.sh  << EOF
docker stop kafka 
docker rm -f kafka
docker run --name kafka --privileged -h kafka -d \\
-e KAFKA_ADVERTISED_HOST_NAME=$IP \\
-e KAFKA_advertised_listeners=PLAINTEXT://$IP:9092 \\
-e KAFKA_ADVERTISED_PORT=9092 \\
-e KAFKA_BROKER_ID=1 \\
-e KAFKA_ZOOKEEPER_CONNECT=$IP:2181/kafka \\
-e KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS=60000 \\
-e KAFKA_transaction_state_log_replication_factor=3 \\
-e KAFKA_auto_create_topics_enable=true \\
-p 9092:9092  \\
-v $kafka_dir:/kafka \\
-v /var/run/docker.sock:/var/run/docker.sock \\
--restart always \\
swr.cn-east-3.myhuaweicloud.com/syh/kafka:2.7.1
 
EOF
	#run the generated script
	sh kafka.sh
}

#rabbitmq
# Generate and run rabbitmq.sh to (re)create a RabbitMQ 3.8 container with
# the management UI on 15672 and a persistent data volume.
# Globals: scripts_dir; rabbitmq_dir (defaulted below when unset).
rabbitmq_install(){
	# BUG FIX: rabbitmq_dir was never defined anywhere in this script, so the
	# data volume path collapsed to "/data:..." -- default it like the other
	# *_dir variables while still honoring an externally exported value.
	rabbitmq_dir=${rabbitmq_dir:-/alidata/app/rabbitmq}
	mkdir -p $rabbitmq_dir
	cd $scripts_dir
	cat > rabbitmq.sh  << EOF
docker rm -f rabbitmq
docker run -d  --name rabbitmq --restart always \\
 -p 5672:5672 -p 15672:15672 \\
-v $rabbitmq_dir/data:/var/lib/rabbitmq \\
-v /etc/localtime:/etc/localtime:ro \\
-e RABBITMQ_DEFAULT_USER=admin \\
-e RABBITMQ_DEFAULT_PASS=Pinming1024 \\
--hostname DockerRabbit \\
swr.cn-east-3.myhuaweicloud.com/syh/rabbitmq:3.8.8-management

EOF
	#run the generated script
	sh rabbitmq.sh
}

#NACOS install
#MYSQL_HOST, MYSQL_PORT, MYSQL_USER and MYSQL_PASSWORD must be set in the
#environment before calling this: they are interpolated into nacos.sh when
#the heredoc is written (unset values become empty strings).
#The nacos database schema must be imported into MySQL beforehand.
# NOTE(review): unlike every other installer here this only generates
# nacos.sh and never runs it (no `sh nacos.sh`) -- confirm that is intended.
nacos_install(){
    mkdir -p $nacos_dir
	cd $scripts_dir
	cat > nacos.sh  << EOF
docker run \\
--name nacos -d \\
-p 8848:8848 \\
-v $nacos_dir/logs:/nacos/logs \\
--privileged=true \\
--restart=always \\
-e MODE=standalone \\
-e JVM_XMS=256m \\
-e JVM_XMX=256m \\
-e JVM_XMN=256m \\
-e SPRING_DATASOURCE_PLATFORM=mysql \\
-e MYSQL_SERVICE_HOST=$MYSQL_HOST \\
-e MYSQL_SERVICE_PORT=$MYSQL_PORT \\
-e MYSQL_SERVICE_USER=$MYSQL_USER \\
-e MYSQL_SERVICE_PASSWORD=$MYSQL_PASSWORD \\
-e MYSQL_SERVICE_DB_NAME=nacos \\
swr.cn-east-3.myhuaweicloud.com/syh/nacos:2.0.3

EOF
	
}

#minio
# Generate and run minio.sh for a MinIO object-storage container with the
# web console on 9090.
# Globals: minio_dir, scripts_dir, IP (expanded when the heredoc is written).
minio_install(){
	mkdir -p $minio_dir
	cd $scripts_dir
	# BUG FIX: --address now binds to this host's $IP instead of the
	# hard-coded 192.168.250.94 left over from another environment.
	cat > minio.sh  << EOF
docker run -d \\
   --net=host \\
   --restart always \\
   -p 9000:9000 \\
   -p 9090:9090 \\
   --name minio \\
   -v $minio_dir/data:/data \\
   -v /etc/localtime:/etc/localtime \\
   -e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \\
   -e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \\
   minio/minio server /data --address $IP:9000 --console-address ":9090"

EOF
	#run the generated script
	sh minio.sh
}

#TDengine
# Generate and run tdengine.sh for a TDengine 3.0.1.4 container with
# log/data volumes and the full native + REST port range published.
# Globals: tdengine_dir, scripts_dir.
tdengine_install(){
	mkdir -p $tdengine_dir
	cd $scripts_dir
	cat > tdengine.sh  << EOF
docker run -d \\
--name iot-tdengine \\
--restart always \\
-h tdengine \\
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 \\
-p 6043-6049:6043-6049/udp \\
-v $tdengine_dir/log:/var/log/taos \\
-v $tdengine_dir/data:/var/lib/taos \\
swr.cn-east-3.myhuaweicloud.com/syh/tdengine:3.0.1.4

EOF
	# BUG FIX: run the script just generated; the original copy-pasted
	# `sh rabbitmq.sh`, so TDengine was never started.
	sh tdengine.sh
}

#rocketmq install
# Create the rocketmq user/directories, write broker.conf, then generate and
# run scripts for the broker, the nameserver and the dashboard console.
# Globals: rocketmq_dir, scripts_dir, IP; rocketmq_password may be exported
# by the caller to set the dashboard admin password.
rocketmq_install(){
	# BUG FIX: the original interpolated the misspelled, never-defined
	# variable $rockermq_password, producing the broken line "admin=,1" in
	# users.properties. Default to the password used elsewhere in this script,
	# still overridable via the environment.
	rocketmq_password=${rocketmq_password:-Pinming1024}
	#create a user matching the uid/gid baked into the customized image
	groupadd -g 3000 rocketmq
	useradd -u 3000 -g rocketmq -M -s /sbin/nologin rocketmq
	#create the data/config directories
	mkdir -p $rocketmq_dir/{broker/logs,broker/store,config,namesrv/store,namesrv/logs,dashboard}	
	#ownership for the container user
	chown -R rocketmq:rocketmq $rocketmq_dir
	cat > $rocketmq_dir/config/broker.conf  << EOF
brokerClusterName = DefaultCluster
brokerName = broker-a
brokerId = 0
deleteWhen = 04
fileReservedTime = 48
brokerRole = ASYNC_MASTER
flushDiskType = ASYNC_FLUSH
brokerIP1 = $IP
listenPort=10911
brokerId=0
autoCreateTopicEnable=true
mapedFileSizeConsumeQueue=300000
diskMaxUsedSpaceRatio=90

EOF

	mkdir -p $scripts_dir/rockmq
	cd $scripts_dir/rockmq
	cat > rmqbroker.sh  << EOF
docker rm -f rmqbroker
docker run -d -p 10911:10911 -p 10909:10909  \\
--restart always \\
-v $rocketmq_dir/broker/logs:/home/rocketmq/logs \\
-v $rocketmq_dir/config/broker.conf:/home/rocketmq/conf/broker.conf \\
-v $rocketmq_dir/broker/store:/home/rocketmq/store \\
--name rmqbroker  \\
-e "NAMESRV_ADDR=$IP:9876" \\
-e "JAVA_OPT_EXT=-Xmx2048m -Xms2048m -Xmn1024m" \\
-e "MAX_POSSIBLE_HEAP=200000000" \\
swr.cn-east-3.myhuaweicloud.com/syh/rocketmq:4.9.4 \\
sh mqbroker -c /home/rocketmq/conf/broker.conf

EOF

	cat > rmqnamesrv.sh  << EOF
docker rm -f rmqnamesrv
docker run -d -p 9876:9876  \\
--restart always   \\
-v $rocketmq_dir/namesrv/logs:/home/rocketmq/logs \\
--name rmqnamesrv  \\
-e "JAVA_OPT_EXT=-Xms512M -Xmx512M -Xmn128m" \\
-e "MAX_POSSIBLE_HEAP=100000000" \\
swr.cn-east-3.myhuaweicloud.com/syh/rocketmq:4.9.4 \\
sh mqnamesrv

EOF
	
	#dashboard login file: user=admin, password, role 1
	cat > $rocketmq_dir/dashboard/users.properties << EOF
admin=${rocketmq_password},1

EOF


	cat > rocketmq-ng.sh << EOF
docker rm -f rocketmq-dashboard
docker run -d  --name=rocketmq-dashboard  \\
--restart always   \\
-v $rocketmq_dir/dashboard/users.properties:/tmp/rocketmq-console/data/users.properties \\
-e "JAVA_OPTS=-Drocketmq.namesrv.addr=$IP:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false -Drocketmq.config.loginRequired=true" \\
-p 8080:8080 -t \\
swr.cn-east-3.myhuaweicloud.com/syh/rocketmq-dashboard:4.9.4

EOF

	#run the generated scripts
	sh rmqbroker.sh
	sh rmqnamesrv.sh
	sh rocketmq-ng.sh
	
}

#elasticsearch
# Single-node Elasticsearch 7.1.0 container plus a web console container
# linked to it.
# Globals: elasticsearch_dir, scripts_dir.
elasticsearch_install(){
   mkdir -p $elasticsearch_dir/data
   #world-writable so the container's elasticsearch uid can write the data dir
   chmod -R 777 $elasticsearch_dir
   cd $scripts_dir
	cat > elasticsearch.sh  << EOF
docker rm -f elasticsearch7	
docker run -d -p 9200:9200 \\
-p 9300:9300 \\
--restart always \\
--name elasticsearch7 \\
-v $elasticsearch_dir/data:/usr/share/elasticsearch/data \\
-e "discovery.type=single-node" \\
swr.cn-east-3.myhuaweicloud.com/syh/elasticsearch:7.1.0

EOF
	#run the generated script
	sh elasticsearch.sh
	sleep 5
##console script (reaches the ES container via the legacy --link mechanism)
	cat > elasticsearch-ng.sh << EOF
docker rm -f  elasticsearch-ng	
docker run -p 9800:9800 \\
--name elasticsearch-ng -d \\
--link elasticsearch7:elasticsearch7 \\
swr.cn-east-3.myhuaweicloud.com/syh/elasticsearch-ng:v20220416

EOF
    #run the generated script
	sh elasticsearch-ng.sh
	
}

#调用函数
#redis_install
#zookeeper_install
#kafka_install
#rabbitmq_install
#nacos_install
#minio_install
#tdengine_install
#rocketmq_install
#elasticsearch_install

三、mysql8安装脚本(mysql8_install)

#!/bin/bash

#mysql8.0.27安装,默认安装MySQL8.0.27
#执行方法: ./mysql_install.sh


# MySQL version and installation/runtime layout.
mysql_version="8.0.27"
mysql_base_dir=/usr/local/mysql
mysql_data_dir=/alidata/mysql
# Single-quoted so `#`, `!` and `~` are never special to the shell.
mysql_root_password='Qaz@#21323!~'
mysql_zzsa_password='Qaz@#21323!~'
download_mysql="https://mirrors.tuna.tsinghua.edu.cn/mysql/downloads"

##Remove any pre-installed MySQL packages so they cannot conflict.
for i in $(rpm -qa|grep mysql)
do
	rpm -e $i --nodeps
done

rm -rf /var/lib/mysql && rm -rf /etc/my.cnf

##Install required dependencies.
# BUG FIX: "met-tools" was a typo -- the package is net-tools, so the
# original yum run failed to install it.
yum install -y libaio net-tools wget 
##resolve the directory this script was launched from
current_dir=$(
   cd "$(dirname "$0")"
   pwd
)

# Create the install prefix when missing.
if [ ! -d $mysql_base_dir ]
then
	mkdir -p $mysql_base_dir
fi	

# Use a local tarball when it sits next to the script, otherwise download it
# from the mirror; unpack into $mysql_base_dir either way.
if [ -f "$current_dir/mysql-${mysql_version}-linux-glibc2.12-x86_64.tar.xz" ]
then
	tar -xJvf mysql-${mysql_version}-linux-glibc2.12-x86_64.tar.xz
	mkdir -p $mysql_base_dir
	mv mysql-${mysql_version}-linux-glibc2.12-x86_64/* $mysql_base_dir
else
	wget -c ${download_mysql}/MySQL-8.0/mysql-${mysql_version}-linux-glibc2.12-x86_64.tar.xz --no-check-certificate
	tar -xJvf mysql-${mysql_version}-linux-glibc2.12-x86_64.tar.xz
	mv mysql-${mysql_version}-linux-glibc2.12-x86_64/* $mysql_base_dir
fi

# Dedicated service account that owns the installation.
groupadd mysql
useradd -g mysql mysql
chown -R mysql.mysql /usr/local/mysql
# PATH for the mysql login user.
# NOTE(review): adding $mysql_base_dir/lib to PATH looks like it was meant
# for LD_LIBRARY_PATH -- confirm.
cat << EOF>> /home/mysql/.bash_profile
export PATH=$mysql_base_dir/bin:$mysql_base_dir/lib:\$PATH
EOF

# System-wide PATH entry for the MySQL client tools.
cat << EOF>> /etc/profile
export PATH=\$PATH:$mysql_base_dir/bin
EOF

source /etc/profile

# Runtime layout: data, logs, binlogs, config and tmp under one root.
mkdir -p ${mysql_data_dir}/{data,log,binlog,conf,tmp}
chown -R mysql.mysql ${mysql_data_dir}

# Write my.cnf: GTID replication on, native-password auth, binlogs and slow
# log under ${mysql_data_dir}; InnoDB sized with a 4G buffer pool.
# The heredoc is unquoted on purpose so ${mysql_data_dir} expands here.
cat > ${mysql_data_dir}/conf/my.cnf << EOF
[mysqld]
lower_case_table_names = 1
user = mysql
server_id = 1
port = 3306
default-time-zone = '+08:00'
enforce_gtid_consistency = ON
gtid_mode = ON
binlog_checksum = none
authentication_policy = mysql_native_password
skip-name-resolve = ON
open_files_limit = 65535
table_open_cache = 2000
sql_mode = '' 
log_bin_trust_function_creators = TRUE
max_connections = 4000
datadir = ${mysql_data_dir}/data
tmpdir = ${mysql_data_dir}/tmp/
#################innodb########################
innodb_data_file_path = ibdata1:512M;ibdata2:512M:autoextend
innodb_buffer_pool_size = 4G
innodb_flush_log_at_trx_commit = 2
innodb_io_capacity = 600
innodb_lock_wait_timeout = 120
innodb_log_buffer_size = 8M
innodb_log_file_size = 200M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 85
innodb_read_io_threads = 8
innodb_write_io_threads = 8
innodb_thread_concurrency = 32
innodb_file_per_table
innodb_rollback_on_timeout
innodb_undo_directory = ${mysql_data_dir}/data
innodb_log_group_home_dir = ${mysql_data_dir}/data
###################session###########################
join_buffer_size = 8M
key_buffer_size = 256M
bulk_insert_buffer_size = 8M
max_heap_table_size = 96M
tmp_table_size = 96M
read_buffer_size = 8M
sort_buffer_size = 2M
max_allowed_packet = 64M
read_rnd_buffer_size = 32M
############log set###################
log-error = ${mysql_data_dir}/log/mysqld.err
log-bin = ${mysql_data_dir}/binlog/binlog
log_bin_index = ${mysql_data_dir}/binlog/binlog.index
max_binlog_size = 500M
slow_query_log_file = ${mysql_data_dir}/log/slow.log
slow_query_log = 1
long_query_time = 10
log_queries_not_using_indexes = OFF
log_throttle_queries_not_using_indexes = 10
log_slow_admin_statements = ON
log_timestamps=system
log_output = FILE,TABLE
master_info_file = ${mysql_data_dir}/binlog/master.info

EOF

##Initialize MySQL; --initialize-insecure creates root with an empty password
mysqld --defaults-file=${mysql_data_dir}/conf/my.cnf  --initialize-insecure 

##Manage MySQL through systemd
cat > /etc/systemd/system/mysqld.service << EOF
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target

[Install]
WantedBy=multi-user.target

[Service]
User=mysql
Group=mysql

# Have mysqld write its state to the systemd notify socket
Type=notify

# Disable service start and stop timeout logic of systemd for mysqld service.
TimeoutSec=0

# Start main service
ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/alidata/mysql/conf/my.cnf

# Sets open_files_limit
LimitNOFILE = 65535
Restart=on-failure
RestartPreventExitStatus=1

# Set environment variable MYSQLD_PARENT_PID. This is required for restart.
Environment=MYSQLD_PARENT_PID=1

PrivateTmp=false

EOF

systemctl daemon-reload
systemctl start mysqld
systemctl enable mysqld

##Set the root password (the initial password is empty, so no -p is needed)
mysql -uroot -hlocalhost -e "alter user 'root'@'localhost' identified by \"${mysql_root_password}\";flush privileges;"
##Create the application user with full privileges from any host
mysql -uroot -p${mysql_root_password} -e "create user 'zzsa'@'%' identified by \"${mysql_zzsa_password}\";grant all privileges on *.* to 'zzsa'@'%';flush privileges;"

四、mysql备份脚本、微信接口告警(mysqlbackup)

#!/bin/bash
# Daily MySQL backup via mydumper; each database's result is appended to
# $LogFile and pushed to a WeCom (企业微信) webhook. Dumps older than 30
# days are pruned at the end.
source /etc/profile
IP=192.168.1.151
DATE=`date +%Y%m%d%H%M%S`
USERNAME=zzsa
PASSWORD=XXXXX
RMDIR=/alidata/backup/mysqlbackup/$IP/
BACKUPDIR=/alidata/backup/mysqlbackup/$IP/data/$DATE
PORT=3306
LogFile=/alidata/scripts/mysqlbackup.log
# All databases except system schemas; also drops any name containing
# "mysql" or "test" because of the substring greps.
DATABASES=$(/usr/local/mysql/bin/mysql -h$IP -u$USERNAME -p$PASSWORD -P$PORT -e "show databases" |grep -v mysql|grep -v Database | grep -v test| grep -v information_schema|grep -v  performance_schema )
if [ ! -d "$BACKUPDIR" ]; then
      mkdir  -p $BACKUPDIR
fi
echo -e "\033[36m ---------------------开始备份 $DATE -----------------\033[0m">>$LogFile
for i in $DATABASES
do       
        # mydumper flags: -t 4 threads, -F 64 chunk size, -c compress,
        # -G/-E/-R triggers/events/routines; -k presumably --no-locks --
        # confirm against the installed mydumper version.
        /usr/local/bin/mydumper -h $IP -u $USERNAME  -p $PASSWORD -P $PORT  -k -t 4 -F 64  -c -G -E -R  -B $i -o $BACKUPDIR/$i
        if [ $? -eq 0 ]; then
                echo -e " \033[32m 数据库 $i 自动备份成功\033[0m">>$LogFile
                #WeCom webhook notification (success)
                   curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=126acc51-3bd8-421d-ae1c-19bfdbXXXXX' \
                   -H 'Content-Type: application/json' \
                   -d '
                   {
                        "msgtype": "markdown",
                        "markdown": {
                            "content": "<font color=\"warning\">数据库 '$i' 自动备份成功</font>"
                        }
                   }'           
        else
                echo -e "\033[31m  数据库 $i 备份失败\033[0m">>$LogFile
                #WeCom webhook notification (failure)
                   curl 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=126acc51-3bd8-421d-ae1c-19bfdbXXXXX' \
                   -H 'Content-Type: application/json' \
                   -d '
                   {
                        "msgtype": "markdown",
                        "markdown": {
                            "content": "<font color=\"warning\">数据库 '$i' 自动备份失败,请快速解决问题!!</font>"
                        }
                   }'
        fi 


done
#prune backups older than 30 days
if [  -d "$RMDIR" ]; then
        find /alidata/backup/mysqlbackup/$IP/data/ -mtime +30 -name "20*" -exec rm -rf {} \;
fi

echo -e "\033[36m  ---------------------备份结束 ----------------- \033[0m">>$LogFile

五、飞书告警脚本

#!/bin/sh
# Send a Feishu (Lark) webhook notification with a DB backup result.
# The JSON body is assembled by splicing shell variables between
# single-quoted segments, so values must not contain quotes themselves.
 
DATE=`date "+%Y-%m-%d %H:%M:%S"`
#split the timestamp into date and time parts for the message
DATE_R=`echo ${DATE}|awk '{print $1}'`
DATE_S=`echo ${DATE}|awk '{print $2}'`
app_name=test
db_name=test_db

# $1 - backup status text shown in the message (e.g. 成功/失败).
function feishu(){
	curl -X POST \
	  https://open.feishu.cn/open-apis/bot/v2/hook/2XXXXXXXX \
	  -H 'Content-Type: application/json' \
		-d '{
		"msg_type": "post",
		"content": {
			"post": {
				"zh_cn": {
					"content": [
						[
							{
								"tag": "text",
								"text": "db备份时间:'${DATE_R}' '${DATE_S}'\n"
							},
							{
								"tag": "text",
								"text": "db备份环境:'${app_name}'\n"
							},
							{
								"tag": "text",
								"text": "db备份数据库:'${db_name}'\n"
							},
							{
								"tag": "text",
								"text": "db备份状态:'${1}'"
							}
						],
						[

						]
					]
				}
			}
		}
	}'
}
feishu '成功'
#!/bin/sh
#Feishu notification, interactive-card ("pretty") version for build results.
# NOTE(review): feishu_url, app_env, job_name, build_id, git_branch and
# build_result are all expected from the caller's environment (e.g. a CI
# job) -- none of them are defined in this file.
function feishu_new(){
        curl -X POST \
          ${feishu_url} \
          -H 'Content-Type: application/json' \
                -d '
{
    "msg_type": "interactive",
    "card": {
        "config": {
            "wide_screen_mode": true,
            "enable_forward": true
        },
        "elements": [
            {
                "tag": "div",
                "text": {
                    "content": "**构建环境: **'${app_env}'\n**JOB名称: **'${job_name}'\n**构建id: **'${build_id}'\n**发布分支: **'${git_branch}'\n**构建结果: **'${build_result}'",
                    "tag": "lark_md"
                },
                "content": "",
                "elements": null
            },
            {
                "tag": "hr",
                "text": {
                    "content": "",
                    "tag": ""
                },
                "content": "",
                "elements": null
            }
        ],
        "header": {
            "title": {
                "content": "项目构建结果通知",
                "tag": "plain_text"
            },
            "template": "green"
        }
    }
}'
				
}

feishu_new

六、多主机批量备份数据库并飞书告警

#!/bin/bash
set -x

#mysql_backup
# Batch-backup every database on every host in host_data with mysqldump and
# send a Feishu notification per database via feishu().

db_user='mysql_user'
db_passwd='xxxxxx'
backup_path=/xxxx/xxxx/db_backup
LogFile=/opt/script/mysqlbackup.log
Webhook=https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxx-token

#map: RDS host -> space-separated list of databases excluded from backup
declare -A host_data
host_data=(
["ip"]="排除备份的数据库多个空格隔开"
["192.168.1.102"]="xxdb1 xxdb2 xxdb"
)




# Post one backup result to the Feishu webhook.
# $1 - status text (成功/失败).
# Reads the caller's TIME_LOG, rds_id and database variables; JSON is built
# by splicing shell variables between single-quoted segments.
function feishu(){
	DATE_R=`echo ${TIME_LOG}|awk '{print $1}'`	
	DATE_S=`echo ${TIME_LOG}|awk '{print $2}'`
	curl -X POST \
	  ${Webhook} \
	  -H 'Content-Type: application/json' \
		-d '{
		"msg_type": "post",
		"content": {
			"post": {
				"zh_cn": {
					"content": [
						[
							{
								"tag": "text",
								"text": "db备份时间:'${DATE_R}' '${DATE_S}'\n"
							},
							{
								"tag": "text",
								"text": "db备份实例:'${rds_id}'\n"
							},
							{
								"tag": "text",
								"text": "db备份数据库:'${database}'\n"
							},
							{
								"tag": "text",
								"text": "db备份状态:'${1}'"
							}
						],
						[

						]
					]
				}
			}
		}
	}'
}

# Print "yes" when the first argument occurs in the whitespace-separated
# list passed as the second argument; print nothing otherwise.
# $1 - database name to look for
# $2 - exclusion list ("db1 db2 ...")
is_exits() {
	local needle=$1
	local entry
	for entry in $2; do
		if [ "$entry" == "$needle" ]; then
			echo 'yes'
		fi
	done
}


# Iterate over every configured host, dump each database that is not on the
# host's exclusion list, and report each result to the log and Feishu.
for key in $(echo ${!host_data[*]});do	
	#rds connection host	
	host=$key
	
	#rds instance id: first dotted component of the host name
	rds_id=`echo $host | awk -F '.' '{print $1}'`
	
	#space-separated databases excluded from backup for this host
	EX_DATABASES=${host_data[$key]}
	
	#all candidate databases (system schemas filtered out)
	DATABASES=$(/usr/bin/mysql -h$host -u$db_user -p$db_passwd -e 'show databases'|grep -v mysql|grep -v Database | grep -v sys| grep -v information_schema|grep -v  performance_schema) 
	
	
	if [ ! -d "$backup_path/$rds_id" ];then
		mkdir -p $backup_path/$rds_id
	fi
	
	for database in $DATABASES;do
		DATE=`date +%Y%m%d%H%M%S`
		
		result=$(is_exits "$database" "$EX_DATABASES")
		if [ "$result" != "yes" ];then  #empty result => not on the exclusion list
			/usr/bin/mysqldump --skip-opt --single-transaction --max_allowed_packet=512M -q -h$host -u$db_user -p$db_passwd  $database > $backup_path/$rds_id/$database-$DATE.sql
			if [ $? -eq 0 ]; then
				TIME_LOG=`date "+%Y-%m-%d %H:%M:%S"`
				# BUG FIX: log the database just dumped ($database); the
				# original referenced $db_name, which is never defined in
				# this script, so the log lines named no database at all.
				echo -e "$TIME_LOG \033[32m 数据库 $database 自动备份成功\033[0m" >> $LogFile
				feishu '成功'
		
		
			else
				TIME_LOG=`date "+%Y-%m-%d %H:%M:%S"`
				echo -e "$TIME_LOG \033[31m  数据库 $database 备份失败\033[0m" >> $LogFile
				feishu '失败'
				#remove the partial dump so broken files never linger
				rm $backup_path/$rds_id/$database-$DATE.sql -f
			fi		
		fi
		
	done
done

七、mongodb批量备份脚本飞书通知

#!/bin/bash
#set -x
# Back up every MongoDB instance in HOSTS_PASS with mongodump and notify a
# Feishu webhook with each result.
MONGODUMP=/data/mongodb/bin/mongodump
BAK_BASE_DIR=/alidata/mongodb/daily_backup
DATE_DAY=`date +%Y_%m_%d`
Webhook=https://open.feishu.cn/open-apis/bot/v2/hook/id-xx-oo
#each entry: "host,password,description" (comma-separated)
HOSTS_PASS=(
"dds-xxoo.mongodb.rds.aliyuncs.com,xxxxoooooppp,阿里云xxx实例"
"127.0.0.1,xxxoojjj,自建mongo实例"
)


# Post one mongodump result to the Feishu webhook.
# $1 - status text (成功/失败).
# Reads the caller's TIME_LOG, desc and backup_path variables; JSON is built
# by splicing shell variables between single-quoted segments.
function feishu(){
	DATE_R=`echo ${TIME_LOG}|awk '{print $1}'`	
	DATE_S=`echo ${TIME_LOG}|awk '{print $2}'`
	curl -X POST \
	  ${Webhook} \
	  -H 'Content-Type: application/json' \
		-d '{
		"msg_type": "post",
		"content": {
			"post": {
				"zh_cn": {
					"content": [
						[
							{
								"tag": "text",
								"text": "mongo备份时间:'${DATE_R}' '${DATE_S}'\n"
							},
							{
								"tag": "text",
								"text": "mongo备份实例:'${desc}'\n"
							},
							{
								"tag": "text",
								"text": "mongo备份状态:'${1}'\n"
							},
							{
								"tag": "text",
								"text": "mongo备份路径:'${backup_path}'"
							}
						],
						[

						]
					]
				}
			}
		}
	}'
}


# Dump every instance in HOSTS_PASS and report each result via feishu().
for i in ${HOSTS_PASS[@]};do
	
	#split the "host,password,description" tuple
	host=$(echo $i|awk -F ',' '{print $1}')
	pass=$(echo $i|awk -F ',' '{print $2}')
	desc=$(echo $i|awk -F ',' '{print $NF}')
	
	#for remote hosts use the first DNS label, minus its final character, as
	#the instance id; the local instance keeps the raw host string.
	# NOTE(review): the %? trim presumably strips a trailing marker from the
	# dds-... label -- confirm against real Aliyun instance names.
	if [ "$host" != "127.0.0.1" ];then
		instance_id=$(echo $host|awk -F '.' '{print $1}')
		instance_id=${instance_id%?}
	else
		instance_id=$host
	fi
	
    #per-day, per-instance backup destination
	backup_path="$BAK_BASE_DIR/$DATE_DAY/$instance_id"
	#echo "开始备份$desc"  "$MONGODUMP -h $host --port 3717 -u root -p $pass -o $backup_path" 备份所有库
	#dump all databases of the instance
	$MONGODUMP -h $host --port 3717 -u root -p $pass -o $backup_path
	if [ $? -eq 0 ]; then
		TIME_LOG=`date "+%Y-%m-%d %H:%M:%S"`
		echo -e "$TIME_LOG \033[32m  $desc 备份成功\033[0m" 
		feishu '成功'
		
		
	else
		TIME_LOG=`date "+%Y-%m-%d %H:%M:%S"`
		echo -e "$TIME_LOG \033[31m  $desc 备份失败\033[0m" 
		feishu '失败'

	fi		
	
	
done

 八、域名证书到期天数飞书通知

#!/bin/bash
# Check the HTTPS certificate expiry of every domain listed in
# /mnt/checkSSL/https_list and notify Feishu when <= 30 days remain.

#timestamp prefix used in the log lines
getTime=`date "+%Y-%m-%d %H:%M:%S"`


# Send a Feishu interactive-card notification about an expiring certificate.
# $1 - days until expiry, $2 - domain name.
# BUG FIX: the card header "template" was "read", which is not one of the
# Feishu header color values (blue/green/red/...); "red" was clearly
# intended for an expiry warning.
function feishu_new(){
		feishu_url=xxxxxxxxxxxxxxx
        curl -X POST \
          ${feishu_url} \
          -H 'Content-Type: application/json' \
                -d '
{
    "msg_type": "interactive",
    "card": {
        "config": {
            "wide_screen_mode": true,
            "enable_forward": true
        },
        "elements": [
            {
                "tag": "div",
                "text": {
                    "content": "**域名: **'${2}'\n**还剩多少天到期: **'${1}'",
                    "tag": "lark_md"
                },
                "content": "",
                "elements": null
            },
            {
                "tag": "hr",
                "text": {
                    "content": "",
                    "tag": ""
                },
                "content": "",
                "elements": null
            }
        ],
        "header": {
            "title": {
                "content": "域名到期时间通知",
                "tag": "plain_text"
            },
            "template": "red"
        }
    }
}'
				
}




# For each domain (one per line), compute the days until its certificate
# expires, log the result, and notify Feishu when 30 days or less remain.
function checkHttps(){

#read /mnt/checkSSL/https_list line by line
while read line; do
    #notAfter date via openssl (SNI-aware, 2s connect timeout); the final awk
    #keeps month/day/year ("Mon DD YYYY") and drops the time-of-day field
    endTime=`echo | timeout 2 openssl s_client -servername ${line} -connect ${line}:443 2>/dev/null |openssl x509 -noout -dates |grep 'After'|awk -F '=' '{print $2}'| awk -F ' +' '{print $1,$2,$4 }'`
    endTimes=`date -d "$endTime" +%s`
    currentTimes=`date -d "$(date -u '+%b %d %T %Y GMT') " +%s `
    #seconds remaining until expiry
    let leftTime=$endTimes-$currentTimes
    #convert to whole days
    days=`expr $leftTime / 86400`
    sleep 3
    echo $getTime $days $line >> /mnt/checkSSL/checkSSL.log
    #notify when 30 days or less remain
    # NOTE(review): the `-ne 0` guard skips certificates expiring today (and
    # days==0 also occurs when the openssl lookup fails) -- confirm intended.
    [ $days -le 30 ] && [ $days -ne 0 ] &&  feishu_new $days $line

done < /mnt/checkSSL/https_list

}

checkHttps

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值