[PROJECT] DEPLOY CTI && PBX

1 Document Preparation

  • Deploy-Node Run
cd /data/
mkdir callcenter
cd callcenter/

# file transfer
rz # {cmftcc.zip}
yum install -y unzip zip

unzip cmftcc.zip
cd cmftcc
pwd
/data/callcenter/cmftcc/
ls
# freeswitch keepalived wise2cdeploy

cd freeswitch/
ls
# freeswitch.tar.bz2  fsdeb.tar  rainny.tar.bz2
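
Optionally, since rz transfers can truncate or corrupt large files, a quick integrity check is to compare checksums against the source machine (a sketch; assumes md5sum is available on both ends):

md5sum /data/callcenter/cmftcc.zip   # compare with the value on the source machine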

1.1 File Preparation

  1. Copy CMFTCOPY.zip to /data/callcenter/ on the jump host
  2. Copy it from the jump host to /data/callcenter/ on every machine
  3. Unzip the archive (also distributed as CMFTCOPY.tar.gz) to get keepalived and freeswitch

1.2 Image Preparation

  1. Load the freeswitch archive into a Docker image
  2. Push it to the Harbor registry

1.2.1 Deploy-Node Run

  • fsdeb.tar
cd /data/callcenter/cmftcc/freeswitch/
ls
# freeswitch.tar.bz2  fsdeb.tar  rainny.tar.bz2
  • Open the Harbor registry at harbor_node_ip in a browser (the login button is grayed out but can still be clicked) and open the library project; some images are already visible. No UI action is needed at this point: run the following commands on the deploy node, then check the images in Harbor again.

  • command set

cd /data/callcenter/cmftcc/freeswitch/

docker images
# docker load -i <image>.tar imports the tar into the local image store
docker load -i fsdeb.tar
docker images

cat /etc/docker/daemon.json
# docker tag
docker tag fs:deb1 harbor_node_ip/library/fs:deb1
docker rmi fs:deb1
docker images

# docker login Harbor
cat /etc/docker/daemon.json
docker login harbor_node_ip
Username: admin
Password:           #password: Harbor12345
Login Succeeded

# docker push local-images to Harbor
docker push harbor_node_ip/library/fs:deb1
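
As an optional sanity check (a sketch, not part of the original procedure), pull the image back from Harbor to confirm the push succeeded:

# remove the local tag, then pull from Harbor
docker rmi harbor_node_ip/library/fs:deb1
docker pull harbor_node_ip/library/fs:deb1
docker images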

1.2.2 Verify the Harbor Registry

  • Refresh Harbor in the browser; the pushed image is now visible.

  • Supplement: common docker image commands
    docker images                          # list local images
    docker rmi repo:TAG                    # delete an image by repo:tag
    docker rmi <image-id>                  # delete an image by ID
    docker load -i <image>.tar             # load an image from a compressed tar
    docker tag old_repo:tag new_repo:tag   # retag an image
    docker push repo:tag                   # push an image (run docker login <registry-ip> first)
    docker login <registry-ip>             # log in to the registry

3 Keepalived

3.1 Document Preparation

  • Deploy-Node Run
cd /data/callcenter/
# ssh transfer
scp cmftcc.zip cti1_ip:/data/callcenter/
scp cmftcc.zip cti2_ip:/data/callcenter/
scp cmftcc.zip pbx1_ip:/data/callcenter/
scp cmftcc.zip pbx2_ip:/data/callcenter/
  • Worker-Nodes Run
#  On the deploy node, run "ssh 'worker_node_ip'" to log in to each worker node
ssh 'worker_node_ip'

# worker_nodes run
cd /data/callcenter/
ls	# cmftcc.zip

yum install -y unzip zip

unzip cmftcc.zip
cd cmftcc
pwd # /data/callcenter/cmftcc/
ls # freeswitch keepalived

cd /data/callcenter/cmftcc/keepalived/
ls
# keepalived-1.3.5-8.el7_6.x86_64.rpm  
# lm_sensors-libs-3.4.0-6.20160601gitf9185e5.el7.x86_64.rpm  
# net-snmp-agent-libs-5.7.2-37.el7.x86_64.rpm  
# net-snmp-libs-5.7.2-37.el7.x86_64.rpm

yum localinstall *.rpm -y
systemctl enable keepalived.service;systemctl start keepalived.service
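
A quick check that keepalived came up on each node (a sketch):

systemctl status keepalived.service   # expect: active (running)
ps -ef | grep keepalived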

3.2 IPPBX

  • Note: in keepalived.conf, the master and backup must use the same virtual_router_id (a VRRP VRID must be between 1 and 255), while router_id must differ between them; router_id, state, and priority must not be identical on master and backup. The configs are shown below.

  • 100.67.33.7

 # Back up the original config
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.ori



vim /etc/keepalived/keepalived.conf
  ###### begin
  [root@nodeippbx1 keepalived]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id ippbx_master
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 188
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       100.67.33.16/23 dev eth0 label eth0:1
    }
}
###### end
  • 100.67.33.8
###### begin
[root@nodeippbx2 keepalived]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id ippbx_backup
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 188
    priority 20
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       100.67.33.16/23 dev eth0 label eth0:1
    }
}
 ###### end
  • Supplement
 On both hosts run: systemctl enable keepalived && systemctl start keepalived && ps -ef | grep keepalived
 Check the IPs on each node with ifconfig; the master node shows:
   eth0:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 100.67.33.16  netmask 255.255.254.0  broadcast 0.0.0.0
        ether fa:16:3e:dd:97:61  txqueuelen 1000  (Ethernet)
 Stop keepalived on the master and check again: the VIP disappears from the master and appears on the backup node:
   eth0:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 100.67.33.16  netmask 255.255.254.0  broadcast 0.0.0.0
        ether fa:16:3e:06:a4:f6  txqueuelen 1000  (Ethernet)
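
The same failover test, condensed into commands (a sketch; run the stop on the current master and the check on the backup):

# on the master
systemctl stop keepalived
# on the backup, the VIP should appear within a few advert intervals (advert_int 1)
ip addr show eth0 | grep 100.67.33.16
# restore the master when done
systemctl start keepalived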

3.3 CTI

  • 100.67.33.9
##################begin
[root@nodecti1 keepalived]# cat keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id cti_master
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 189
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 2222
    }
    virtual_ipaddress {
       100.67.33.17/23 dev eth0 label eth0:1
    }
}
##################end
  • 100.67.33.10
 ##################begin
 [root@nodecti2 keepalived]# cat /etc/keepalived/keepalived.conf
global_defs {
   router_id cti_backup
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 189
    priority 20
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 2222
    }
    virtual_ipaddress {
       100.67.33.17/23 dev eth0 label eth0:1
    }
}
##################end

3.4 Test Keepalived

Run on both nodes:

[root@nodecti1 keepalived]# systemctl enable keepalived
[root@nodecti1 keepalived]# systemctl start keepalived 
[root@nodecti1 keepalived]# ps -ef | grep keep
root      1780     1  0 Jan29 ttyS0    00:00:00 /sbin/agetty --keep-baud 115200 38400 9600 ttyS0 vt220
root     16941     1  0 19:20 ?        00:00:00 /usr/sbin/keepalived -D
root     16942 16941  0 19:20 ?        00:00:00 /usr/sbin/keepalived -D
root     16943 16941  0 19:20 ?        00:00:00 /usr/sbin/keepalived -D
root     18826  4506  0 19:22 pts/1    00:00:00 grep --color=auto keep

[root@nodecti1 keepalived]# ifconfig | grep eth0   ## the master shows two IPs (eth0 and eth0:1), the backup only one; after keepalived stops on the master, the backup gains the VIP
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
eth0:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
[root@nodecti1 keepalived]# ifconfig | grep 67
        inet 100.67.33.9  netmask 255.255.254.0  broadcast 100.67.33.255
        inet 100.67.33.17  netmask 255.255.254.0  broadcast 0.0.0.0
        RX packets 723588  bytes 667124730 (636.2 MiB)
        RX packets 459  bytes 86709 (84.6 KiB)
vethe922676: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        TX packets 750673  bytes 669843972 (638.8 MiB)
        inet6 fe80::867:1dff:fe29:55d0  prefixlen 64  scopeid 0x20<link>
        ether 0a:67:1d:29:55:d0  txqueuelen 0  (Ethernet)
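
To confirm the two nodes are actually exchanging VRRP advertisements, a hedged tcpdump check can be used (VRRP is IP protocol 112):

tcpdump -ni eth0 -c 5 ip proto 112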

4 Deploy POD

Notes:

  • Command Set
cd /data/callcenter
rz 
mkdir localrpm
rz # 

# 
scp -r bzip2-1.0.6-13.el7.x86_64.rpm worker_node_ip:/data
yum localinstall -y *.rpm

cd /data/callcenter/cmftcc/freeswitch/
scp -r rainny.tar.bz2 worker_node_ip:/data
scp -r freeswitch.tar.bz2 worker_node_ip:/data



# run on worker_node
cd /data
tar -xjvf rainny.tar.bz2
tar -xjvf freeswitch.tar.bz2

ls # {rainny, freeswitch, rainny.tar.bz2, freeswitch.tar.bz2}

4.1 Document Preparation

4.1.1 Worker-Node

  • CTI Node && PBX Node
cd /data/callcenter/cmftcc/freeswitch/
scp -r rainny.tar.bz2 worker_node_ip:/data
scp -r freeswitch.tar.bz2 worker_node_ip:/data

# localinstall bzip2
# yum localinstall ./bzip2-1.0.6-13.el7.x86_64.rpm

# worker_node run
cd /data

# tar -xjvf xxx.tar.bz2
tar -xjvf rainny.tar.bz2
tar -xjvf freeswitch.tar.bz2

ls # {rainny, freeswitch, rainny.tar.bz2, freeswitch.tar.bz2}

4.1.2 Shell Script

  • CTI
cp cti_rc.local /data/rainny/
cp ctiwatchdog  /data/rainny/cti/
mv /data/rainny/cti_rc.local /data/rainny/rc.local
chmod +x /data/rainny/rc.local
chmod +x /data/rainny/cti/ctiwatchdog
  • PBX
cp pbx_rc.local /data/rainny/
cp FreeswitchWatchdog /data/rainny/ippbx/
mv /data/rainny/pbx_rc.local /data/rainny/rc.local
chmod +x /data/rainny/rc.local
chmod +x /data/rainny/ippbx/FreeswitchWatchdog

4.1.3 localinstall bzip2

  • Prepare the rpm package
    Copy bzip2-1.0.6-13.el7.x86_64.rpm from outside onto the deploy machine, then run: yum localinstall ./bzip2-1.0.6-13.el7.x86_64.rpm

  • Install bzip2 locally on the worker node

scp -r bzip2-1.0.6-13.el7.x86_64.rpm worker_node_ip:/data
# localinstall the .rpm
yum localinstall ./bzip2-1.0.6-13.el7.x86_64.rpm

4.2 Prepare Host Content Mapped into the Pod

4.2.1 CTI

  • rc.local
    Note: make rc.local executable
cd /data/rainny/
chmod +x rc.local
ls
rc.local

# Note: the container's main process must not run in the background

cat /data/rainny/rc.local

----------------------------------------------------------------
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.

service ssh start

cd /var/rainny/cti/rainnyinfo/

cp ESL.so /usr/lib/
cp ManagedEsl.dll /usr/lib

cd /var/rainny/cti/

./ctiwatchdog
----------------------------------------------------------------


  • ctiwatchdog
cd /data/rainny/cti
chmod +x ctiwatchdog

cat /data/rainny/cti/ctiwatchdog

#!/bin/sh
# Watchdog: restart the CTI server (rainnytech.exe under mono) if it is not running; poll every 2 seconds
while true
do
	sn=`ps -ef | grep rainnytech.exe| grep -v grep`
	if [ "${sn}" = "" ]
	then
		cd /var/rainny/cti/rainnyinfo/
		echo 111 >>/var/rainny/cti/rainnyinfo/ctiwatchdog.log
		setsid mono rainnytech.exe
		echo 222 >>/var/rainny/cti/rainnyinfo/ctiwatchdog.log
		echo `date` ------------------ctiserver is restart >>/var/rainny/cti/rainnyinfo/ctiwatchdog.log
	else
		echo `date` ctiserver is running >>/var/rainny/cti/rainnyinfo/ctiwatchdog.log
	fi
	sleep 2
done


  • Copy the following files into place
cd /data/rainny/cti/rainnyinfo

# Note: copy the two files below into the path above
cp CTI2.dll /data/rainny/cti/rainnyinfo/
cp rainnytech.exe /data/rainny/cti/rainnyinfo/

4.2.2 IPPBX

  • rc.local
cd /data/rainny/
chmod +x rc.local
ls
rc.local

# Note: the container's main process must not run in the background
# Note: paths inside rc.local are container paths; the script is executed once the container is running

cat /data/rainny/rc.local

----------------------------------------------------------------
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.

service ssh start

cd /var/rainny/ippbx/

./FreeswitchWatchdog
----------------------------------------------------------------


  • FreeswitchWatchdog
cd /data/rainny/ippbx
chmod +x FreeswitchWatchdog

----------------------------------------------------------------
#!/bin/sh
# Watchdog: restart FreeSWITCH if it is not running; poll every 2 seconds
while true
do
	sn=`ps -ef | grep freeswitch | grep -v grep`
	if [ "${sn}" = "" ]
	then
		cd /usr/local/freeswitch/bin
		echo restart >>/var/rainny/cti/log/freeswitch.log
		./freeswitch -nonat -nc
		echo start>>/var/rainny/cti/log/freeswitch.log
		echo `date` ------------------freeswitch is restart >>/var/rainny/cti/log/freeswitch.log
	else
		echo `date` freeswitch is running >>/var/rainny/cti/log/freeswitch.log
	fi
	sleep 2;
done
----------------------------------------------------------------


  • Free License
# FreeSWITCH Free License
# 
freeswitch.tar.bz2
# path:
cd 
freeswitch.xml

4.3 APPLY POD

4.3.1 Master-Node Run

  • Label the four worker nodes
[root@master1 ~]# kubectl get nodes -o wide
NAME         STATUS    ROLES     AGE       VERSION
master1      Ready     master    1d        v1.11.8
master2      Ready     master    1d        v1.11.8
master3      Ready     master    1d        v1.11.8
nodecti1     Ready     <none>    1d        v1.11.8
nodecti2     Ready     <none>    1d        v1.11.8
nodeippbx1   Ready     <none>    1d        v1.11.8
nodeippbx2   Ready     <none>    1d        v1.11.8
[root@master1 ~]# kubectl label node nodeippbx1 pcc/ippbx=true
[root@master1 ~]# kubectl label node nodeippbx2 pcc/ippbx=true
[root@master1 ~]# kubectl label node nodecti1 pcc/cti=true
[root@master1 ~]# kubectl label node nodecti2 pcc/cti=true
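
To verify the labels were applied (a quick check):

kubectl get nodes -l pcc/ippbx=true
kubectl get nodes -l pcc/cti=true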

4.3.2 Modify the Config File pcc.yml

  • Master-Node Run pcc.yml
###############################################################
# modify: image: 100.67.33.9/library/fs:deb1
# command: ["/bin/sh"]
# # args: ["-c", "while true; do echo hello; sleep 3;done"]
# args: ["/var/rainny/rc.local"]
#
# Note: the args path is a path inside the container
# Note: run pcc.yml on the master node
################################################################

apiVersion: v1
kind: Namespace
metadata:
  name: pcc
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ippbx
  namespace: pcc
spec:
  selector:
    matchLabels:
      app: ippbx
  serviceName: "ippbx"
  replicas: 2
  template:
    metadata:
      labels:
        app: ippbx
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - ippbx
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      containers:
      - name: ippbx
        image: 100.67.33.9/library/fs:deb1
        env:
        - name: TZ
          value: "Asia/Shanghai"
        command: ["/bin/sh"]
#        args: ["-c", "while true; do echo hello; sleep 3;done"]
        args: ["/var/rainny/rc.local"]
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/rainny
          name: rainny
        - mountPath: /usr/local/freeswitch/mod/managed
          name: managed
        - mountPath: /var/log
          name: log
        - mountPath: /etc/localtime
          name: localtime
          readOnly: true
        - mountPath: /etc/timezone
          name: timezone
          readOnly: true
        - mountPath: /usr/local/freeswitch
          name: freeswitch
        - mountPath: /etc/odbc.ini
          name: odbc-ini
        - mountPath: /etc/odbcinst.ini
          name: odbcinst-ini
#        readinessProbe:
#          tcpSocket:
#            port: 4473
#          initialDelaySeconds: 5
#          periodSeconds: 10
#        livenessProbe:
#          tcpSocket:
#            port: 4473
#          initialDelaySeconds: 15
#          periodSeconds: 20
      restartPolicy: Always
      nodeSelector:
        pcc/ippbx: "true"
      volumes:
      - name: rainny
        hostPath:
          path: /data/rainny
          type: Directory
      - name: managed
        hostPath:
          path: /data/rainny/ippbx/managed
      - name: log
        hostPath:
          path: /data/rainny/ippbx/log
      - name: localtime
        hostPath:
          path: /etc/localtime
      - name: timezone
        hostPath:
          path: /etc/timezone
      - name: freeswitch
        hostPath:
          path: /data/freeswitch
          type: Directory
      - name: odbc-ini
        hostPath:
          path: /data/rainny/odbc.ini
      - name: odbcinst-ini
        hostPath:
          path: /data/rainny/odbcinst.ini

#---
#apiVersion: v1
#kind: Namespace
#metadata:
#  name: pcc
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cti
  namespace: pcc
spec:
  selector:
    matchLabels:
      app: cti
  serviceName: "cti"
  replicas: 2
  template:
    metadata:
      labels:
        app: cti
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - cti
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 1
      hostNetwork: true
      containers:
      - name: cti
        image: 100.67.33.9/library/fs:deb1
        env:
        - name: TZ
          value: "Asia/Shanghai"
        command: ["/bin/sh"]
#        args: ["-c", "while true; do echo hello; sleep 3;done"]
        args: ["/var/rainny/rc.local"]
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /var/rainny
          name: rainny
        - mountPath: /usr/local/freeswitch/mod/managed
          name: managed
        - mountPath: /var/log
          name: log
        - mountPath: /etc/localtime
          name: localtime
          readOnly: true
        - mountPath: /etc/timezone
          name: timezone
          readOnly: true
        - mountPath: /usr/local/freeswitch
          name: freeswitch
        - mountPath: /etc/odbc.ini
          name: odbc-ini
        - mountPath: /etc/odbcinst.ini
          name: odbcinst-ini
#        readinessProbe:
#          tcpSocket:
#            port: 4473
#          initialDelaySeconds: 5
#          periodSeconds: 10
#        livenessProbe:
#          tcpSocket:
#            port: 4473
#          initialDelaySeconds: 15
#          periodSeconds: 20
      restartPolicy: Always
      nodeSelector:
        pcc/cti: "true"
      volumes:
      - name: rainny
        hostPath:
          path: /data/rainny
          type: Directory
      - name: managed 
        hostPath:
          path: /data/rainny/cti/managed
      - name: log
        hostPath:
          path: /data/rainny/cti/log
      - name: localtime
        hostPath:
          path: /etc/localtime
      - name: timezone
        hostPath:
          path: /etc/timezone
      - name: freeswitch
        hostPath:
          path: /data/freeswitch
          type: Directory
      - name: odbc-ini
        hostPath:
          path: /data/rainny/odbc.ini
      - name: odbcinst-ini
        hostPath:
          path: /data/rainny/odbcinst.ini

4.3.3 Apply Pod

  • Master-Node Run
kubectl delete -f pcc.yml ; kubectl apply -f pcc.yml ; sleep 6 ;kubectl get pod -n pcc -o wide
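
Optionally, wait for both StatefulSets to settle before moving on (a sketch using the names defined in pcc.yml):

kubectl rollout status statefulset/ippbx -n pcc
kubectl rollout status statefulset/cti -n pcc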

5 Config

5.1 CORE

  • databases


SELECT CONCAT('drop table ',table_name,';') FROM information_schema.`TABLES` WHERE table_schema='pccpbx';
SELECT COUNT(*) TABLES, table_schema FROM information_schema.TABLES   WHERE table_schema = 'pcccti' GROUP BY table_schema;
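
A minimal sketch of using the first statement, assuming a reachable MySQL server and valid credentials (db_host and db_user are placeholders):

# generate the DROP statements, review them, then apply
mysql -h db_host -u db_user -p -N -e "SELECT CONCAT('drop table ',table_name,';') FROM information_schema.TABLES WHERE table_schema='pccpbx';" > drop_pccpbx.sql
cat drop_pccpbx.sql    # review before executing
mysql -h db_host -u db_user -p pccpbx < drop_pccpbx.sql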

  • internal.xml


5.2 PROXY





6 Operations

6.1 Common Commands

  • Search for files
find -name "*.yml"
find -name "*.yaml"
Action           Command
View logs        kubectl logs -f pod/ippbx-0 -n pcc
Describe pod     kubectl describe pod/ippbx-0 -n pcc
View nodes/pods  kubectl get nodes; kubectl get pod -n pcc
Restart          systemctl start keepalived
Check process    ps -ef | grep keepalived
Check IP         ifconfig | grep eth0
Check IP         ip a
  • Configure a vi alias
vim /etc/profile

alias vi='vim'

source /etc/profile
  • K8s operations commands
kubectl apply -f pcc.yml
kubectl delete -f pcc.yml
kubectl get nodes
kubectl get pods
kubectl get po
kubectl get po -n pcc
kubectl get po -n pcc -o wide

# sh is /bin/sh; if the pod has only one container, the container name can be omitted
kubectl exec -it pod-name -n pcc sh

# view logs
kubectl logs -f $pod -n pcc
kubectl describe pod $pod -n pcc

kubectl get deployment
kubectl get deployment -n pcc
kubectl get deployment --all-namespaces


# statefulset
kubectl get sts -n pcc

# service
kubectl get service --all-namespaces

kubectl exec -it ippbx-1 -n pcc sh

Use kubectl exec -it <pod> -n pcc sh to enter a shell inside the container; type exit to leave. The pod keeps running after you exit.

pcc.yml configuration:
volumes: define the host node's directories or files to map into the pod
command and args: define the program the container runs
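
On the master node, the schema of these fields can be inspected with kubectl explain, e.g.:

kubectl explain statefulset.spec.template.spec.volumes
kubectl explain statefulset.spec.template.spec.containers.args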

cd /usr/local/freeswitch/bin
./fs_cli -H 127.0.0.1 -P 4521 -p wzw
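
fs_cli also accepts a one-shot command via -x, which is convenient for scripted health checks (a sketch; status is a standard FreeSWITCH API command):

./fs_cli -H 127.0.0.1 -P 4521 -p wzw -x "status"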


D. SIP-PROXY Installation

1. localrpm ready

  • bzip2-1.0.6-13.el7.x86_64.rpm
  • docker-rpm.tar.gz

docker local install

cat >> /etc/yum.repos.d/docker-ce-local.repo << EOF
[docker]
name=docker-local-repo
baseurl=file:///breeze/rpms/
gpgcheck=0
enabled=1
EOF

# refresh the yum cache
yum clean all && yum makecache
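
Before installing, a quick check that the local repo is visible (a sketch):

yum repolist enabled | grep -i docker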

# install docker and docker-compose
yum install -y docker docker-compose

# start the docker service and enable it at boot
systemctl start docker && systemctl enable docker

docker load -i docker-image.tar
docker images


docker load -i fsdeb.tar

  • Installation history
    1  [2019-03-28 17:29:19] vim /etc/ssh/sshd_config 
    2  [2019-03-28 17:30:06] cp /etc/ssh/sshd_config /etc/ssh/sshd_config.ori
    3  [2019-03-28 17:30:08] vim /etc/ssh/sshd_config 
    4  [2019-03-28 17:31:04] systemctl restart sshd
    5  [2019-03-28 17:35:06] cd /etc/ssh
    6  [2019-03-28 17:35:07] ls
    7  [2019-03-28 17:35:14] cp sshd_config sshd_config.ori
    8  [2019-03-28 17:35:18] cp sshd_config sshd_config.ori2
    9  [2019-03-28 17:35:21] vi sshd_config
   10  [2019-03-28 17:35:30] ifconfig
   11  [2019-03-28 17:35:39] systemctl restart sshd
   12  [2019-03-29 14:07:24] ls
   13  [2019-03-29 14:07:29] cd /app
   14  [2019-03-29 14:07:30] ls
   15  [2019-03-29 14:07:54] cat >> /etc/yum.repos.d/docker-ce-local.repo << EOF
[docker]
name=docker-local-repo
baseurl=file:///breeze/rpms/
gpgcheck=0
enabled=1
EOF

   16  [2019-03-29 14:08:17] vim /etc/yum.repos.d/docker-ce-local.repo 
   17  [2019-03-29 14:09:32] ls
   18  [2019-03-29 14:09:34] cd localrpm/
   19  [2019-03-29 14:09:35] ls
   20  [2019-03-29 14:09:51] tar -zxvf docker-rpm.tar.gz 
   21  [2019-03-29 14:10:02] ls
   22  [2019-03-29 14:10:14] vim /etc/yum.repos.d/docker-ce-local.repo 
   23  [2019-03-29 14:10:28] yum clean all && yum makecache
   24  [2019-03-29 14:11:00] yum install -y docker docker-compose
   25  [2019-03-29 14:11:59] systemctl start docker && systemctl enable docker
   26  [2019-03-29 14:12:11] ls
   27  [2019-03-29 14:12:34] yum localinstall bzip2-1.0.6-13.el7.x86_64.rpm 
   28  [2019-03-29 14:34:24] cd ../SIPPROXY/
   29  [2019-03-29 14:34:25] ls
   30  [2019-03-29 14:34:37] tar -zxvf rainny.tar.gz 
   31  [2019-03-29 14:34:44] ls
   32  [2019-03-29 14:34:55] cd app/
   33  [2019-03-29 14:34:57] ls
   34  [2019-03-29 14:35:05] mv rainny ../
   35  [2019-03-29 14:35:06] ls
   36  [2019-03-29 14:35:10] cd ..
   37  [2019-03-29 14:35:11] ls
   38  [2019-03-29 14:35:21] rm -rf app
   39  [2019-03-29 14:35:23] ls
   40  [2019-03-29 14:35:31] cd rainny/
   41  [2019-03-29 14:35:33] ls
   42  [2019-03-29 14:46:22] vim FreeswitchWatchdog 
   43  [2019-03-29 14:47:27] chmod +x FreeswitchWatchdog 
   44  [2019-03-29 14:47:31] ll
   45  [2019-03-29 14:47:46] chmod +x docker.sh 
   46  [2019-03-29 14:47:49] ll
   47  [2019-03-29 14:48:08] ./docker.sh 
   48  [2019-03-29 14:48:25] vim docker.sh 
   49  [2019-03-29 15:40:01] ps -ef |grep docker.sh 
   50  [2019-03-29 15:40:06] ps -ef |grep docke
   51  [2019-03-29 15:40:20] ps -ef |grep freeswitch
   52  [2019-03-29 15:43:30] docker images
   53  [2019-03-29 15:43:51] ls
   54  [2019-03-29 15:43:58] cd /app
   55  [2019-03-29 15:43:59] ls
   56  [2019-03-29 15:44:04] cd SIPPROXY/
   57  [2019-03-29 15:44:05] ls
   58  [2019-03-29 15:44:14] cd rainny/
   59  [2019-03-29 15:44:15] ls
   60  [2019-03-29 15:47:02] cd ..
   61  [2019-03-29 15:47:04] ls
   62  [2019-03-29 15:47:07] cd ..
   63  [2019-03-29 15:47:09] ls
   64  [2019-03-29 15:47:13] cd ..
   65  [2019-03-29 15:47:15] ls
   66  [2019-03-29 15:56:48] history 

Common commands

PATH: /var/rainny/ippbx


Operations

Common commands

  • Packet capture analysis
tcpdump -np -s 0 -i eth0 -w /app/demp.pcap udp

tcpdump -np -s 0 -i eth0   udp
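
The saved capture can be inspected in place with tcpdump's read mode, or copied off and opened in Wireshark (a sketch):

tcpdump -nr /app/demp.pcap | head -20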

  • K8s debugging commands

  • Master node

  • Path: /root/

kubectl delete -f pcc.yml;
kubectl apply -f pcc.yml ; sleep 6;
kubectl get pod -n pcc -o wide;

kubectl exec -it ippbx-1 -n pcc sh
Function        Command
Save history    history >> /root/nodeippbx2
View logs       kubectl logs -f ippbx-0
Delete pod      kubectl delete -f pcc.yml
Create pod      kubectl apply -f pcc.yml ; sleep 6
View pods       kubectl get pod -n pcc -o wide
Enter pod       kubectl exec -it ippbx-1 -n pcc sh
  • Search for files
  252  [2019-03-27 11:43:11] find -name "*.yml"
  253  [2019-03-27 11:44:24] find -name "*.yaml"
kubectl delete -f pcc.yml ; kubectl apply -f pcc.yml ; sleep 6 ;kubectl get pod -n pcc -o wide

./fs_cli -H 127.0.0.1 -p wzw -P 4521
docker load -i fsdeb.tar

docker images

mv  freeswitch freeswitch.old

