C/S通信Python版

近来天天看论文看得头疼,想着敲敲代码缓解下,顺便练练python,于是写了下面这个程序
主要功能:1.多C端与S端多线程通信
2.C端之间互相通信(通过S端转发)
3.对C端通信加入了敏感词过滤(DFA算法(中文敏感词过滤)+贝叶斯(英文敏感词过滤))
计划加入功能:1.RSA加密算法
2.在服务器端架设防火墙,可以对数据包做操作(libnids那一套)
3.加入群聊功能
4.图形界面

C端代码

import threading,time
import socket
import random
from Fillter import DFA#过滤言语模块
from Fillter import fillter_machine
# Connect to the relay server (S-end) listening on the local machine.
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect(('127.0.0.1',8082))
# Ask the user for the peer we want to talk to; the server expects a single
# "ip,port" message first and will dial that peer on our behalf.
ip=input("请输入要连接的IP:")
port=input("请输入要使用的端口:")
string=ip+','+port
sock.send(string.encode('utf-8'))

def send(sock):
    """Read lines from stdin, run them through both profanity filters,
    and send the (possibly censored) result to the server."""
    while 1:
        string=input()
        # root is the module-level DFA trie built in __main__ via DFA.init().
        # DFA.judge filters Chinese words; fillter_machine.run is the
        # English naive-Bayes filter. Either returning 1 censors the message.
        if DFA.judge(root,string)==1 or fillter_machine.run(string)==1:
            # The whole message is replaced, not just the offending word.
            string='***'
            string=string.encode('utf-8')
            print('请文明用语!')
        else:
            string=string.encode('utf-8')
        sock.send(string)

def recv(sock):
    """Print every message received from the server.

    Returns when the peer closes the connection. recv() yielding b'' means
    the connection is closed; the original looped forever on that condition,
    busy-spinning at 100% CPU.
    """
    while 1:
        msg=sock.recv(1024)
        if not msg:
            break
        print(msg.decode())

#申请线程
def Create_Thread_Recv(sock):
    """Start a background thread that pumps incoming messages via recv()."""
    worker = threading.Thread(target=recv, args=(sock,))
    worker.start()

def Create_Thread_Send(sock):
    """Start a background thread running send() and return it to the caller."""
    worker = threading.Thread(target=send, args=(sock,))
    worker.start()
    return worker

if __name__=='__main__':
    # Build the sensitive-word trie once, then run the reader and writer
    # threads against the single server connection opened at import time.
    root=DFA.init()
    Create_Thread_Recv(sock)
    Create_Thread_Send(sock)

另一个C端测试代码

import socket
import threading
import multiprocessing,time
import os,sys


def Create():
    """Create a TCP listening socket bound to localhost:8083.

    SO_REUSEADDR allows a quick restart without waiting out TIME_WAIT.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('localhost', 8083))
    listener.listen(10)
    return listener


def recv(sock):
    """Print messages from the peer until the connection closes.

    The original never checked for b'' (peer closed) and so spun forever
    printing empty lines; break out instead.
    """
    while 1:
        msg=sock.recv(1024)
        if not msg:
            break
        print(msg.decode())

def send(sock):
    """Forward each line typed on stdin to the peer, utf-8 encoded."""
    while 1:
        line = input()
        sock.send(line.encode('utf-8'))


def Create_Thread_Recv_S2B(sock_B):
    """Spawn the receive loop for this peer in a background thread."""
    worker = threading.Thread(target=recv, args=(sock_B,))
    worker.start()
def Create_Thread_Send_S2B(sock_B):
    """Spawn the send loop for this peer in a background thread."""
    worker = threading.Thread(target=send, args=(sock_B,))
    worker.start()



if __name__=='__main__':
    server=Create()
    # Blocks until the relay server dials us on port 8083.
    client, address = server.accept()
    print(address)
    Create_Thread_Recv_S2B(client)
    Create_Thread_Send_S2B(client)

S端代码

import socket
import threading
import multiprocessing,time
import os,sys
from scapy.all import *



#创建本地socket
#创建本地socket
def Create():
    """Create the relay server's listening socket on all interfaces, port 8082.

    SO_REUSEADDR lets the server restart without waiting out TIME_WAIT.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('', 8082))
    listener.listen(10)
    return listener
#用来存请求的ip和port
#用来存请求的ip和port
class Client():
    """Record of the peer address a connecting client asked us to dial.

    port is kept as the raw string received from the wire; it is converted
    with int() at connect time (see Get_Connection).
    """
    def __init__(self,ip,port):
        self.ip=ip
        self.port=port

#去请求要连接的客户端
#去请求要连接的客户端
def Get_Connection(a,client):
    """Dial the peer described by `a` (.ip / .port) and tell the requesting
    client that the link is up. Returns the new socket to the peer."""
    peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    peer.connect((a.ip, int(a.port)))
    client.send('连接成功'.encode('utf-8'))
    return peer

def send(sock_B,sock_A,string):
    """Relay traffic from client A to client B (the A-->B direction).

    string: unused legacy parameter kept for interface compatibility.
    Returns when A closes its side. The original treated b'' (closed
    connection) as "no message yet" and looped forever, printing
    'No message' every 2 seconds.
    """
    while 1:
        msg=sock_A.recv(1024)
        if not msg:
            print('No message')
            break
        sock_B.send(msg)
        print('A-->B:'+msg.decode())

def recv(sock_B,sock_A,string_accept):
    """Relay traffic from client B to client A (the B-->A direction).

    string_accept: unused legacy parameter kept for interface compatibility.
    Returns when B closes its side (recv yields b''); the original looped
    forever on a closed socket. The original's per-message time.sleep(1)
    throttle served no purpose and is removed; the decode/re-encode round
    trip is also dropped since it reproduces the same utf-8 bytes.
    """
    while 1:
        msg=sock_B.recv(1024)
        if not msg:
            break
        print('B-->A:'+msg.decode())
        sock_A.send(msg)

#申请线程S->C
#申请线程S->C
def Create_Thread_Recv_S2B(sock_B,sock_A,string_accept):
    """Spawn the B-->A relay loop (recv) in a background thread."""
    worker = threading.Thread(target=recv, args=(sock_B, sock_A, string_accept))
    worker.start()
def Create_Thread_Send_S2B(sock_B,sock_A,string_send):
    """Spawn the A-->B relay loop (send) in a background thread."""
    worker = threading.Thread(target=send, args=(sock_B, sock_A, string_send))
    worker.start()



#拦截查包处理
def pcap(ip,port):
    """Capture packets with scapy's sniff() and print the capture summary.

    NOTE(review): ip/port are accepted but never used — presumably they were
    meant to build a BPF filter; confirm before relying on this.
    sniff(count=0) captures indefinitely, so this blocks its thread forever.
    """
    pack=sniff(count=0)
    print(pack)

def Create_Thread_pcapcheck(ip,port):
    """Run the packet-capture loop (pcap) in a background thread."""
    worker = threading.Thread(target=pcap, args=(ip, port))
    worker.start()


if __name__=='__main__':
    # NOTE(review): `global` at module level is a no-op — these names are
    # already module-level; the statements can be deleted.
    global string_accept
    global string_send
    string_accept='accept'
    string_send=''
    server=Create()
    # Wait for the first client (A) to connect.
    client,address=server.accept()
    # Protocol: the client first sends "ip,port" naming the peer to reach.
    msg=client.recv(1024).decode().split(',')
    a=Client(msg[0],msg[1])
    print(a.ip,a.port)
    client.send("收到IP和端口,请等待连接...".encode('utf-8'))
    # Dial the requested peer (B) on A's behalf.
    s_c = Get_Connection(a,client)
    # Packet-capture thread (scapy); see pcap() for caveats.
    Create_Thread_pcapcheck(a.ip,a.port)
    # Relay threads: A-->B and B-->A.
    Create_Thread_Recv_S2B(client,s_c,string_accept)
    Create_Thread_Send_S2B(client,s_c,string_send)

DFA模块代码

import jieba

class Node():
    """A single trie node of the DFA sensitive-word filter."""
    def __init__(self):
        # Mapping of next character -> child Node; None until the first child.
        self.children=None
        # Set to the full sensitive word on that word's terminal node.
        self.badword=None

def Add(root,word):
    """Insert `word` into the trie rooted at `root`, marking the terminal
    node with the complete word."""
    node = root
    for ch in word:
        if node.children is None:
            node.children = {ch: Node()}
        elif ch not in node.children:
            node.children[ch] = Node()
        node = node.children[ch]
    node.badword = word

def init(path='/Users/macbook/Desktop/Python/SOCKET/Fillter/处理后的中文词库/其他词库.txt'):
    """Build the DFA trie from a word-list file, one word per line.

    path: word-list file (default kept for backward compatibility with the
    original hard-coded location). Returns the trie root Node.
    """
    root = Node()
    # Context manager closes the file even on error — the original leaked
    # the handle and relied on the platform default encoding.
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            Add(root, line.rstrip('\n'))
    return root

def Search(root,word):
    """Return 1 if any sensitive word in the trie occurs anywhere in `word`,
    else 0.

    From every start index we walk as deep into the trie as the text allows.
    The fix: test badword at EVERY node on the path, not only at the node
    where the walk stops. The original compared only the deepest node, so a
    short bad word embedded in a longer partial match (e.g. 'ab' inside
    'abc' when 'abcd' is also in the trie) was missed.
    """
    for i in range(len(word)):
        p = root
        j = i
        while j < len(word) and p.children is not None and word[j] in p.children:
            p = p.children[word[j]]
            j += 1
            # badword is set only on terminal nodes, so reaching one means
            # word[i:j] is a complete sensitive word.
            if p.badword is not None:
                return 1
    return 0

def judge(root,string):
    """Tokenize `string` with jieba and return 1 if any token hits the
    sensitive-word trie, else 0."""
    tokens = '/'.join(jieba.cut(string, cut_all=False)).split('/')
    for token in tokens:
        if Search(root, token) == 1:
            return 1
    return 0

if __name__=='__main__':
    # Manual smoke test: build the trie from disk and probe one phrase.
    root=init()
    print(judge(root,'第一夫人'))

贝叶斯代码

from numpy import *
import sys,os
import  jieba

# Punctuation tokens stripped from jieba output before classification.
# NOTE(review): the list contains duplicates ('|', '.', '/') — harmless for
# membership tests, but probably unintentional.
mark=[',','.','/','`','!','@','~','#','$','%','^','&','*','(',')','-','_','=','+','|','|',';',':','"','\'','.','/','?']





#拆分语句
#拆分语句
def Detach_sentence(data):
    """Tokenize `data` with jieba and drop punctuation tokens.

    Returns the list of non-punctuation tokens. Cleanup: the original
    shadowed the builtin `list` and kept a dead local; a comprehension
    does the same filtering.
    """
    tokens = '/'.join(jieba.cut(data, cut_all=False)).split('/')
    return [tok for tok in tokens if tok not in mark]


def loadDataSet_Chinese(path='/Users/macbook/Desktop/Python/SOCKET/Fillter/中文词库/test.txt'):
    """Load the Chinese sensitive-phrase corpus.

    path: one phrase per line (default kept for backward compatibility with
    the original hard-coded location).
    Returns (postingList, classVed): the phrases and a parallel all-1 label
    list — every line in this corpus is treated as abusive.
    Fixes: the original leaked the file handle, kept an unused local, and
    printed the whole list as debug noise.
    """
    postingList = []
    classVed = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            classVed.append(1)
            postingList.append(line.rstrip('\n'))
    return postingList, classVed


#词条字典构建
def createVocaList_Chinese(dataSet):
    vocabSet=set([])#set会自动滤掉重复的数据
    for document in dataSet:
        #'|'操作可以获取两个列表的并集
        vocabSet=vocabSet|set(dataSet)
    return list(vocabSet)







def loadDataSet_English(path='/Users/macbook/Desktop/Python/SOCKET/Fillter/英文敏感词库.txt'):
    """Load the English corpus; each line is 'space separated words;label'.

    path: corpus file (default kept for backward compatibility with the
    original hard-coded location).
    Returns (postingList, classVed): per-line token lists and int labels
    (1 = abusive, 0 = normal).
    Fixes: the original leaked the file handle and kept a dead local.
    """
    postingList = []
    classVed = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip('\n').split(';')
            classVed.append(int(parts[1]))
            postingList.append(parts[0].split(' '))
    return postingList, classVed

#词条字典构建
def createVocaList_English(dataSet):
    vocabSet=set([])#set会自动滤掉重复的数据
    for document in dataSet:
        #'|'操作可以获取两个列表的并集
        vocabSet=vocabSet|set(document)
    return list(vocabSet)


#词条向量构建:传入词条字典和待测试的对应文本词条集合,得到词条向量
def setofWord2Vec(vocabList,inputSet):
    #初始化词条向量全为'0'
    returnVec=[0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            #如果词条集合中词条出现在字典库中,则标记对应位置的元素为'1'
            returnVec[vocabList.index(word)]=1
        else:
            #print("%s 不在字典中"%word)
            continue
    return returnVec


#trainMatrix:多个词条向量组成的集合矩阵
#trainCategory:分类标签集合
def trainNBO(trainMatrix,trainCategory):
    #记录词条向量个数
    numTrainDocs=len(trainMatrix)

    #单个词条向量的长度
    numWords=len(trainMatrix[0])

    #计算所有词条中,侮辱性的概率
    pAbusive=sum(trainCategory)/float(numTrainDocs)

    # 初始化 侮辱性/非侮辱性 言论中 词条分布总和向量
    # 引入numpy后,zeros(numWords)方法用来得到一个和numWords等长的0矩阵
    p0Num = zeros(numWords)
    p1Num = zeros(numWords)
    #初始化 侮辱性/非侮辱性 言论中 词条总个数
    p0Denom=1.0
    p1Denom=1.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            # 如果为侮辱性言论,记录所有侮辱性词条向量累加之后的总向量
            p1Num += trainMatrix[i]
            # 记录所有侮辱性言论中,总的词条个数
            p1Denom += sum(trainMatrix[i])
        else:
            # 如果为非侮辱性言论,记录所有非侮辱性词条向量累加之后的总向量
            p0Num += trainMatrix[i]
            # 记录所有非侮辱性言论中,总的词条个数
            p0Denom += sum(trainMatrix[i])

    # 计算出一个标示每个词可能是侮辱性词汇概率的向量
    p1Vect = p1Num / p1Denom
    # 计算出一个标示每个词可能是非侮辱性词汇概率的向量
    p0Vect = p0Num / p0Denom
    return p0Vect, p1Vect, pAbusive


# vec2Classify:待测试数据的词条向量
# pClass1:所有词条向量中是侮辱性词条向量的概率
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
	# sum(vec2Classify * p1Vec)当前词条向量中,每个词条可能是侮辱性词条的概率之积p(w:c1)
	# pClass1 : p(c1)

    p1 = sum(vec2Classify * p1Vec) + log(pClass1)

    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0




# Cached (vocabList, p0V, p1V, pAb) so the model is trained only once
# per process instead of on every message.
_trained_model = None

def init(inputstring):
    """Classify a tokenized message; return 1 if abusive, else 0.

    inputstring: list of tokens (see Detach_sentence).
    Fixes vs the original: (1) the English corpus was reloaded and the model
    retrained on EVERY call — the trained model is now cached at module
    level; (2) dead commented-out Chinese-corpus code removed.
    """
    global _trained_model
    if _trained_model is None:
        postingList, classVec = loadDataSet_English()
        vocabList = createVocaList_English(postingList)
        trainMat = [setofWord2Vec(vocabList, posting) for posting in postingList]
        _trained_model = (vocabList,) + trainNBO(trainMat, classVec)
    vocabList, p0V, p1V, pAb = _trained_model
    thisDoc = array(setofWord2Vec(vocabList, inputstring))
    return classifyNB(thisDoc, p0V, p1V, pAb)


def run(inputstring):
    """Tokenize `inputstring` and classify it (1 = abusive, 0 = clean).

    The original wrapped this in `while 1:` around an unconditional return,
    so the loop body executed exactly once — the pointless loop is removed.
    """
    return init(Detach_sentence(inputstring))


if __name__=='__main__':
    # Interactive smoke test: classify each stdin line until interrupted.
    while 1:
        data=input()
        print(run(data))

敏感词库是从网上扒的,这里就不给链接了

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值