Implementing ws service load testing in Python

Contents

1. Multiprocessing + multithreading + asyncio event loop

2. Multiprocessing + thread pool + asyncio event loop

3. Multiprocessing + asyncio event loop

4. Thread pool + asyncio event loop

       

       Load testing is usually done with a tool like JMeter: you configure the concurrency (the number of threads) and the number of request rounds, and finally check statistics on whether the responses were correct. I recently wrote a ws service that a colleague load-tested with JMeter, and I started wondering how to implement JMeter's load-testing functionality myself. Using the Python language, the aiohttp client, and techniques such as multiprocessing and multithreading, I implemented the load-testing functionality in four broadly similar ways.

      Overall requirements: configurable concurrency, number of request rounds, the interval between requests on each concurrent connection, and response accuracy statistics.

1. Multiprocessing + multithreading + asyncio event loop

Multiprocessing + multithreading + async coroutines to simulate JMeter-style ws load testing (statistics are not implemented in this variant).
Multiprocessing: simulates the number of concurrent users.
Multithreading: simulates the request rounds, one thread per round.

Each process spawns threads, and each thread runs a coroutine that calls the ws service; threads are started a random interval (up to 1 second) apart, simulating a user who sends a request, waits a moment, and then sends the next one. The nesting is sketched below, followed by the full code.
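As a bare skeleton of the nesting (everything application-specific stripped out; do_request, one_round, and one_user are illustrative stand-ins, not names from the script below):

import asyncio
import multiprocessing
import threading
import time

async def do_request(user_id, round_index):
    await asyncio.sleep(0.1)  # stand-in for the ws call

def one_round(user_id, round_index):
    # Thread body: a fresh event loop per round
    asyncio.run(do_request(user_id, round_index))

def one_user(user_id, num_rounds):
    # Process body: one thread per round, started at intervals
    for r in range(num_rounds):
        threading.Thread(target=one_round, args=(user_id, r)).start()
        time.sleep(1)

if __name__ == '__main__':
    procs = [multiprocessing.Process(target=one_user, args=(u, 3)) for u in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()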

import asyncio
import aiohttp
import multiprocessing
import json
import time
import threading
import random
import logging
import logging.handlers

random.seed(100)

class Logger(object):
    def __init__(self,log_name,log_level, log_file):
        self.log_name = log_name
        self.log_level = log_level
        self.log_file = log_file
        self.logger  = self.logger_init()


    def logger_init(self):
        logger_name = self.log_name
        log_file = self.log_file
        level = self.log_level
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        formatter = logging.Formatter("%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s")

        # fileHandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=83886080, backupCount=20, encoding='utf-8')
        # fileHandler.setFormatter(formatter)
        # logger.addHandler(fileHandler)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        return logger

logger = Logger(log_name='test_log', log_level=10, log_file='./test.log').logger

async def send_message(session, round_index, url, data, user_id):
    try:
        async with session.ws_connect(url) as ws:
            await ws.send_str(data)
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    if msg.data == 'close cmd':
                        await ws.close()
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    logger.info(f'ws connection closed with exception {ws.exception()}')
            logger.info(f"user_id: {user_id}---re_index:{round_index}--finished!")
    except Exception as e:
        # Use the url argument here: ws may never have been bound if ws_connect itself failed
        logger.info(f"Request failed: {url} - {e}")
        return False


def simulate_user(user_id,round_index, url, data):
    async def _simulate_user():
        async with aiohttp.ClientSession() as session:
            await send_message(session, round_index, url, data, user_id)
    asyncio.run(_simulate_user())


def start_thread(user_id, round_index, url, data):
    # Start a thread that runs one round's ws request (fire-and-forget; rounds may overlap)
    thread = threading.Thread(target=simulate_user, args=(user_id, round_index, url, data))
    thread.start()



def simulate_round(user_id, num_rounds, url, data):
    for round_index in range(num_rounds):
        # Start a new thread to handle one full WebSocket connection cycle
        logger.info(f"user_id: {user_id}---re_index:{round_index}--start!")
        start_thread(user_id, round_index, url, data)
        # Sleep a random interval (0-1 s) between rounds
        t = random.random()
        time.sleep(t)


def main(num_client, num_rounds, url, data):
    # Spawn one process per simulated user
    processes = []
    for user_id in range(num_client):
        process = multiprocessing.Process(target=simulate_round, args=(user_id, num_rounds, url, data,))
        processes.append(process)
        process.start()

    # Wait for all processes to finish
    for process in processes:
        process.join()


if __name__ == '__main__':
    url = "ws://ip:port/ws"
    body = {
        "params": {
            "data": {
                "content":"你好"
                }
        }
    }
    data = json.dumps(body,  ensure_ascii=False)
    num_client = 5  # number of concurrent simulated users
    num_rounds = 20  # rounds per user: 5 users x 20 rounds = 100 requests in total
    main(num_client,num_rounds, url, data)

2. Multiprocessing + thread pool + asyncio event loop

Each process opens multiple threads; for easier management they are run through a thread pool.

Multiprocessing + thread pool + async coroutines to simulate JMeter-style ws load testing (this variant does implement statistics).
Multiprocessing: simulates the number of concurrent users.
Thread pool: simulates the request rounds.

Each process opens a thread pool, and each pool thread runs a coroutine that calls the ws service; threads are started a configurable interval apart, simulating a user who sends a request, waits a while, and then sends the next one.

To record the outcome of every request we need a globally shared variable. Since there are multiple processes, each of which starts multiple threads, resources are contended and shared both between processes and between threads, so we use multiprocessing.Manager() together with a lock. A minimal sketch of the sharing mechanism is shown below, then the full code.
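Here is a minimal, standalone sketch of Manager-based sharing (the names worker, shared, and count are illustrative, not taken from the test script):

import multiprocessing

def worker(shared, lock):
    # Read-modify-write on a Manager dict is not atomic, so guard it with the lock
    with lock:
        shared['count'] += 1

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared = manager.dict({'count': 0})
    lock = manager.Lock()
    processes = [multiprocessing.Process(target=worker, args=(shared, lock)) for _ in range(4)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    print(shared['count'])  # always 4, thanks to the lock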


import asyncio
import aiohttp
import multiprocessing
import json
import time
import random
from concurrent.futures import ThreadPoolExecutor
import logging
import logging.handlers

random.seed(100)


# Create a Manager for cross-process shared state
manager = multiprocessing.Manager()
lock = manager.Lock()
# Aggregated results
results = manager.dict({'success': 0, 'failure': 0, 'total_time': []})
target_res = "你是谁"

class Logger(object):
    def __init__(self,log_name,log_level, log_file):
        self.log_name = log_name
        self.log_level = log_level
        self.log_file = log_file
        self.logger  = self.logger_init()


    def logger_init(self):
        logger_name = self.log_name
        log_file = self.log_file
        level = self.log_level
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        formatter = logging.Formatter("%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s")

        # fileHandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=83886080, backupCount=20, encoding='utf-8')
        # fileHandler.setFormatter(formatter)
        # logger.addHandler(fileHandler)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        return logger

logger = Logger(log_name='test_log', log_level=10, log_file='./test.log').logger

async def send_message(session, round_index, url, data, user_id, lock):
    try:
        start_time = time.time()
        response = ""
        async with session.ws_connect(url) as ws:
            await ws.send_str(data)
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    if msg.data == 'close cmd':
                        await ws.close()
                    else:
                        response += json.loads(msg.data)['result']
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    logger.info(f'ws connection closed with exception {ws.exception()}')
            end_time = time.time()
            with lock:
                if target_res == response:
                    results['success'] += 1
                else:
                    results['failure'] += 1
                # Record the elapsed time (reassign so the Manager dict sees the update)
                temp = results['total_time']
                temp.append(end_time - start_time)
                results['total_time'] = temp
            logger.info(f"user_id: {user_id}---re_index:{round_index}--finished!")
    except Exception as e:
        # Use the url argument here: ws may never have been bound if ws_connect itself failed
        logger.info(f"Request failed: {url} - {e}")
        return False


def simulate_user(user_id,round_index, url, data, lock):
    async def _simulate_user():
        async with aiohttp.ClientSession() as session:
            await send_message(session, round_index, url, data, user_id, lock)

    asyncio.run(_simulate_user())


def simulate_round(user_id, num_rounds, url, data, interval_time, lock):
    # Each user sends the configured number of rounds, one pool thread per round
    with ThreadPoolExecutor(max_workers=num_rounds) as executor:
        for round_index in range(num_rounds):
            logger.info(f"user_id: {user_id}---re_index:{round_index}--start!")
            executor.submit(simulate_user, user_id, round_index, url, data, lock)
            time.sleep(interval_time)

        # The with-block calls executor.shutdown(wait=True) on exit,
        # waiting for all submitted rounds to finish



def main(num_client, num_rounds, url, data, interval_time):
    start = time.time()
    # Spawn one process per simulated user
    processes = []
    for user_id in range(num_client):
        process = multiprocessing.Process(target=simulate_round, args=(user_id, num_rounds, url, data, interval_time, lock,))
        processes.append(process)
        process.start()
        time.sleep(0.6)  # stagger process start-up slightly
    # Wait for all processes to finish
    for process in processes:
        process.join()

    end = time.time()
    print(f"total_time: {end - start} --- turn time {(end - start) / num_rounds}")

    # Print the aggregated statistics
    print("results", results)
    total_requests = results['success'] + results['failure']
    accuracy_rate = (results['success'] / total_requests) * 100 if total_requests > 0 else 0
    average_response_time = sum(results['total_time']) / total_requests if total_requests > 0 else 0

    print(f"\nTotal requests: {total_requests}")
    print(f"Success: {results['success']}")
    print(f"Failure: {results['failure']}")
    print(f"Accuracy rate: {accuracy_rate:.2f}%")
    print(
        f"Max:{max(results['total_time'])}--Min{min(results['total_time'])}--Average response time: {average_response_time:.2f} seconds")


if __name__ == '__main__':
    url = "ws://ip:port/ws"
    body = {
        "params": {
            "data": {
                "content": "你好"
            }
        }
    }
    data = json.dumps(body,  ensure_ascii=False)
    num_client = 5  # number of concurrent simulated users
    num_rounds = 30  # rounds per user: 5 users x 30 rounds = 150 requests in total
    interval_time = 0.5
    main(num_client,num_rounds, url, data,interval_time)

3. Multiprocessing + asyncio event loop

This approach is more lightweight: there are no threads involved at all, only processes and coroutines.

Multiprocessing + asyncio event loop:
Multiprocessing: simulates the number of concurrent users.
The asyncio event loop simulates the request rounds.

Inside the event loop, each round creates a task with create_task, representing one request. Do not await the task at creation time, or the rounds degrade into serial, synchronous execution; create all the tasks first, then wait for them. The pattern is sketched below.

As before, recording results means multiple processes operating on one dict, which requires multiprocessing.Manager().dict() to share the variable. Since there are no threads, no thread lock is taken here; note, though, that a read-modify-write such as results['success'] += 1 is not atomic across processes either, so strictly correct accounting would still use a manager.Lock().
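A minimal, self-contained sketch of the fire-then-await pattern (fake_request and the sleep durations are illustrative stand-ins, not part of the test script):

import asyncio

async def fake_request(i):
    await asyncio.sleep(0.1)  # stand-in for one ws round trip
    return i

async def main():
    tasks = []
    for i in range(3):
        # create_task schedules the coroutine right away; awaiting it here
        # would serialize the rounds instead of overlapping them
        tasks.append(asyncio.create_task(fake_request(i)))
        await asyncio.sleep(0.01)  # pacing between rounds
    # Await all tasks before returning, otherwise the loop (and any
    # open connections) would be torn down mid-flight
    print(await asyncio.gather(*tasks))

asyncio.run(main())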


import asyncio
import aiohttp
import multiprocessing
import json
import time
import random
import logging
import logging.handlers

random.seed(100)

class Logger(object):
    def __init__(self,log_name,log_level, log_file):
        self.log_name = log_name
        self.log_level = log_level
        self.log_file = log_file
        self.logger  = self.logger_init()


    def logger_init(self):
        logger_name = self.log_name
        log_file = self.log_file
        level = self.log_level
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        formatter = logging.Formatter("%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s")

        # fileHandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=83886080, backupCount=20, encoding='utf-8')
        # fileHandler.setFormatter(formatter)
        # logger.addHandler(fileHandler)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        return logger

logger = Logger(log_name='test_log', log_level=10, log_file='./test.log').logger


# Create a Manager for cross-process shared state
manager = multiprocessing.Manager()
# Aggregated results
results = manager.dict({'success': 0, 'failure': 0, 'total_time': []})
target_res = "你是谁"
async def send_message(session, round_index, url, data, user_id):
    try:
        start_time = time.time()
        response = ""
        async with session.ws_connect(url) as ws:
            await ws.send_str(data)
            logger.info(f"user_id: {user_id}---re_index:{round_index}--sent!")
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    if msg.data == 'close cmd':
                        await ws.close()
                    else:
                        response += json.loads(msg.data)['result']
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    logger.info(f'ws connection closed with exception {ws.exception()}')
            logger.info(f"user_id: {user_id}---re_index:{round_index}--finished!")

        end_time = time.time()
        # No lock here (single-threaded per process); note that this
        # read-modify-write is still not atomic across processes
        if target_res == response:
            results['success'] += 1
        else:
            results['failure'] += 1
        # Record the elapsed time (reassign so the Manager dict sees the update)
        temp = results['total_time']
        temp.append(end_time - start_time)
        results['total_time'] = temp
    except Exception as e:
        logger.info(f"user_id: {user_id}---re_index:{round_index}----Request failed: - {e}")
        return False


async def simulate_user(user_id, num_rounds, url, data, iswait_last_finish):
    async with aiohttp.ClientSession() as session:
        tasks = []
        for round_index in range(num_rounds):
            logger.info(f"user_id: {user_id}---re_index:{round_index}--start!")
            task = asyncio.create_task(send_message(session, round_index, url, data, user_id))
            tasks.append(task)
            # Pace the rounds: wait 1 second before starting the next one
            await asyncio.sleep(1)
            if iswait_last_finish:
                # Wait for this round to finish before starting the next;
                # skip this await to fire rounds at a fixed interval instead
                await task

        if not iswait_last_finish:
            # The tasks must be awaited; otherwise simulate_user returns immediately,
            # the session closes the ws connections, and the server fails with
            # "Cannot write to closing transport"
            for task in tasks:
                await task

def simulate_round(user_id,num_rounds,url, data, iswait_last_finish):
    asyncio.run(simulate_user(user_id, num_rounds,url, data, iswait_last_finish))

def main(num_client, num_rounds, url, data, iswait_last_finish):
    start = time.time()
    # Spawn one process per simulated user
    processes = []
    for user_id in range(num_client):
        process = multiprocessing.Process(target=simulate_round, args=(user_id, num_rounds, url, data, iswait_last_finish,))
        processes.append(process)
        process.start()

    # Wait for all processes to finish
    for process in processes:
        process.join()

    end = time.time()

    print(f"total_time: {end - start} --- turn time {(end - start) / num_rounds}")

    # Print the aggregated statistics
    print("results", results)
    total_requests = results['success'] + results['failure']
    accuracy_rate = (results['success'] / total_requests) * 100 if total_requests > 0 else 0
    average_response_time = sum(results['total_time']) / total_requests if total_requests > 0 else 0

    print(f"\nTotal requests: {total_requests}")
    print(f"Success: {results['success']}")
    print(f"Failure: {results['failure']}")
    print(f"Accuracy rate: {accuracy_rate:.2f}%")
    print(f"Max:{max(results['total_time'])}--Min{min(results['total_time'])}--Average response time: {average_response_time:.2f} seconds")

if __name__ == '__main__':
    url = "ws://ip:port/ws"
    body = {
        "params": {
            "data": {
                "content": "你好"
            }
        }
    }
    iswait_last_finish = True
    data = json.dumps(body,  ensure_ascii=False)
    num_client = 5  # number of concurrent simulated users
    num_rounds = 10  # rounds per user
    main(num_client,num_rounds, url, data, iswait_last_finish)

4. Thread pool + asyncio event loop

This is arguably the most sensible of the four approaches: load tests usually involve a large number of concurrent connections, and opening one process per connection demands many CPU cores, whereas threads cost comparatively little, so this approach supports much higher concurrency.

Thread pool + async coroutines to simulate JMeter-style ws load testing (statistics implemented).
Thread pool: simulates the number of concurrent users.
Async coroutines: simulate the request rounds.

The thread pool models the users; inside each thread's event loop, requests are sent 1 second apart, simulating a user who sends a request, waits a second, and then sends the next one.

Because multiple threads update the shared results dict, a lock is needed, or the final counts recorded in the dict can drift from the true results. A minimal sketch of the pattern is shown below, then the full code.
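Here is a minimal, standalone sketch of the thread-pool/event-loop/lock combination (user_session, run_user, and the sleeps are illustrative stand-ins):

import asyncio
import threading
from concurrent.futures import ThreadPoolExecutor

results = {'success': 0}
lock = threading.Lock()

async def user_session(user_id):
    await asyncio.sleep(0.1)  # stand-in for the ws rounds
    with lock:  # += on a plain dict is not atomic across threads
        results['success'] += 1

def run_user(user_id):
    # Each worker thread owns its own event loop via asyncio.run
    asyncio.run(user_session(user_id))

with ThreadPoolExecutor(max_workers=3) as executor:
    for i in range(3):
        executor.submit(run_user, i)

print(results)  # {'success': 3}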

import asyncio
import aiohttp
import json
import time
import threading
import random
from concurrent.futures import ThreadPoolExecutor
import logging
import logging.handlers

random.seed(100)

# A global lock shared by all worker threads
lock = threading.Lock()

# Aggregated results
results = {'success': 0, 'failure': 0, 'total_time': []}
target_res = "你好"

class Logger(object):
    def __init__(self,log_name,log_level, log_file):
        self.log_name = log_name
        self.log_level = log_level
        self.log_file = log_file
        self.logger  = self.logger_init()


    def logger_init(self):
        logger_name = self.log_name
        log_file = self.log_file
        level = self.log_level
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        formatter = logging.Formatter("%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s")

        # fileHandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=83886080, backupCount=20, encoding='utf-8')
        # fileHandler.setFormatter(formatter)
        # logger.addHandler(fileHandler)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        logger.addHandler(stream_handler)
        return logger

logger = Logger(log_name='test_log', log_level=10, log_file='./test.log').logger

async def send_message(session, round_index, url, data, user_id, lock):
    try:
        start_time = time.time()
        response = ""
        async with session.ws_connect(url) as ws:
            await ws.send_str(data)
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    if msg.data == 'close cmd':
                        await ws.close()
                    else:
                        response += json.loads(msg.data)['result']
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    logger.info(f'ws connection closed with exception {ws.exception()}')
            end_time = time.time()
            with lock:
                if target_res == response:
                    results['success'] += 1
                else:
                    results['failure'] += 1
                # Record the elapsed time
                temp = results['total_time']
                temp.append(end_time - start_time)
                results['total_time'] = temp
            logger.info(f"user_id: {user_id}---re_index:{round_index}--finished!")
    except Exception as e:
        # Use the url argument here: ws may never have been bound if ws_connect itself failed
        logger.info(f"Request failed: {url} - {e}")
        return False


async def simulate_user(user_id, num_rounds, url, data, iswait_last_finish, lock):
    async with aiohttp.ClientSession() as session:
        tasks = []
        for round_index in range(num_rounds):
            logger.info(f"user_id: {user_id}---re_index:{round_index}--start!")
            task = asyncio.create_task(send_message(session, round_index, url, data, user_id, lock))
            tasks.append(task)
            # Pace the rounds: wait 1 second before starting the next one
            await asyncio.sleep(1)
            if iswait_last_finish:
                # Wait for this round to finish before starting the next;
                # skip this await to fire rounds at a fixed interval instead
                await task

        if not iswait_last_finish:
            # The tasks must be awaited; otherwise simulate_user returns immediately,
            # the session closes the ws connections, and the server fails with
            # "Cannot write to closing transport"
            for task in tasks:
                await task


def simulate_round(user_id,num_rounds,url, data, iswait_last_finish, lock):
    asyncio.run(simulate_user(user_id, num_rounds,url, data, iswait_last_finish, lock))




def main(num_client, num_rounds, url, data, interval_time, iswait_last_finish, lock):
    start = time.time()
    # The pool models the users, so size it by num_client (one worker thread per user)
    with ThreadPoolExecutor(max_workers=num_client) as executor:
        for user_id in range(num_client):
            logger.info(f"user_id: {user_id}--start!")
            executor.submit(simulate_round, user_id, num_rounds, url, data, iswait_last_finish, lock)
            time.sleep(interval_time)

        # The with-block calls executor.shutdown(wait=True) on exit,
        # waiting for all users to finish

    end = time.time()
    print(f"total_time: {end - start} --- turn time {(end - start) / num_rounds}")

    # Print the aggregated statistics
    print("results", results)
    total_requests = results['success'] + results['failure']
    accuracy_rate = (results['success'] / total_requests) * 100 if total_requests > 0 else 0
    average_response_time = sum(results['total_time']) / total_requests if total_requests > 0 else 0

    print(f"\nTotal requests: {total_requests}")
    print(f"Success: {results['success']}")
    print(f"Failure: {results['failure']}")
    print(f"Accuracy rate: {accuracy_rate:.2f}%")
    print(
        f"Max:{max(results['total_time'])}--Min{min(results['total_time'])}--Average response time: {average_response_time:.2f} seconds")


if __name__ == '__main__':
    url = "ws://ip:port/ws"
    body = {
        "params": {
            "data": {
                "content":"你好"
            }
        }
    }
    iswait_last_finish = True
    data = json.dumps(body,  ensure_ascii=False)
    num_client = 5  # number of concurrent simulated users
    num_rounds = 30  # rounds per user: 5 users x 30 rounds = 150 requests in total
    interval_time = 0.5
    main(num_client,num_rounds, url, data, interval_time, iswait_last_finish, lock)

Overall, all of the code above implements basic load testing; the same idea could be written in Go, where starting coroutines (goroutines) is even more convenient. Note that after submitting tasks to a thread pool, you must wait for them to complete. You can wait on the returned futures with

concurrent.futures.wait(futures)

or use

executor.shutdown(wait=True)

which waits for the tasks to finish and releases the pool's threads.
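A minimal sketch contrasting the two (job and the values are illustrative only):

import concurrent.futures
from concurrent.futures import ThreadPoolExecutor

def job(i):
    return i * i

executor = ThreadPoolExecutor(max_workers=2)
futures = [executor.submit(job, i) for i in range(4)]

# Option 1: block until every submitted future is done
concurrent.futures.wait(futures)

# Option 2: also frees the pool's threads once the work is drained
executor.shutdown(wait=True)

print([f.result() for f in futures])  # [0, 1, 4, 9]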
