提醒: 千万不能用win电脑去运行web进行压测,win电脑没优化,并发从几百到2000不等,根本达不到上万的qps
gin web代码:https://blog.csdn.net/cabing2005/article/details/78884138
[root@ycnode2 yc]# cat 1-test.go
package main
import (
"time"
"net/http"
"github.com/gin-gonic/gin"
)
func main() {
route := gin.Default()
route.GET("/ping", func(c *gin.Context) {
//time.Sleep(time.Millisecond * 1)
c.String(http.StatusOK, "OK")
})
s := &http.Server{
Addr: ":8090",
Handler: route,
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
MaxHeaderBytes: 1 << 20,
}
s.ListenAndServe()
}
直接压测
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8090/ping
Running 30s test @ http://192.168.11.81:8090/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 86.22ms 92.88ms 990.02ms 86.65%
Req/Sec 1.52k 631.56 3.48k 64.69%
452437 requests in 30.06s, 50.91MB read
Socket errors: connect 0, read 6, write 0, timeout 0
Requests/sec: 15051.64
配置:
worker_processes 2;
worker_rlimit_nofile 51200;
events {
use epoll;
worker_connections 51200;
}
结果:
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 110.29ms 74.04ms 1.00s 73.43%
Req/Sec 0.97k 235.85 3.48k 84.11%
286701 requests in 30.07s, 44.83MB read
Requests/sec: 9533.13
Transfer/sec: 1.49MB
worker改为1:
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 96.37ms 59.48ms 1.19s 92.29%
Req/Sec 1.08k 205.07 3.90k 77.04%
320720 requests in 30.06s, 50.15MB read
Requests/sec: 10668.05
Transfer/sec: 1.67MB
开启sendfile: 没实质性变化
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 96.79ms 61.50ms 1.11s 92.40%
Req/Sec 1.08k 194.98 3.67k 78.12%
319743 requests in 30.07s, 49.99MB read
Requests/sec: 10633.30
Transfer/sec: 1.66MB
开启sendfile 和 nopush
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 98.76ms 76.20ms 1.18s 96.14%
Req/Sec 1.08k 215.14 3.38k 78.24%
320732 requests in 30.10s, 50.15MB read
Requests/sec: 10655.89
Transfer/sec: 1.67MB
开启 sendfile、nopush,并设置 keepalive_timeout 60s(nginx 指令,不是 keepalived 软件)
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 95.35ms 45.15ms 1.13s 89.35%
Req/Sec 1.07k 194.47 1.89k 74.57%
319669 requests in 30.10s, 47.85MB read
Requests/sec: 10621.96
Transfer/sec: 1.59MB
设置buffer 1
proxy_buffers 8 8k;
Requests/sec: 10567.98
Transfer/sec: 1.58MB
proxy_buffers 16 8k;
Requests/sec: 10627.78
Transfer/sec: 1.59MB
proxy_buffers 512 8k;
Requests/sec: 10503.37
Transfer/sec: 1.57MB
wrk在服务器81 go在 82
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.82:8090/ping
Running 30s test @ http://192.168.11.82:8090/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 54.02ms 38.40ms 469.33ms 67.35%
Req/Sec 1.98k 336.28 3.04k 75.44%
587699 requests in 30.02s, 66.14MB read
Requests/sec: 19574.33
Transfer/sec: 2.20MB
wrk nginx在81 go在82 性能下降好严重
worker 1
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 205.10ms 111.88ms 1.97s 92.02%
Req/Sec 487.99 201.67 1.48k 68.98%
145579 requests in 30.09s, 21.79MB read
Socket errors: connect 0, read 0, write 0, timeout 382
Requests/sec: 4837.92
Transfer/sec: 741.59KB
worker 2
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 133.35ms 119.06ms 1.28s 94.00%
Req/Sec 846.25 193.02 1.62k 68.98%
252730 requests in 30.08s, 37.83MB read
Requests/sec: 8401.85
Transfer/sec: 1.26MB
我靠 nginx单核1万
[root@ycnode1 tools]# ./wrk -c 1000 -t 10 -d 30 http://192.168.11.81:8990/ping
Running 30s test @ http://192.168.11.81:8990/ping
10 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 50.67ms 79.71ms 1.99s 97.67%
Req/Sec 1.12k 412.45 6.65k 79.56%
332319 requests in 30.06s, 49.74MB read
Socket errors: connect 0, read 8, write 428, timeout 2098
Requests/sec: 11055.09
Transfer/sec: 1.65MB
抓包分析:
tcpdump -i ens33 host 192.168.11.82 and port 8090 -n -O -w package.cap
wireshark分析 http 请求 Connection: 字段如果为close 则为短链接
压测结果:
1、 wrk nginx gin网站代码 在同一个服务器 和 gin网站代码单独一个服务器 区别不大 只要套接字 fd句柄够用
2、 nginx配置长链接:https://www.cnblogs.com/liufarui/p/11075630.html
1、 保持和client的长链接
http {
keepalive_timeout 60s;
}
2、 保持和server的长链接 https://www.cnblogs.com/liufarui/p/11075630.html
upstream backend {
server 192.168.0.1:8080 weight=1 max_fails=2 fail_timeout=30s;
server 192.168.0.2:8080 weight=1 max_fails=2 fail_timeout=30s;
keepalive 300; # 这个很重要!(注意:nginx 配置注释必须用 #,不能用 //)
}
server {
listen 8080 default_server;
server_name "";
location / {
proxy_pass http://backend;
proxy_http_version 1.1; # 设置http版本为1.1
proxy_set_header Connection ""; # 置空 Connection 头以启用与后端的长连接(nginx 反向代理默认发送 Connection: close)
}
}
我的知识盲点:
proxy_set_header Connection "" 必须设置:nginx 反向代理默认向后端发送 Connection: close,不置空则与后端依然是短连接
长链接的血案还可以参考:http://xiaorui.cc/archives/3495
3、 nginx worker 1的时候 qps=1.1万 2的时候 1.7万
4、proxy_set_header Connection ""
压测结果:
total used free shared buff/cache available
Mem: 1823 212 485 8 1125 1390
Swap: 0 0 0
22:19:30 up 4:47, 3 users, load average: 1.54, 0.41, 0.17
ESTABLISHED 1009 (tcp链接稳定在1000左右)
1005
5、#proxy_set_header Connection ""注释掉以后
total used free shared buff/cache available
Mem: 1823 207 490 8 1126 1394
Swap: 0 0 0
22:22:04 up 4:49, 3 users, load average: 2.32, 0.76, 0.31
CLOSE_WAIT 1
ESTABLISHED 888
FIN_WAIT1 73
FIN_WAIT2 3
TIME_WAIT 6718 (存在大量的TIME_WAIT)
927
观测脚本
[root@ycnode2 script]# cat 1.sh
#!/bin/sh
# Poll memory, load average, and TCP connection-state counts once per
# second while a load test runs, so connection churn (TIME_WAIT storms,
# ESTABLISHED plateau) can be observed live.
while true
do
  free -m && uptime
  # Tally TCP connections by state (ESTABLISHED, TIME_WAIT, FIN_WAIT1, ...)
  netstat -n | awk '/^tcp/ {++S[$NF]} END {for(a in S) print a, S[a]}'
  # Count established connections to the backend on port 8090.
  # grep -c replaces the redundant "grep ... | wc -l" pipeline.
  netstat -ant | grep 8090 | grep -c ESTABLISHED
  sleep 1
done