存储+调优: 调优四-networking
[root@localhost ~]# sar -n DEV 1 1
Linux 2.6.18-194.el5 (localhost.localdomain) 04/04/2012
06:16:33 PM IFACE rxpck/s txpck/s rxbyt/s txbyt/s rxcmp/s txcmp/s rxmcst/s
06:16:34 PM lo 0.00 0.00 0.00 0.00 0.00 0.00 0.00
06:16:34 PM eth0 0.00 0.00 0.00 0.00 0.00 0.00 0.00
06:16:34 PM eth1 0.00 0.00 0.00 0.00 0.00 0.00 0.00
06:16:34 PM sit0 0.00 0.00 0.00 0.00 0.00 0.00 0.00
Average: IFACE rxpck/s txpck/s rxbyt/s txbyt/s rxcmp/s txcmp/s rxmcst/s
Average: lo 0.00 0.00 0.00 0.00 0.00 0.00 0.00
Average: eth0 0.00 0.00 0.00 0.00 0.00 0.00 0.00
Average: eth1 0.00 0.00 0.00 0.00 0.00 0.00 0.00
Average: sit0 0.00 0.00 0.00 0.00 0.00 0.00 0.00
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# sar -n SOCK 1 1
Linux 2.6.18-194.el5 (localhost.localdomain) 04/04/2012
06:16:47 PM totsck tcpsck udpsck rawsck ip-frag
06:16:48 PM 443 7 6 0 0
Average: 443 7 6 0 0
网络读缓存(全局)
[root@localhost ~]# cat /proc/sys/net/core/rmem_
rmem_default rmem_max
[root@localhost Documentation]# pwd
/usr/share/doc/kernel-doc-2.6.18/Documentation
[root@localhost Documentation]# grep -rl rmem_default *
filesystems/proc.txt
networking/ip-sysctl.txt
networking/ixgb.txt
networking/cxgb.txt
[root@localhost Documentation]# less filesystems/proc.txt
rmem_default
------------
The default setting of the socket receive buffer in bytes.
rmem_max
--------
The maximum receive socket buffer size in bytes.
不会增加网络I/O,有可能提高应用程序的读速度、写速度、响应速度
TCP
[root@localhost Documentation]# cat /proc/sys/net/ipv4/tcp_rmem
4096 87380 4194304
[root@localhost Documentation]# cat /proc/sys/net/ipv4/tcp_wmem
4096 16384 4194304
[root@localhost Documentation]# grep -rl tcp_wmem *
networking/ip-sysctl.txt
networking/ixgb.txt
networking/s2io.txt
networking/cxgb.txt
[root@localhost Documentation]# less networking/ip-sysctl.txt
tcp_wmem - vector of 3 INTEGERs: min, default, max
min: Amount of memory reserved for send buffers for TCP socket.
Each TCP socket has rights to use it due to fact of its birth.
Default: 4K
default: Amount of memory allowed for send buffers for TCP socket
by default. This value overrides net.core.wmem_default used
by other protocols, it is usually lower than net.core.wmem_default.
Default: 16K
max: Maximal amount of memory allowed for automatically selected
send buffers for TCP socket. This value does not override
net.core.wmem_max, "static" selection via SO_SNDBUF does not use this.
Default: 128K
连接状态
[root@localhost Documentation]# cat /proc/sys/net/ipv4/ip_conntrack_max
65536
[root@localhost Documentation]# lsmod | grep ip_conn
ip_conntrack 53281 2 iptable_nat,ip_nat
nfnetlink 10713 2 ip_nat,ip_conntrack
ip分片
[root@localhost Documentation]# cat /proc/sys/net/ipv4/ipfrag_
ipfrag_high_thresh ipfrag_low_thresh ipfrag_max_dist ipfrag_secret_interval ipfrag_time
[root@localhost Documentation]# cat /proc/sys/net/ipv4/ipfrag_high_thresh
262144
[root@localhost Documentation]# cat /proc/sys/net/ipv4/ipfrag_low_thresh
196608
[root@localhost Documentation]# cat /proc/sys/net/ipv4/ipfrag_time
30
网络
sar -n DEV 1 100
iptraf
[root@localhost ~]# tc qdisc show
qdisc pfifo_fast 0: dev eth0 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1
[root@localhost ~]# tc qdisc add dev eth0 root tbf help
Usage: ... tbf limit BYTES burst BYTES[/BYTES] rate KBPS [ mtu BYTES[/BYTES] ]
[ peakrate KBPS ] [ latency TIME ]
[root@localhost ~]# tc qdisc add dev eth0 root tbf limit 20k burst 10k rate 2mbit
[root@localhost ~]# ethtool eth0
Settings for eth0:
Supported ports: [ TP ]
Supported link modes: 1000baseT/Full
Supports auto-negotiation: No
Advertised link modes: Not reported
Advertised auto-negotiation: No
Speed: 1000Mb/s
Duplex: Full
Port: Twisted Pair
PHYAD: 0
Transceiver: internal
Auto-negotiation: off
Link detected: yes
查看硬件信息
lspci
dmidecode
Ethernet Channel Bonding
我们在这介绍的Linux双网卡绑定实现就是使用两块网卡虚拟成为一块网卡,这个聚合起来的设备看起来是一个单独的以太网接口设备,通俗点讲就是两块网卡具有相同的IP地址而并行链路聚合成一个逻辑链路工作。其实这项技术在Sun和Cisco中早已存在,被称为Trunking和Etherchannel技术,在Linux的2.4.x的内核中也采用了这种技术,被称为bonding。bonding技术的最早应用是在集群——beowulf上,为了提高集群节点间的数据传输而设计的。下面我们讨论一下bonding 的原理,什么是bonding需要从网卡的混杂(promisc)模式说起。我们知道,在正常情况下,网卡只接收目的硬件地址(MAC Address)是自身Mac的以太网帧,对于别的数据帧都滤掉,以减轻驱动程序的负担。但是网卡也支持另外一种被称为混杂promisc的模式,可以接收网络上所有的帧,比如说tcpdump,就是运行在这个模式下。bonding也运行在这个模式下,而且修改了驱动程序中的mac地址,将两块网卡的Mac地址改成相同,可以接收特定mac的数据帧。然后把相应的数据帧传送给bond驱动程序处理。
vim /etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
BOOTPROTO=none
ONBOOT=yes
IPADDR=192.168.0.20
NETMASK=255.255.255.0
BROADCAST=192.168.0.255
TYPE=Ethernet
vim /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
BOOTPROTO=none
ONBOOT=yes
HWADDR=00:0c:29:53:c0:83
vim /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
BOOTPROTO=none
ONBOOT=yes
HWADDR=00:0c:29:53:c0:8d
vim /etc/modprobe.conf
alias bond0 bonding
options bond0 miimon=100 mode=1
miimon是用来进行链路监测的。比如:miimon=100,那么系统每100ms监测一次链路连接状态,如果有一条线路不通就转入另一条线路;mode的值表示工作模式,它共有
0,1,2,3四种模式,常用的为0,1两种。
mode=0表示load balancing (round-robin)为负载均衡方式,两块网卡都工作。
mode=1表示fault-tolerance (active-backup)提供冗余功能,工作方式是主备的工作方式,也就是说默认情况下只有一块网卡工作,另一块做备份.
bonding只能提供链路监测,即从主机到交换机的链路是否接通。如果只是交换机对外的链路down掉了,而交换机本身并没有故障,那么bonding会认为链路没有问题而继续使用该链路。
/etc/rc.local
ifenslave bond0 eth0 eth1
less /usr/share/doc/kernel-doc-2.6.18/Documentation/networking/bonding.txt
/etc/modprobe.conf
/proc/net/bonding/bond0
RHEL5
rpm -q kernel-doc
/usr/share/doc/kernel-doc-2.6.18/Documentation/networking/bonding.txt
vim /etc/modprobe.conf
---
alias bond0 bonding
options bonding miimon=100 mode=balance-rr
---
/etc/sysconfig/network-scripts/ifcfg-ethX
---
DEVICE=ethX
USERCTL=no
ONBOOT=yes
MASTER=bond0
SLAVE=yes
BOOTPROTO=none
---
/etc/sysconfig/network-scripts/ifcfg-bond0
-----------
DEVICE=bond0
IPADDR=192.168.0.254
NETMASK=255.255.255.0
ONBOOT=yes
BOOTPROTO=none
USERCTL=no
-----------
service network restart