A simple implementation of the Taobao server architecture diagram


This post is from the blog "峰云,就她了。"; please keep this attribution when reposting: http://rfyiamcool.blog.51cto.com/1030776/887983

A few days ago, with some time on my hands, I came across a Taobao architecture diagram through my teacher 老男孩 and found it interesting. I have built CDN and cluster setups in production before, but never at that scale, so I put together a simple version just to try it out. The real Taobao architecture is obviously nothing like this, but it was fun to scratch the experimental itch.

I'll post the scripts bit by bit. They are not airtight, so please point out anything that looks off. Also, the IPs in the scripts don't quite match the diagram; adjust and extend them as needed.

Honestly, LVS is the easiest part to configure; there is not much to it. I use LVS less often in cluster work because it has no regex-based routing, but as a layer-4 load balancer its strength is carrying and forwarding very heavy traffic.
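
For reference, the virtual_server section of the keepalived configuration below is roughly what the following manual ipvsadm rules would set up (a sketch only; keepalived creates and tears down these rules itself, and the VIP and real-server addresses are the ones used in the script that follows):

    # Rough ipvsadm equivalent of the virtual_server block below: DR mode, round-robin,
    # 50s persistence. Shown only for illustration -- keepalived manages this for you.
    ipvsadm -A -t 10.10.10.88:80 -s rr -p 50
    ipvsadm -a -t 10.10.10.88:80 -r 10.10.10.21:80 -g -w 3
    ipvsadm -a -t 10.10.10.88:80 -r 10.10.10.22:80 -g -w 3
    ipvsadm -L -n     # list the resulting virtual service table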

  
  
    mkdir /usr/local/src/lvs
    cd /usr/local/src/lvs
    wget http://www.linuxvirtualserver.org/software/kernel-2.6/ipvsadm-1.24.tar.gz
    wget http://www.keepalived.org/software/keepalived-1.1.15.tar.gz
    lsmod | grep ip_vs
    uname -r
    ln -s /usr/src/kernels/$(uname -r)/ /usr/src/linux    # ipvsadm needs the kernel source tree here
    tar zxvf ipvsadm-1.24.tar.gz
    cd ipvsadm-1.24
    make && make install
    cd ..
    tar zxvf keepalived-1.1.15.tar.gz
    cd keepalived-1.1.15
    ./configure && make && make install
    cp /usr/local/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/
    cp /usr/local/etc/sysconfig/keepalived /etc/sysconfig/
    mkdir /etc/keepalived
    cp /usr/local/etc/keepalived/keepalived.conf /etc/keepalived/
    cp /usr/local/sbin/keepalived /usr/sbin/
    # you can now: service keepalived start|stop
    # master node; keepalived reads /etc/keepalived/keepalived.conf
    cat > /etc/keepalived/keepalived.conf <<EOF
    ! Configuration File for keepalived
    global_defs {
       notification_email {
            rfyiamcool@163.com
       }
       notification_email_from Alexandre.Cassen@firewall.loc
       smtp_server 127.0.0.1
       router_id LVS_DEVEL
    }
    vrrp_instance VI_1 {
       state MASTER          # BACKUP on the standby director
       interface eth0
       virtual_router_id 51
       priority 100          # 90 on the standby director
       advert_int 1
       authentication {
           auth_type PASS
           auth_pass 1111
       }
       virtual_ipaddress {
           10.10.10.88
       }
    }
    virtual_server 10.10.10.88 80 {
       delay_loop 6
       lb_algo rr
       lb_kind DR
       persistence_timeout 50
       protocol TCP
       real_server 10.10.10.21 80 {
           weight 3
           TCP_CHECK {
               connect_timeout 10
               nb_get_retry 3
               delay_before_retry 3
               connect_port 80
           }
       }
       real_server 10.10.10.22 80 {
           weight 3
           TCP_CHECK {
               connect_timeout 10
               nb_get_retry 3
               delay_before_retry 3
               connect_port 80
           }
       }
    }
    EOF
    service keepalived start
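
On the backup director, use the same configuration with state BACKUP and priority 90, as the comments above indicate. Once keepalived is running on the master, a quick sanity check (output will vary) might look like this:

    ip addr show eth0 | grep 10.10.10.88             # the VIP should be bound on the MASTER node
    ipvsadm -L -n                                    # 10.10.10.88:80 should list both real servers
    tail /var/log/messages | grep -i keepalived      # VRRP state transitions and health-check events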

Now let's take care of the second tier, haproxy; change the IPs to suit your own setup.

  
  
    #!/bin/bash
    cd /usr/local/src/
    wget http://haproxy.1wt.eu/download/1.4/src/haproxy-1.4.8.tar.gz
    tar zxf haproxy-1.4.8.tar.gz
    cd haproxy-1.4.8
    uname -a
    make TARGET=linux26 PREFIX=/usr/local/haproxy
    make install PREFIX=/usr/local/haproxy
    cat > /usr/local/haproxy/haproxy.cfg <<EOF
    global
           log 127.0.0.1   local0     ### global log
           maxconn 4096               ### maximum connections
           chroot /usr/local/haproxy
           uid 501                    ### run as this uid
           gid 501                    ### run as this gid
           daemon                     ### run in the background
           nbproc 1                   ### number of worker processes
           pidfile /usr/local/haproxy/haproxy.pid      ### pid file
    defaults
           log     127.0.0.1       local3
           mode    http               ### proxy mode
           option httplog             ### HTTP log format
           option httpclose           ### close the HTTP connection after each request
           option dontlognull
           option forwardfor          ### pass the client IP to the backends via X-Forwarded-For
           option redispatch
           retries 2                  ### connection retries
           maxconn 2000
           balance roundrobin         ### balancing algorithm
           stats   uri     /haproxy-stats      ### statistics page
           #stats   auth   admin:admin         ### statistics page credentials, optional
           contimeout      5000       ### connect timeout
           clitimeout      50000      ### client timeout
           srvtimeout      50000      ### server timeout
    listen   proxy *:80               ### listen address and port
           option httpchk HEAD /index.html HTTP/1.0    ### health-check request
       server web1 10.10.10.30:88 cookie app1inst2 check inter 2000 rise 2 fall 5
       server web2 10.10.10.31:88 cookie app1inst2 check inter 2000 rise 2 fall 5
       server web3 10.10.10.32:88 cookie app1inst2 check inter 2000 rise 2 fall 5
       server web4 10.10.10.33:88 cookie app1inst2 check inter 2000 rise 2 fall 5
       server web5 10.10.10.34:88 cookie app1inst2 check inter 2000 rise 2 fall 5
       server web6 10.10.10.35:88 cookie app1inst2 check inter 2000 rise 2 fall 5
    EOF
    cat > /etc/init.d/haproxy <<EOF
    #! /bin/sh
    # chkconfig: 2345 85 15
    # description: HAProxy load balancer
    set -e
    PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/haproxy/sbin
    PROGDIR=/usr/local/haproxy
    PROGNAME=haproxy
    DAEMON=\$PROGDIR/sbin/\$PROGNAME
    CONFIG=\$PROGDIR/\$PROGNAME.cfg
    PIDFILE=\$PROGDIR/\$PROGNAME.pid
    DESC="HAProxy daemon"
    SCRIPTNAME=/etc/init.d/\$PROGNAME
    # Gracefully exit if the package has been removed.
    test -x \$DAEMON || exit 0
    start()
    {
           echo -n "Starting \$DESC: \$PROGNAME"
           \$DAEMON -f \$CONFIG
           echo "."
    }
    stop()
    {
           echo -n "Stopping \$DESC: \$PROGNAME"
           haproxy_pid=\$(cat \$PIDFILE)
           kill \$haproxy_pid
           echo "."
    }
    restart()
    {
           echo -n "Restarting \$DESC: \$PROGNAME"
           \$DAEMON -f \$CONFIG -p \$PIDFILE -sf \$(cat \$PIDFILE)
           echo "."
    }
    case "\$1" in
     start)
           start
           ;;
     stop)
           stop
           ;;
     restart)
           restart
           ;;
     *)
           echo "Usage: \$SCRIPTNAME {start|stop|restart}" >&2
           exit 1
           ;;
    esac
    exit 0
    EOF
    chmod +x /etc/rc.d/init.d/haproxy
    chkconfig --add haproxy
    touch /usr/local/haproxy/haproxy.pid
    chmod 777 /usr/local/haproxy/haproxy.pid
    sed -i '/SYSLOGD_OPTIONS/c\SYSLOGD_OPTIONS="-r -m 0"' /etc/sysconfig/syslog
    echo "local3.*        /var/log/haproxy.log" >> /etc/syslog.conf
    echo "local0.*        /var/log/haproxy.log" >> /etc/syslog.conf
    service syslog restart
    # start haproxy
    # /usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/haproxy.cfg
    # reload haproxy gracefully
    # /usr/local/haproxy/sbin/haproxy -f /usr/local/haproxy/haproxy.cfg -st `cat /usr/local/haproxy/haproxy.pid`
    # stop haproxy
    # killall haproxy
    # or: service haproxy start|restart|stop
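
Before putting the proxy behind LVS it is worth validating the configuration file and poking the stats page. A minimal check, using the stats URI and health-check page defined in the config above:

    /usr/local/haproxy/sbin/haproxy -c -f /usr/local/haproxy/haproxy.cfg   # syntax check only, does not start the daemon
    service haproxy start
    curl -I http://127.0.0.1/haproxy-stats      # stats page from "stats uri /haproxy-stats"
    curl -I http://127.0.0.1/index.html         # the page that "option httpchk HEAD /index.html" polls on the backends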

haproxy can also route on the Host header; an example of hostname-based ACLs looks like this:

  
  
    acl url_aaa       hdr_dom(host)    www.aaa.com
    acl url_bbb       hdr_dom(host)    www.bbb.com
    acl tm_policy     hdr_dom(host) -i trade.gemini.taobao.net
    acl denali_policy hdr_reg(host) -i ^(my.gemini.taobao.net|auction1.gemini.taobao.net)$
    acl path_url163   path_beg  -i /163
    acl path_url_bbb  path_beg  -i /
    use_backend url163   if url_aaa path_url163
    use_backend aaa      if url_aaa
    use_backend bbb      if url_bbb
    backend url163
      mode http
      balance roundrobin
      option httpchk GET /163/test.jsp
      server url163 10.10.10.31:8080 cookie 1 check inter 2000 rise 3 fall 3 maxconn 50000
    backend aaa
      mode http
      balance roundrobin
      option httpchk GET /test.jsp
      server app_8080 10.10.10.32:8080 cookie 1 check inter 1500 rise 3 fall 3 maxconn 50000
    backend bbb
      mode http
      balance roundrobin
      option httpchk GET /test.jsp
      server app_8080 10.10.10.33:8090 cookie 1 check inter 1500 rise 3 fall 3 maxconn 50000
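
To verify host-based routing like this, send requests that differ only in the Host header and confirm each one reaches the expected backend. A rough test against the VIP (10.10.10.88 as used earlier; adjust to wherever your listen section is bound):

    # Each request should be matched by hdr_dom(host) and routed to the corresponding backend.
    curl -s -o /dev/null -w "%{http_code}\n" -H "Host: www.aaa.com" http://10.10.10.88/
    curl -s -o /dev/null -w "%{http_code}\n" -H "Host: www.bbb.com" http://10.10.10.88/
    curl -s -o /dev/null -w "%{http_code}\n" -H "Host: www.aaa.com" http://10.10.10.88/163/test.jsp   # should land on backend url163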

The haproxy nodes also act as LVS real servers (DR mode), so the VIP has to be bound on the loopback interface:

  
  
    #!/bin/bash
    # Bind the LVS VIP on lo:0 and suppress ARP for it, so the director keeps owning the VIP (DR mode).
    SNS_VIP=10.10.10.88
    source /etc/rc.d/init.d/functions
    case "$1" in
    start)
          ifconfig lo:0 $SNS_VIP netmask 255.255.255.255 broadcast $SNS_VIP
          /sbin/route add -host $SNS_VIP dev lo:0
          echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
          echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
          echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
          echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
          echo "RealServer Start OK"
          ;;
    stop)
          ifconfig lo:0 down
          route del $SNS_VIP >/dev/null 2>&1
          echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
          echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
          echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
          echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
          echo "RealServer Stopped"
          ;;
    *)
          echo "Usage: $0 {start|stop}"
          exit 1
    esac
    exit 0
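
Install the script above on every haproxy node so the VIP stays bound on lo:0 across reboots. A minimal way to wire it in (the file name realserver is just a conventional choice, not something from the original):

    cp realserver.sh /etc/init.d/realserver                # realserver.sh = the loopback script above (hypothetical name)
    chmod +x /etc/init.d/realserver
    /etc/init.d/realserver start
    echo "/etc/init.d/realserver start" >> /etc/rc.local   # bring the VIP up again after a reboot
    ip addr show lo                                        # lo:0 should now carry 10.10.10.88/32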

Next is the squid setup.

  
  
    #!/bin/bash
    wget  http://www.squid-cache.org/Versions/v2/2.6/squid-2.6.STABLE6.tar.bz2
    tar jxvf squid-2.6.STABLE6.tar.bz2
    cd squid-2.6.STABLE6
    ./configure --prefix=/usr/local/squid \
    --enable-async-io=320 \
    --enable-storeio="aufs,diskd,ufs" \
    --enable-useragent-log \
    --enable-referer-log \
    --enable-kill-parent-hack \
    --enable-forward-log \
    --enable-snmp \
    --enable-cache-digests \
    --enable-default-err-language=Simplify_Chinese \
    --enable-epoll \
    --enable-removal-policies="heap,lru" \
    --enable-large-cache-files \
    --disable-internal-dns \
    --enable-x-accelerator-vary \
    --enable-follow-x-forwarded-for \
    --disable-ident-lookups \
    --with-large-files \
    --with-filedescriptors=65536
    make && make install
    cat > /usr/local/squid/etc/squid.conf <<EOF
    visible_hostname cache1.taobao.com
    http_port 192.168.1.44:80 vhost vport
    icp_port 0
    cache_mem 512 MB
    cache_swap_low 90
    cache_swap_high 95
    maximum_object_size 20000 KB
    maximum_object_size_in_memory 4096 KB
    cache_dir ufs /tmp1 3000 32 256
    cache_store_log none
    emulate_httpd_log on
    refresh_pattern ^ftp:           1440    20%     10080
    refresh_pattern ^gopher:        1440    0%      1440
    refresh_pattern .               0       20%     4320
    negative_ttl 5 minutes
    positive_dns_ttl 6 hours
    negative_dns_ttl 1 minute
    connect_timeout 1 minute
    read_timeout 15 minutes
    request_timeout 5 minutes
    client_lifetime 1 day
    half_closed_clients on
    maximum_single_addr_tries 1
    uri_whitespace strip
    ie_refresh off
    logformat combined %>a %ui %un [%tl] "%rm %ru HTTP/%rv" %Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh
    pid_filename /var/log/squid/squid.pid
    cache_log /var/log/squid/cache.log
    access_log /var/log/squid/access.log combined
    acl all src 0.0.0.0/0.0.0.0
    acl QUERY urlpath_regex cgi-bin .php .cgi .avi .wmv .rm .ram .mpg .mpeg .zip .exe
    cache deny QUERY
    acl picurl url_regex -i \.bmp$ \.png$ \.jpg$ \.gif$ \.jpeg$
    acl mystie1 referer_regex -i aaa
    http_access allow mystie1 picurl
    acl mystie2 referer_regex -i bbb
    http_access allow mystie2 picurl
    acl nullref referer_regex -i ^$
    http_access allow nullref
    acl hasref referer_regex -i .+
    http_access deny hasref picurl
    cache_peer 192.168.1.7 parent 80 0 no-query originserver no-digest name=all
    cache_peer_domain all *.taobao.com
    cache_effective_user nobody
    cache_effective_group nobody
    acl localhost src 127.0.0.1
    acl my_other_proxy srcdomain .a.com
    follow_x_forwarded_for allow localhost
    follow_x_forwarded_for allow all   # trust the X-Forwarded-For header
    acl_uses_indirect_client on        # squid 2.6+ only
    delay_pool_uses_indirect_client on # squid 2.6+ only
    log_uses_indirect_client on        # squid 2.6+ only
    #refresh_pattern ^ftp: 60 20% 10080
    #refresh_pattern ^gopher: 60 0% 1440
    #refresh_pattern . 0 20% 1440
    refresh_pattern -i \.js$        1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.html$      720     50%     1440    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.jpg$       1440    90%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.gif$       1440    90%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.swf$       1440    90%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.png$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.bmp$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.doc$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.ppt$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.xls$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.pdf$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.rar$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.zip$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    refresh_pattern -i \.txt$       1440    50%     2880    reload-into-ims ignore-reload ignore-no-cache ignore-auth ignore-private
    EOF
    # create the cache and log directories and let squid write to them
    mkdir /tmp1
    mkdir /var/log/squid
    chown -R nobody:nobody /tmp1
    chmod 755 /tmp1
    chown -R nobody:nobody /var/log/squid
    # build the cache directories on the first run
    /usr/local/squid/sbin/squid -z
    # raise file-descriptor limits and start squid
    echo "65535" > /proc/sys/fs/file-max
    ulimit -HSn 65535
    /usr/local/squid/sbin/squid
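
With squid bound to 192.168.1.44:80 and the origin at 192.168.1.7 (per the cache_peer line above), you can confirm acceleration by requesting the same object twice and watching the X-Cache header flip from MISS to HIT. A rough check, assuming /logo.gif exists on the origin and the Host header matches the *.taobao.com cache_peer_domain:

    curl -s -D - -o /dev/null -H "Host: cache1.taobao.com" http://192.168.1.44/logo.gif | grep -i X-Cache   # first request: MISS
    curl -s -D - -o /dev/null -H "Host: cache1.taobao.com" http://192.168.1.44/logo.gif | grep -i X-Cache   # second request: HIT
    tail /var/log/squid/access.log        # TCP_HIT / TCP_MEM_HIT entries confirm objects are served from cache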

The cache purge script came from 洒哥.

I only extended 洒哥's script a little. The version he shared could purge by domain or by a specific file extension; I wanted to also purge, say, every jpg under one site, or everything under a path like www.92hezu.com/123/bbb/. It turned out that chaining a few more grep filters was enough. Usage looks like this:

    qingli.sh www.xiuxiukan.com
    qingli.sh jpg
    qingli.sh xiuxiukan.com 123 bbb jpg


  
  
    #!/bin/sh
    # Scan the squid cache files for URLs matching the arguments and send a PURGE request for each one.
    squidcache_path="/squidcache"                          # cache directory (the squid.conf above uses /tmp1; adjust to match)
    squidclient_path="/home/local/squid/bin/squidclient"   # the build above installs it at /usr/local/squid/bin/squidclient
    #grep -a -r $1 $squidcache_path/* | grep "http:" | awk -F 'http:' '{print "http:"$2;}' | awk -F\' '{print $1}' > cache.txt
    if [[ "$1" == "swf" || "$1" == "png" || "$1" == "jpg" || "$1" == "ico" || "$1" == "gif" || "$1" == "css" || "$1" == "js" || "$1" == "html" || "$1" == "shtml" || "$1" == "htm" ]]; then
        # a single file-extension argument: purge every URL ending in that extension
        grep -a -r .$1 $squidcache_path/* | strings | grep "http:" | awk -F 'http:' '{print "http:"$2;}' | awk -F\' '{print $1}' | grep "$1$" | uniq > cache.txt
    else
        # otherwise treat the arguments as domain/path/extension filters chained with grep
        grep -a -r $1 $squidcache_path/* | strings | grep "http:" | grep $2$ | grep $3$ | grep $4$ | grep $5$ | grep $6$ | awk -F 'http:' '{print "http:"$2;}' | awk -F\' '{print $1}' | uniq > cache.txt
    fi
    sed -i "s/\";$//g" cache.txt
    cat cache.txt | while read LINE
    do
        $squidclient_path -p 80 -m PURGE $LINE
    done
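
Each URL the script extracts is fed to squidclient as an individual PURGE request, so a single object can also be evicted by hand. Note that squid only honours PURGE if an ACL permits it (for example an "acl purge method PURGE" plus "http_access allow purge localhost" pair, which the squid.conf above does not include); the URL below is only an example:

    # Purge one cached object manually -- this is exactly what the loop in the script does per URL.
    /usr/local/squid/bin/squidclient -h 127.0.0.1 -p 80 -m PURGE http://www.xiuxiukan.com/123/bbb/photo.jpg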

