Web scraping: forging request headers when access is denied (403 Forbidden)

# -*- coding: utf-8 -*-
# Python 2: urllib2 was folded into urllib.request in Python 3
import urllib2
import random

url = "http://blog.csdn.net/u013256816"

# First attempt: pass a fixed header dict straight to urllib2.Request.
# Note: "GET" is the request method, not a header, so it must not
# appear in the dict.
# my_headers = {"Host": "blog.csdn.net",
#               "Referer": "http://blog.csdn.net/",
#               "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.154 Safari/537.36 LBBROWSER"}
# req = urllib2.Request(url, headers=my_headers)
# html = urllib2.urlopen(req)
# print html.read()
# print req.header_items()

"""每次用不一样的头部信息
    代理IP,假的用户头部信息
"""
myHeader=["Mozilla/4.0 (compatible; MSIE 5.0; Windows NT)",
          "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3",
          "Opera/8.0 (Macintosh; PPC Mac OS X; U; en)",
          "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6"
          "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; 360SE)"
         " Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; Maxthon/3.0)"
          ]
def getContent(url, headers):
    """Fetch a page that would otherwise answer 403 Forbidden,
    using a User-Agent picked at random from `headers`.
    :param url: page to fetch
    :param headers: list of User-Agent strings to choose from
    :return: the response body as a string
    """
    random_header = random.choice(headers)
    print random_header
    req = urllib2.Request(url)
    req.add_header("User-Agent", random_header)
    req.add_header("Host", "blog.csdn.net")
    req.add_header("Referer", "http://blog.csdn.net/")
    content = urllib2.urlopen(req).read()
    return content

print getContent(url, myHeader)
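The docstring above also mentions rotating proxy IPs, which the script never actually implements. A minimal sketch of how that could look with urllib2.ProxyHandler is below, continuing the script above (same imports). The addresses in proxy_list are made-up placeholders, not real proxy servers.

# Sketch: rotate the egress IP as well as the User-Agent.
# proxy_list entries are hypothetical placeholders; substitute real proxies.
proxy_list = ["121.33.226.167:3128",
              "118.186.86.247:8080"]

def getContentViaProxy(url, headers, proxies):
    """Like getContent, but routes the request through a random HTTP proxy."""
    proxy = random.choice(proxies)
    proxy_handler = urllib2.ProxyHandler({"http": proxy})
    opener = urllib2.build_opener(proxy_handler)
    req = urllib2.Request(url)
    req.add_header("User-Agent", random.choice(headers))
    req.add_header("Referer", "http://blog.csdn.net/")
    return opener.open(req).read()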

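If the server still answers 403 despite the forged headers, a common follow-up is to catch urllib2.HTTPError and retry with a fresh random User-Agent. A sketch under the assumption of a small fixed retry budget (getContentWithRetry and max_tries are illustrative names, not from the original post):

def getContentWithRetry(url, headers, max_tries=3):
    """Retry on 403 Forbidden, picking a new random User-Agent each time."""
    for attempt in range(max_tries):
        req = urllib2.Request(url)
        req.add_header("User-Agent", random.choice(headers))
        req.add_header("Referer", "http://blog.csdn.net/")
        try:
            return urllib2.urlopen(req).read()
        except urllib2.HTTPError as e:
            # Re-raise anything other than 403, or 403 on the last attempt.
            if e.code != 403 or attempt == max_tries - 1:
                raise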