# Some sites refuse to answer robot-looking requests, so a plain urlopen()
# may come back empty. The function below fetches a page's source while
# mimicking a Firefox browser (full browser-style request headers).
import urllib2
def getUrlRespHtml(url, charset='gbk'):
    """Fetch *url* while impersonating a Firefox browser and return the
    page source transcoded to UTF-8.

    Some sites reject requests that look automated; sending a full set of
    browser-style headers (User-Agent, Accept, Referer, ...) plus cookie
    support works around that.

    :param url: page to fetch; also used as the Referer header value.
    :param charset: encoding of the response body (default ``'gbk'``,
        common for Chinese sites) — TODO confirm against each target site.
    :return: page source re-encoded from *charset* to UTF-8 (a byte string
        under Python 2).
    :raises urllib2.URLError: on network failure.
    :raises UnicodeDecodeError: if the body is not valid *charset* text.
    """
    heads = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'GB2312,utf-8;q=0.7,*;q=0.7',
        'Accept-Language': 'zh-cn,zh;q=0.5',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Keep-Alive': '115',
        # Some sites check the Referer; pointing it at the page itself suffices.
        'Referer': url,
        'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.14) Gecko/20110221 Ubuntu/10.10 (maverick) Firefox/3.6.14',
    }
    # NOTE(review): the original also sent 'Host': 'John'. A forged Host
    # header breaks virtual-hosted servers (urllib2 derives the correct
    # Host from the URL itself), so it was dropped.
    # Cookie jar so servers that set-then-expect cookies still respond.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
    urllib2.install_opener(opener)  # preserve original global side effect
    opener.addheaders = heads.items()
    req = urllib2.Request(url)
    respHtml = opener.open(req).read()
    # Transcode the (typically GBK) body to UTF-8 for downstream use.
    return respHtml.decode(charset).encode('utf-8')
if __name__ == "__main__":
    # Demo: fetch a Baidu news page and print its UTF-8 source.
    # Guarded so importing this module no longer triggers a network fetch.
    html = getUrlRespHtml("http://mil.news.baidu.com/")
    print(html)