# A simple web-scraping example; the request is disguised as a browser visit.
import httplib
import socket
import sys
import time
import urllib2
def getHttpContent(path):
while True:
try:
i_headers = {"User-Agent": "User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48",\
"Referer": path}
req = urllib2.Request(path, headers=i_headers)
sock = urllib2.urlopen(req)
content = sock.read()
sock.close()
return content
except urllib2.HTTPError,e:
print e
if e.code == 403:
sys.exit()
return None
except urllib2.URLError,e:
print e
print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
time.sleep(60)
except socket.error,e:
print e
print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
print path
return None
except HTTPException,e:
print e
print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
print path
return None
return None
if __name__ == '__main__':
path_list = ['http://blog.csdn.net/yangqq2013/article/details/22072865','http://blog.csdn.net/yangqq2013/article/details/22072865']
for i in range(1,100):
print str(i)+"++++++++++++++++++++++++++++++++"
for path in path_list:
print path
getHttpContent(path)
time.sleep(1)