#! /usr/bin/python
# -*- coding:utf-8 -*-
'''
Created on 2013-11-5
@author: Java
'''
import urllib2
import time
import socket
from sgmllib import SGMLParser
class WebUtil():
def __init__(self):
self.trytims = 3
pass
#读取Url 内容
# timeout=10
# socket.setdefaulttimeout(timeout)#这里对整个socket层设置超时时间。后续文件中如果再使用到socket,不必再设置
# sleep_download_tine=10
# time.sleep(sleep_download_tine)
def readUrl(self,url):
try:
request = urllib2.Request(url,headers = {'User-Agent':'Magic Browser'})
webpage = urllib2.urlopen(url)
content = webpage.read()
return content
request.close()
except Exception,errmg:
print '读取失败:%s'%errmg
return None
if __name__=='__main__':
web = WebUtil()
content = web.readUrl('http://www.haodf.com/doctor/DE4rO-XCoLUOzseHcTieBvzKOb.htm')
print content
# Trailing text from the blog page this script was copied from (kept for
# provenance, commented out so the file parses):
#   "python根据url获取网页内容" — latest recommended article 2024-06-02 18:54:26