#coding:utf-8
import urllib2
import bs4
from bs4 import BeautifulSoup
class YZBZ():
    """Scraper for the Yaozhi standards database (db.yaozh.com/biaozhun).

    Fetches each listing page, extracts every table row with BeautifulSoup,
    and appends the rows as pipe-delimited UTF-8 text to a local file.
    """

    # Total number of listing pages on the site (the original comment says 11,
    # but the old loop `range(1, 11)` stopped at page 10 — fixed below).
    PAGE_COUNT = 11
    # Output file that scraped rows are appended to.
    OUTPUT_PATH = r"D:\Python test\yaozhi_biaozhun.txt"

    def __init__(self):
        # Start at the first listing page.
        self.pageIndex = 1
        # Spoof a desktop browser so the site serves the regular HTML page.
        self.user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
        self.headers = {'User-Agent': self.user_agent}

    def getHtml(self, pageIndex):
        """Return the raw HTML of listing page *pageIndex*, or None on failure."""
        try:
            url = 'http://db.yaozh.com/biaozhun?p=' + str(pageIndex)
            # Build the request with the browser-like headers.
            request = urllib2.Request(url, headers=self.headers)
            page = urllib2.urlopen(request)
            return page.read()
        except urllib2.URLError as e:  # 'as' form works on Python 2.6+ (old comma form is obsolete)
            if hasattr(e, "reason"):
                print(u"连接失败", e.reason)
            return None

    def getItems(self):
        """Scrape every listing page and append its table rows to OUTPUT_PATH.

        Page 1 contributes rows 0-20 (header row included); every other page
        contributes rows 1-20 only (header skipped), matching the original
        first-page-with-title behavior.
        """
        # BUG FIX: there are 11 pages; range(1, 11) used to miss page 11.
        for pageIndex in range(1, self.PAGE_COUNT + 1):
            html = self.getHtml(pageIndex)
            if html is None:
                # Fetch failed (already reported by getHtml) — don't crash
                # BeautifulSoup with None, just move on to the next page.
                continue
            # Explicit parser avoids bs4's "no parser specified" warning and
            # makes the parse deterministic across installs.
            soup = BeautifulSoup(html, "html.parser")
            tr_list = soup.find_all("tr")
            # Keep the header row (index 0) only on the first page.
            start = 0 if pageIndex == 1 else 1
            # Open the file once per page (with-statement closes it even on
            # error) instead of reopening it for every single row.
            with open(self.OUTPUT_PATH, 'a+') as out:
                # Slicing (unlike absolute indexing) tolerates pages with
                # fewer than 21 <tr> rows instead of raising IndexError.
                for row in tr_list[start:21]:
                    out.write(row.get_text("|", strip=True).encode("utf-8") + "|" + "\n")
# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    spider = YZBZ()
    spider.getItems()
# Scraping the Yaozhi standards database (db.yaozh.com) with BeautifulSoup.
# (Blog-platform footer removed: "latest recommended article published 2022-03-10 19:03:18";
# the stray prose was not valid Python and broke the file's syntax.)