在制作完二维码,并把二维码链接识别出来保存为 csv 文件之后(做法见前两篇博文),现在开始爬取二维码里存储的内容。
需要爬取的url链接保存在二维码链接.csv文件中,此文件就两行url链接,如下
二维码里存储的数据如下
我们可以看一下这个网页的源文件
爬虫首先找到body标签(获取表格里的信息),然后在body标签中解析tr标签(获得每一行的信息),再把tr标签里的td标签找到(把相对应的信息写到ulist中)。
代码如下
import requests
from bs4 import BeautifulSoup
import bs4
def GetQrcodeText(url):
    """Fetch the HTML text of the page behind a QR-code URL.

    Args:
        url: the page URL decoded from the QR code.

    Returns:
        The decoded page text, or the sentinel string "获取网页异常"
        when the request fails (kept for backward compatibility with
        existing callers that expect a string in all cases).
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # raise on 4xx/5xx status codes
        # Use the detected encoding so Chinese text decodes correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so that programming errors and
        # KeyboardInterrupt are no longer silently swallowed.
        return "获取网页异常"
def FillQrcodeList(qlist, html):
    """Parse the Typora-exported table page and append one entry per row.

    Args:
        qlist: output list; each appended item is a 6-element list of cell
            strings (兔舍, 笼牌号, 耳号, 品系, 疫苗种类, 接种时间).
        html: page HTML as returned by GetQrcodeText.
    """
    soup = BeautifulSoup(html, 'html.parser')
    body = soup.find('body', {'class': 'typora-export os-windows'})
    if body is None:
        # Page layout differs from the expected Typora export (e.g. the
        # error string from GetQrcodeText) -- nothing to parse.
        return
    for tr in body.children:
        # Skip NavigableString children (whitespace between tags).
        if not isinstance(tr, bs4.element.Tag):
            continue
        tds = tr('td')
        # Guard against rows without the expected 6 cells; the original
        # code raised IndexError on such rows.
        if len(tds) >= 6:
            qlist.append([td.string for td in tds[:6]])
def PrintQrcodeList(qlist, num):
    """Print the first *num* rows of qlist as an aligned table.

    The fill character chr(12288) (full-width space, U+3000) is used so
    that columns containing Chinese text line up when printed.
    """
    fill = chr(12288)
    row_fmt = "{0:{6}^6}\t{1:{6}^10}\t{2:{6}^4}\t{3:{6}^8}\t{4:{6}^10}\t{5:{6}^4}"
    # Header line intentionally not printed: this function is called once
    # per URL inside main()'s loop, so the header would repeat.
    # print(row_fmt.format("兔舍","笼牌号","耳号","品系","疫苗种类","接种时间", fill))
    for idx in range(num):
        print(row_fmt.format(*qlist[idx], fill))
def main():
    """Read QR-code URLs (one per line) from the CSV file and print the
    table stored behind each one."""
    # Raw string avoids any accidental escape sequences in the Windows path.
    with open(r"F:\二维码链接.csv") as f:
        # Iterate the file object directly instead of the original manual
        # readline()/break loop -- same order, same lines.
        for line in f:
            url = line.rstrip()
            if not url:
                continue  # skip blank lines instead of requesting ""
            qinfo = []
            html = GetQrcodeText(url)
            FillQrcodeList(qinfo, html)
            PrintQrcodeList(qinfo, 1)


if __name__ == "__main__":
    # Guarded entry point so importing this module no longer triggers the crawl.
    main()
运行结果如下: