# Standard library
import os
import urllib.request

# Third-party: bs4 for HTML parsing
from bs4 import BeautifulSoup
# Third-party: pandas for CSV output
import pandas as pd
def gethtml(url):
    """Fetch one Xinfadi market-analysis listing page and append its
    price table to the CSV file ``新发地肉禽蛋信息.csv``.

    Parameters
    ----------
    url : str
        URL of a single listing page (one page of the paginated table).

    Raises
    ------
    urllib.error.URLError
        If the HTTP request fails.
    AttributeError
        If the expected ``<table class="hq_table">`` is not found on the page.
    """
    # Spoof a browser User-Agent so the server does not reject the request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'
    }
    req = urllib.request.Request(url, headers=headers)
    # Send the request and read/decode the response body.
    response = urllib.request.urlopen(req)
    html = response.read().decode()
    # Parse the page; the price data lives in <table class="hq_table">.
    soup = BeautifulSoup(html, 'lxml')
    table = soup.find("table", class_="hq_table")
    rows = table.find_all("tr")
    # The first row holds the column titles.
    title = [cell.string for cell in rows[0].find_all("td")]
    # Remaining rows are the data records.
    reslist = [[cell.string for cell in row.find_all("td")]
               for row in rows[1:]]
    # Append to the CSV so successive pages accumulate instead of
    # overwriting each other; write the header only on the first page.
    out = "新发地肉禽蛋信息.csv"
    xfd = pd.DataFrame(reslist, columns=title)
    xfd.to_csv(out, mode="a", header=not os.path.exists(out))
# Crawl the first 10 listing pages (the original range(1, 10) only
# covered pages 1-9, contradicting the "10 pages" intent).
for page in range(1, 11):
    gethtml('http://www.xinfadi.com.cn/marketanalysis/3/list/{}.shtml'.format(page))