import requests as res
import time
from pandas import DataFrame
from bs4 import BeautifulSoup as bs
# Scrape product listings (name, price, manufacturer, publish date, location)
# from pages 1-9 of autoho.com category 11 and save them to a CSV file.
(name, price, comp, date, ad) = ([], [], [], [], [])

# Loop-invariant request header: hoisted out of the page loop.
head = {'User-Agent': 'Mozilla/6.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'}

for page in range(1, 10):
    # Page 1 uses the bare listing URL; later pages add a &page=N parameter.
    if page == 1:
        url = 'http://www.autoho.com/sca_list.asp?id=11&scasort='
    else:
        url = 'http://www.autoho.com/sca_list.asp?id=11&scasort=&page={}'.format(page)
    html = res.get(url, headers=head)
    html.encoding = 'utf-8'  # site serves UTF-8; decoding as gbk would garble the Chinese text
    soup = bs(html.text, 'html.parser')
    time.sleep(4)  # throttle between requests to be polite to the server

    # Outer product grid; each inner <table> is one product row.
    product_grid = soup.find('table', id="dlProduct",
                             style="WIDTH: 100%; BORDER-COLLAPSE: collapse",
                             cellspacing="0", border="0")
    for i in product_grid.findAll('table'):
        tname = i.strong.find('font').string.strip()
        name.append(tname)  # product name

        # Price: missing on some rows, so a failed tag lookup (None.string)
        # raises AttributeError — record 'NONE' instead of crashing.
        try:
            tprice = i.find('font',
                            style="FONT-WEIGHT: bold; FONT-SIZE: 13px; FONT-FAMILY: arial",
                            color="#ff6600").string
        except AttributeError:
            tprice = 'NONE'
        price.append(tprice)  # price

        # Info cell holds manufacturer/date/location; hoisted: looked up once
        # instead of three times per row.
        info_cell = i.find('td', width="293")

        try:
            tcomp = info_cell.findAll('font', color="001D4C")[1].string
        except (AttributeError, IndexError):  # cell or second <font> missing
            tcomp = 'NONE'
        comp.append(tcomp)  # manufacturer

        try:
            tdate = info_cell.findAll('font', color="666666")[0].string
        except (AttributeError, IndexError):
            tdate = 'NONE'
        date.append(tdate)  # publish date

        try:
            # Location is rendered as "[city]"; strip the brackets.
            tad = info_cell.findAll('font', color="666666")[1].string.strip('[').strip(']')
        except (AttributeError, IndexError):
            tad = 'NONE'
        ad.append(tad)  # location

    print('已爬取第%d页' % page)

# Assemble the parallel lists into a DataFrame and write it out.
dict1 = {'产品名称': name, '产品价格': price, '制造商': comp, '发布日期': date, '地址': ad}
df = DataFrame(dict1)
df.to_csv(r'C:\Users\ANT\Desktop\中国汽配数据完整.csv', encoding='gbk')
df  # bare expression only shows output in a notebook; no effect as a script