#爬取12365auto投诉信息
#导入request、BeautifulSoup、Pandas
import requests
from bs4 import BeautifulSoup
import pandas as pd
#url = 'http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-1.shtml'
#得到网页信息
def get_page_soup(url):
    """Fetch *url* and return its HTML parsed as a BeautifulSoup object.

    Parameters
    ----------
    url : str
        Absolute URL of a 12365auto complaint-list page.

    Returns
    -------
    bs4.BeautifulSoup
        Parsed document, ready for `.find(...)` queries.

    Raises
    ------
    requests.RequestException
        On connection failure, timeout, or (via raise_for_status) an
        HTTP error status.
    """
    # Browser-like User-Agent so the site does not reject the scraper.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    }
    # NOTE: timeout=3 aborts a request that takes longer than 3 seconds;
    # it is NOT a polite delay between requests (the original comment
    # mislabeled it as a 3-second interval).
    response = requests.get(url, headers=headers, timeout=3)
    response.raise_for_status()  # fail loudly on 4xx/5xx instead of parsing an error page
    # `response.text` is already a decoded str, so `from_encoding` (which the
    # original passed) would be ignored with a UserWarning — dropped here.
    return BeautifulSoup(response.text, features='html.parser')
#print(get_page_soup(url)
#提取具体内容
def get_page_content(soup):
    """Extract the complaint table from one parsed list page.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
        Parsed page as returned by `get_page_soup`; must contain a
        `<div class="tslb_b">` holding the complaints `<table>`.

    Returns
    -------
    pandas.DataFrame
        One row per complaint with columns: id, brand, car_model, type,
        desc, problem, datetime, status.
    """
    columns = ['id', 'brand', 'car_model', 'type', 'desc', 'problem', 'datetime', 'status']
    table = soup.find('div', class_='tslb_b')
    rows = []
    for tr in table.find_all('tr'):
        td_list = tr.find_all('td')
        # Header rows use <th>, so they yield no <td> cells — skip them.
        if not td_list:
            continue
        rows.append({name: td.text for name, td in zip(columns, td_list)})
    # Build the frame once from the collected dicts: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and was O(n^2) anyway.
    return pd.DataFrame(rows, columns=columns)
#soup = get_page_soup(url)
#print(get_page_content(soup))
# Driver: scrape the first `page_num` complaint-list pages and save to CSV.
page_num = 50
base_url = 'http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-'
# Collect per-page frames and concatenate once at the end —
# DataFrame.append was removed in pandas 2.0 and was quadratic.
frames = []
for page in range(1, page_num + 1):
    # List pages are 1-indexed: ...0-0-1.shtml, ...0-0-2.shtml, ...
    url = f'{base_url}{page}.shtml'
    soup = get_page_soup(url)
    frames.append(get_page_content(soup))
result = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame(
    columns=['id', 'brand', 'car_model', 'type', 'desc', 'problem', 'datetime', 'status'])
# index=False: row numbers carry no information, keep the CSV clean.
result.to_csv('car_com.csv', index=False)
# --- Blog-page residue from the copied source (not code; kept as comments) ---
# python 爬取12365auto投诉信息
# 最新推荐文章于 2021-04-10 01:22:15 发布