Target website: Tianqihoubao (www.tianqihoubao.com)
Data storage: local CSV files
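The script below works in three levels: the index page at /aqi/ links to each city, each city page links to its monthly AQI pages, and each monthly page carries one table row per day, which ends up as one CSV file per city and month. Before running the full crawl, it can help to confirm that the city-list XPath still matches the page; here is a minimal sketch using the same URL and selector as the scraper. Setting the response encoding to GBK is an assumption about the site's charset (the full script instead re-decodes individual strings):

import requests
from lxml import etree

url = 'http://www.tianqihoubao.com/aqi/'
headers = {'User-Agent': 'Mozilla/5.0'}

resp = requests.get(url, headers=headers)
resp.encoding = 'gbk'  # assumption: pages on this site are GBK-encoded
tree = etree.HTML(resp.text)

# Same selector the scraper uses; the scraper skips the first <dd> entry
links = tree.xpath('//div[@class="citychk"]/dl/dd/a')
print(len(links), 'city links matched')
for a in links[1:6]:
    print(a.text, '->', a.get('href'))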
import csv

import requests
from lxml import etree


if __name__ == '__main__':
    url = 'http://www.tianqihoubao.com/aqi/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.204 Safari/537.36'
    }
    page_text = requests.get(url=url, headers=headers).text
    tree = etree.HTML(page_text)
    # City entries on the index page; the first <dd> is not a city, so skip it
    city_list = tree.xpath('//div[@class="citychk"]/dl/dd')[1:]
    for dd in city_list:
        # City name and link to the city's AQI page
        city_name = dd.xpath('./a/text()')[0]
        # requests decodes the page as ISO-8859-1 although it is actually GBK, so re-decode
        city_name = city_name.encode('iso-8859-1').decode('gbk')
        city_href = dd.xpath('./a/@href')[0]
        city_url = 'http://www.tianqihoubao.com' + city_href
        # Fetch the city's AQI page
        city_AQI = requests.get(url=city_url, headers=headers).text
        city_tree = etree.HTML(city_AQI)
        # Links to the monthly AQI pages
        city_aqi_list = city_tree.xpath('//div[@class="box p"]//li')[4:16]
        for li in city_aqi_list:
            # Title and URL of this month's AQI page
            li_name = li.xpath('./a/@title')[0]
            li_href = li.xpath('./a/@href')[0]
            li_url = 'http://www.tianqihoubao.com' + li_href
            # Fetch the month's detail page
            li_page = requests.get(url=li_url, headers=headers).text
            li_tree = etree.HTML(li_page)
            # One <tr> per day; the first row is the table header and is skipped below
            li_list = li_tree.xpath('//div[@class="api_month_list"]//tr')
            rows = [['日期', '质量等级', 'AQI指数', 'PM2.5', 'PM10', 'SO2', 'NO2', 'CO', 'O3']]
            path = city_name + li_name + '.csv'
            with open(path, 'w', encoding='utf-8', newline='') as fp:
                for data in li_list[1:]:
                    # Collect one day's values column by column
                    list_data = []
                    data_name = str(data.xpath('./td[1]/text()')[0]).strip()
                    list_data.append(data_name)
                    grade = str(data.xpath('./td[2]/text()')[0]).strip()
                    list_data.append(grade)
                    AQI = str(data.xpath('./td[3]/text()')[0]).strip()
                    list_data.append(AQI)
                    PM25 = data.xpath('./td[5]/text()')[0]
                    list_data.append(PM25)
                    PM10 = data.xpath('./td[6]/text()')[0]
                    list_data.append(PM10)
                    So2 = data.xpath('./td[7]/text()')[0]
                    list_data.append(So2)
                    No2 = data.xpath('./td[8]/text()')[0]
                    list_data.append(No2)
                    Co = data.xpath('./td[9]/text()')[0]
                    list_data.append(Co)
                    O3 = data.xpath('./td[10]/text()')[0]
                    list_data.append(O3)
                    rows.append(list_data)
                # Persist the header plus all daily rows as one CSV per city and month
                writer = csv.writer(fp)
                writer.writerows(rows)
            print(li_name, 'over!!!')
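Each run writes one file per city and month, named city_name + li_name + '.csv'. A quick way to spot-check the output is to read a file back with the csv module; the filename below is only a placeholder:

import csv

path = 'city_month.csv'  # placeholder: substitute a file the scraper actually produced

with open(path, encoding='utf-8', newline='') as fp:
    reader = csv.reader(fp)
    header = next(reader)          # 日期, 质量等级, AQI指数, PM2.5, ...
    for row in reader:
        print(dict(zip(header, row)))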