# 系统: win10 x64 + python3.6
# 注意事项:
# webdriver(特别是 chromedriver)的版本要与自己浏览器的版本一致。
# time.sleep() 是为了保证爬取成功,不然页面会一闪而过,什么都爬不到。
# 各个模块的下载安装请自行百度。
# 原网址已加密,需要使用 pandas 中的 read_html 函数读取表格内容。
import time
import pandas as pd
import requests
from bs4 import BeautifulSoup
from urllib import parse
from selenium import webdriver
# Launch a local Chrome instance; the chromedriver version must match the
# installed browser version. Raw string avoids backslash-escape problems
# in the Windows path ('\P', '\A' etc. are invalid escape sequences and
# become hard errors in newer Python versions).
driver = webdriver.Chrome(r'D:\ProgramData\Anaconda3\selenium\chromedriver.exe')
# Base query URL; the city name and month are appended per request.
base_url = 'https://www.aqistudy.cn/historydata/daydata.php?city='
# NOTE(review): str_city appears unused in this script — kept for
# backward compatibility; confirm nothing external relies on it.
str_city = '北京'
def get_month_set(start_year=2014, end_year=2017):
    """Return zero-padded 'YYYY-MM' strings for every month in the range.

    Args:
        start_year: first year included (default 2014, original behavior).
        end_year: last year included, inclusive (default 2017).

    Returns:
        list of str, e.g. ['2014-01', '2014-02', ..., '2017-12'].
    """
    # %02d zero-pads months 1-9, matching the original '0%s' formatting.
    return ['%d-%02d' % (year, month)
            for year in range(start_year, end_year + 1)
            for month in range(1, 13)]
def get_city_set(path='city.txt'):
    """Read the city list, one city name per line, UTF-8 encoded.

    Args:
        path: path to the city list file (default 'city.txt',
            original behavior).

    Returns:
        list of str with surrounding whitespace stripped; blank lines
        yield empty strings, matching the original behavior.

    Raises:
        OSError: if the file cannot be opened.
    """
    # 'with' guarantees the file is closed (the original leaked the
    # handle); text mode with explicit encoding replaces the manual
    # bytes -> str decode.
    with open(path, encoding='utf-8') as fp:
        return [line.strip() for line in fp]
month_set = get_month_set()
city_set = get_city_set()
# try/finally guarantees the browser is closed even if a request or
# parse step raises mid-run (the original left Chrome open on error).
try:
    # enumerate replaces the O(n) city_set.index(city) lookup per print.
    for city_idx, city in enumerate(city_set):
        file_name = city + '.csv'
        # NOTE(review): encoding follows the platform default, as in the
        # original; pass encoding='utf-8' explicitly if consumers of the
        # CSV expect UTF-8 on Windows.
        with open('aqi/' + file_name, 'w') as fp:
            # Header row: date, AQI, grade, PM2.5, PM10, SO2, CO, NO2, O3_8h.
            fp.write('%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % ('日期','AQI','质量等级','PM25','PM10','SO2','CO','NO2','O3_8h'))
            # URL-encode once per city: loop-invariant across months.
            utf8_city = parse.quote(city)
            for str_month in month_set:
                weburl = '%s%s&month=%s' % (base_url, utf8_city, str_month)
                driver.get(weburl)
                # Give the JS-rendered table time to load; scraping the
                # page source too early yields nothing.
                time.sleep(1)
                # First table on the page holds the daily AQI records.
                dfs = pd.read_html(driver.page_source, header=0)[0]
                for row in range(len(dfs)):
                    # Columns 0-8: date, AQI, grade, PM25, PM10, SO2, CO, NO2, O3_8h.
                    values = tuple(dfs.iloc[row, col] for col in range(9))
                    fp.write('%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % values)
                print('%d---%s,%s---DONE' % (city_idx, utf8_city, str_month))
finally:
    driver.quit()