与上一篇博客处理方式类似,爬取全国2453个地区的天气,并存为Excel。
简便起见,只解析出了今天的天气。
有兴趣的读者可以用同样的办法抓取7天以内的天气,
甚至一个月以内的,这都不是问题。
这里推荐一下 xlsxwriter 库,尽管 csv 更简单,但是无法控制格式。
效果如图:
附上代码:
import requests, re
from bs4 import BeautifulSoup
import xlsxwriter
import datetime
def get_URLs(start_URL):
    """Collect region detail-page URLs from the province index page.

    Fetches start_URL, finds every element with class 'wd_piC' and extracts
    all http(s) URLs from its markup with a regex.

    :param start_URL: index page listing the per-region weather pages
    :return: list of absolute URL strings (empty if the fetch failed)
    """
    URLs = []
    # Bug fix: the original called get_html(start_url), silently relying on
    # the global defined in __main__ instead of the parameter.
    html = get_html(start_URL)
    if html is None:
        # get_html prints its own diagnostic and returns None on failure.
        return URLs
    soup = BeautifulSoup(html.text, 'lxml')
    anchors = soup.find_all(class_='wd_piC')
    # Matches http/https URLs embedded in the tag markup.
    pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    for tag in anchors:
        URLs.extend(re.findall(pattern, str(tag)))
    return URLs
def get_html(url):
    """GET a page with a desktop browser User-Agent.

    :param url: page to fetch
    :return: a requests.Response with encoding set from apparent_encoding,
             or None when the request fails (a message is printed).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
        'ContentType': 'text/html; charset=utf-8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive'
    }
    try:
        # Keep the try body minimal: only the network call can raise here.
        html = requests.get(url, headers=headers, timeout=30)
        # Let requests guess the real encoding (pages are GBK/UTF-8 mixed).
        html.encoding = html.apparent_encoding
        return html
    except requests.RequestException:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit and fell through to an implicit None.
        print('获取HTML失败')
        return None
# Module-level state shared between get_data() and storeWeatherList():
# date -- the two date strings captured from the first page's header table
#         (filled once, then reused for every region row)
# fg   -- True until that header has been captured; flipped by get_data()
date = []
fg = True
def get_data(html):
    """Parse one region page into a flat list of cell strings.

    On the very first call (fg is True) the first <table> is used to capture
    the two date strings into the module-level `date` list; every call then
    flattens the remaining tables' cells into the returned list.
    """
    global date, fg
    soup = BeautifulSoup(html.text, 'lxml')
    tables = soup.find_all('table')
    if fg:
        # One-time: pull the two date labels out of the header table.
        header_cells = [cell.getText().strip()
                        for tr in tables[0].findAll('tr')
                        for cell in tr.findAll('td')]
        date.append(header_cells[2])
        date.append(header_cells[3])
        fg = False
    # Flatten every remaining table into a single list of cell texts.
    return [cell.getText().strip()
            for table in tables[1:]
            for tr in table.findAll('tr')
            for cell in tr.findAll('td')]
def createListHeader(workbook):
    """Create the worksheet and write the bold red header row.

    Also sets each column's width. Returns the new worksheet.
    """
    worksheet = workbook.add_worksheet()
    bold = workbook.add_format({'bold': True, 'font_color': 'red', 'align': 'center'})
    # (header text, column width) per column A..I.
    columns = [
        ('市/县/区', 9),
        ('日期', 26),
        ('天气状况', 9),
        ('风力方向', 16),
        ('最高温度', 9),
        ('日期', 26),
        ('天气状况', 12),
        ('风力方向', 16),
        ('最低温度', 9),
    ]
    for index, (title, width) in enumerate(columns):
        letter = chr(ord('A') + index)
        worksheet.write(letter + '1', title, bold)
        worksheet.set_column(letter + ':' + letter, width)
    return worksheet
# Next worksheet row to fill; starts at 1 because row 0 holds the header.
row = 1
def storeWeatherList(data, worksheet, font):
    """Write one region's cell data into the worksheet.

    `data` is consumed in chunks of 7 cells; the two module-level date
    strings are spliced in at positions 1 and 5, producing 9 columns.
    Advances the module-level `row` counter after each record.
    """
    global row
    for start in range(0, len(data), 7):
        record = data[start:start + 7]
        record.insert(1, date[0])
        record.insert(5, date[1])
        for offset in range(9):
            worksheet.write(row, offset, record[offset], font)
        row += 1
if __name__ == '__main__':
    # Index page for all regions of Hunan province on Sina Weather.
    start_url = 'http://weather.sina.com.cn/china/hunansheng/'
    URLs = get_URLs(start_url)
    URLs.sort()
    workbook = xlsxwriter.Workbook( 'WeatherList.xlsx') # create the workbook
    worksheet = createListHeader(workbook)
    font = workbook.add_format({'align': 'center'})
    print("\n ***** 开始爬取天气 *****\n")
    for url in URLs:
        try:
            html = get_html(url)
            data = get_data(html)
            time = datetime.datetime.now()
            # [10:19] slices out the HH:MM:SS part of the timestamp string.
            print(str(time)[10:19] + ": 正在爬取第 " + str(row) + " 个地区的天气")
            storeWeatherList(data, worksheet, font)
        except:
            # Best-effort: skip any page that fails to fetch/parse.
            print(url, " 页面出错")
            continue
    try:
        # NOTE(review): the "success" message prints before close() actually
        # writes the file -- consider moving it after close(); verify.
        print("\n ****** 存储成功! ******")
        # close() fails (e.g. PermissionError) if the file is open in Excel.
        workbook.close()
        print("\n ******* 爬取结束 *******")
    except:
        print("\n-------------------文档关闭失败----------------\n")
        print("---------------请先关闭 Excel 再重新爬取--------")
有些地方写的比较丑,如果你有更好的写法,请立即联系我哦