设计思路(暂缺):思路尚未整理成文字,后续补充。
代码
导入库
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
获取网页内容
def getHTMLText(url, timeout=30):
    """Fetch *url* and return the decoded page text.

    Args:
        url: Page URL to request.
        timeout: Seconds before the request is aborted (default 30).

    Returns:
        The response body as text, or the string '产生异常' on any
        request failure (kept so existing callers keep working).
    """
    try:
        # Bug fix: the original hard-coded 30 here and ignored the
        # `timeout` parameter entirely.
        r = requests.get(url, timeout=timeout)
        r.raise_for_status()
        # Let requests guess the real encoding from the body, since the
        # header-declared encoding on these pages is unreliable.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so that KeyboardInterrupt and
        # programming errors still propagate.
        return '产生异常'
获取网页源码
def get_html(html):
    """Parse *html* and return the document's <body> tag.

    Args:
        html: Raw HTML text (e.g. the return value of getHTMLText).

    Returns:
        The BeautifulSoup Tag for <body>, or None if there is no body.
    """
    # Removed the dead local `final_list` from the original, which was
    # assigned but never used.
    soup = BeautifulSoup(html, 'html.parser')
    return soup.body
处理异常值并去重的函数。不用 set 去重,是为了保持城市与 URL 两个列表的对应顺序,避免后续配对出错。
def check_all(s, a):
    """Return True when label *s* should be skipped.

    A label is skipped when it contains the placeholder text 'None',
    contains the '详情' (details) link text, or already appears in the
    collected list *a* (duplicate).
    """
    if s in a:
        return True
    return any(marker in s for marker in ('None', '详情'))
获取七天的天气数据
def get_data(html):
    """Extract the 7-day forecast from a city's weather page.

    Args:
        html: Raw HTML of a weather.com.cn city forecast page.

    Returns:
        A list with one entry per day:
        [date, weather description, high temp, low temp, wind scale].
        A missing temperature is returned as ' '.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # The 7-day forecast lives in the <div id="7d"> box as an <ul> of <li>.
    seven_day = soup.body.find('div', {'id': '7d'})
    days = seven_day.find('ul').find_all('li')
    final_list = []
    for day in days:
        date = day.find('h1').string
        info = day.find_all('p')
        # High temperature is in a <span>; it can be absent (e.g. for
        # today once the daytime forecast has passed).
        span = info[1].find('span')
        # Bug fix: Tag.string is None when the tag has nested children,
        # which made the original crash on .replace(); fall back to ' '.
        temperature_highest = ' ' if span is None else (span.string or ' ')
        temperature_highest = temperature_highest.replace('℃', ' ')
        # Low temperature is in an <i> tag, same caveats as above.
        i_tag = info[1].find('i')
        temperature_lowest = ' ' if i_tag is None else (i_tag.string or ' ')
        temperature_lowest = temperature_lowest.replace('℃', ' ')
        wind_scale = info[2].find('i').string
        final_list.append(
            [date, info[0].string, temperature_highest,
             temperature_lowest, wind_scale]
        )
    return final_list
获取所有省的url
# Fetch the national index page and collect every province's name and URL.
url = 'http://www.weather.com.cn/textFC/hn.shtml'
html = getHTMLText(url)
final_list = get_html(html)
# The province links live in the page-header navigation box.
final_list = final_list.find('div', {'class': 'lqcontentBoxheader'})
final_list = final_list.find_all('li')
final_a = []
add = []  # province names, in page order
url = []  # absolute province URLs (hrefs on this page are site-relative)
s = 'http://www.weather.com.cn'
for item in final_list:
    # Renamed from `list` in the original, which shadowed the builtin.
    anchors = item.find_all('a')
    for a_tag in anchors:
        add.append(a_tag.string)
        url.append(s + a_tag.get('href'))
    final_a.append(anchors)
print(url)
print(add)
获取每个省各个市区县的url
# For every province page, collect each district/county label and its
# forecast URL. Order is preserved so the two lists can be zipped later.
count = 0
index = 0
province_info = {}  # province name -> list of 'province-city' labels
final_url = []      # per-province lists of forecast URLs
final_city = []     # per-province lists of 'province-city' labels
# The original reused `i` for all three nesting levels and reused `s`
# for three unrelated values; loop variables are renamed for clarity.
for province_url in url:
    html = getHTMLText(province_url)
    page_body = get_html(html)
    tables = page_body.find_all('div', {'class': 'conMidtab3'})
    final_a = []
    city = []
    c_url = []
    for table in tables:
        for link in table.find_all('a'):
            # Prefix each city with its province so labels stay unique.
            label = str(add[index]) + '-' + str(link.string)
            # Skip placeholder rows, '详情' links, and duplicates.
            if check_all(label, city):
                continue
            city.append(label)
            c_url.append(link.get('href'))
            final_a.append(label)
    final_url.append(c_url)
    final_city.append(city)
    count += len(city)
    province_info[add[index]] = city
    index += 1
print(count)
print(final_city)
print(final_url)
print(province_info)
把各个标签映射成字典
# Flatten the per-province lists into two parallel flat lists, then pair
# each city label with its forecast URL.
flat_city = []
for group in final_city:
    flat_city.extend(group)
flat_url = []
for group in final_url:
    flat_url.extend(group)
final_city = flat_city
final_url = flat_url
final_dic = dict(zip(final_city, final_url))
print(final_dic)
遍历字典,获取各个县市七天的天气详情
# Walk every city's forecast URL and prepend the city label to each
# day's row, printing rows as they arrive.
final_result = []
for city_name in final_dic:
    url = final_dic[city_name]  # reuses the module-level `url` as scratch
    html = getHTMLText(url)
    weather_rows = get_data(html)
    city_rows = []
    for row in weather_rows:
        record = [city_name] + row
        city_rows.append(record)
        print(record)
    final_result.append(city_rows)