Web scraper (rain probability for Taiyuan, September-October 2019)

# http://www.weather.com.cn/weather40d/101100101.shtml
# Collect the rain probability for Taiyuan for September and October 2019
import requests
import json
import pandas as pd

# Result list
result_list = []
for i in range(9, 11):
    # URL to scrape for this month
    if i == 9:
        url = "http://d1.weather.com.cn/calendar_new/2021/101100101_202109.html?_=1668857427454"
    elif i == 10:
        url = "http://d1.weather.com.cn/calendar_new/2021/101100101_202110.html?_=1668859461989"


    headers = {
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Connection": "keep-alive",
        "Cookie": "f_city=%E5%A4%A7%E5%90%8C%7C101100201%7C",
        "Content-Encoding": "gzip",
        "Content-Type": "text/html",
        "Referer": "http://www.weather.com.cn/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36 Edg/107.0.1418.42"
    }
    # Fetch the monthly calendar data
    resp = requests.get(url=url, headers=headers)
    resp.encoding = 'utf-8'
    # The body starts with a short JavaScript assignment; strip it so the remainder parses as JSON
    data_str = resp.text[10:]
    # Parse the JSON string into a list of daily records
    data_list = json.loads(data_str)
    # Loop over the daily records and extract the fields we need
    for data in data_list:
        rain = data['hgl']     # rain probability
        date = data['date']    # date string
        result = {}
        result['城市'] = '太原'
        result['降水概率'] = rain
        result['日期'] = date
        result_list.append(result)

print(result_list)
# Write the results to an Excel file with pandas
data = pd.DataFrame(result_list)
with pd.ExcelWriter('降雨.xlsx') as writer:
    data.to_excel(writer, sheet_name='page_1', float_format='%.5f')
print("Done!")
