python爬虫气象数据_python爬虫实战——爬取气象数据保存,Python,爬取,天气

本文介绍了使用Python进行网页爬虫的基本步骤,包括获取HTML信息、解析数据和保存到CSV文件。示例代码展示了如何爬取并解析一个天气网站的数据,包括日期、最高温度、最低温度和风力等级,最终将这些信息写入CSV文件。
摘要由CSDN通过智能技术生成

个人总结的爬虫(爬取数据)的简单步骤:

1、获取待爬取网页的html信息

2、解析爬取的html信息,得到相关的数据

3、保存数据

# coding : UTF-8

import requests

import csv

import random

import time

import socket

import http.client

from bs4 import BeautifulSoup

def get_content(url, data=None):
    """Download *url* and return its HTML as a UTF-8 decoded string.

    Retries indefinitely on transient network failures, sleeping a random
    number of seconds between attempts.  ``data`` is accepted for interface
    compatibility but is unused.

    :param url: page URL to fetch
    :param data: unused, kept for backward compatibility
    :return: the response body as text (``rep.text``)
    """
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70'
    }
    # Generous randomized timeout (seconds) so requests cannot hang forever.
    timeout = random.choice(range(80, 180))
    while True:
        try:
            rep = requests.get(url, headers=header, timeout=timeout)
            rep.encoding = 'utf-8'  # site serves UTF-8; override requests' guess
            break
        # BUG FIX: requests wraps low-level socket / http.client errors in its
        # own exception hierarchy, so the original socket.* / http.client.*
        # handlers almost never fired and a transient failure crashed the
        # retry loop.  Catch the requests exceptions first and keep the old
        # handlers as a fallback for direct low-level errors.
        except requests.exceptions.Timeout as e:
            print('3:', e)
            time.sleep(random.choice(range(8, 15)))
        except requests.exceptions.ConnectionError as e:
            print('4:', e)
            time.sleep(random.choice(range(20, 60)))
        except socket.timeout as e:
            print('3:', e)
            time.sleep(random.choice(range(8, 15)))
        except socket.error as e:
            print('4:', e)
            time.sleep(random.choice(range(20, 60)))
        except http.client.BadStatusLine as e:
            print('5:', e)
            time.sleep(random.choice(range(30, 80)))
        except http.client.IncompleteRead as e:
            print('6:', e)
            time.sleep(random.choice(range(5, 15)))
    return rep.text

def get_data(html_text):
    """Parse a weather.com.cn 7-day forecast page into a list of rows.

    :param html_text: HTML of the forecast page (as returned by get_content)
    :return: list of rows ``[date, description, high, low, wind]``; the
             high temperature is ``None`` when the page omits it (this
             happens for the current day after nightfall), temperatures
             have the trailing '℃' stripped.
    """
    final = []
    bs = BeautifulSoup(html_text, "html.parser")
    body = bs.body
    data = body.find('div', {'class': 'c7d'})  # 7-day forecast container
    ul = data.find('ul')
    li = ul.find_all('li')  # one <li> per forecast day
    for day in li:
        temp = []
        date = day.find('h1').string
        temp.append(date)
        inf = day.find_all('p')
        temp.append(inf[0].string)  # weather description (e.g. 晴 / 多云)
        # BUG FIX: at night the page omits the high-temperature <span>, and
        # the original code went on to call .replace() on the resulting None,
        # raising AttributeError.  Only strip the unit when a value exists.
        span = inf[1].find('span')
        if span is None or span.string is None:
            temperature_highest = None
        else:
            temperature_highest = span.string.replace('℃', '')
        temperature_lowest = inf[1].find('i').string
        temperature_lowest = temperature_lowest.replace('℃', '')
        temp.append(temperature_highest)
        temp.append(temperature_lowest)
        wind = inf[2].find('i').string  # wind level, e.g. "<3级"
        temp.append(wind)
        final.append(temp)
    return final

def write_data(data, name):
    """Append the rows in *data* to the CSV file *name*.

    :param data: iterable of row sequences (as produced by get_data)
    :param name: target CSV file path; created if missing, appended otherwise
    """
    # BUG FIX: specify UTF-8 explicitly -- the locale default encoding
    # (e.g. GBK on Chinese Windows) cannot represent all scraped text and,
    # combined with errors='ignore', silently dropped characters.
    with open(name, 'a', encoding='utf-8', errors='ignore', newline='') as f:
        f_csv = csv.writer(f)
        f_csv.writerows(data)

if __name__ == '__main__':
    # Forecast page for the chosen city (weather.com.cn city code 101220802).
    target_url = 'http://www.weather.com.cn/weather/101220802.shtml'
    # Fetch -> parse -> persist.
    page_html = get_content(target_url)
    forecast_rows = get_data(page_html)
    write_data(forecast_rows, 'weather.csv')

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值