Python: Crawling Historical Earthquake Data and Visualizing It

The approach: simulate a browser request to fetch the page source, parse the data out with XPath, then save it to an Excel spreadsheet or a MySQL database.

  • Crawl target

http://www.ceic.ac.cn/speedsearch
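
The result list is paginated: each page is fetched with two query parameters, time (which appears to select one of the site's preset time ranges; the script below uses time=6) and page (the page number). A typical request therefore resolves to http://www.ceic.ac.cn/speedsearch?time=6&page=1.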

  • Result preview
    [Figure: global historical earthquake data plotted on a world map]
  • Implementation code
# -*- coding: utf-8 -*-

"""
@File    : request200606_中国地震台网.py
@Author  : fungis@163.com
@Time    : 2020/05/06 09:28
@notice  : http://www.ceic.ac.cn/speedsearch
"""

import datetime
import requests
from lxml import etree
import pymysql
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Geo
from pyecharts.globals import ChartType, ThemeType

# Module-level containers for the scraped fields
earthquake_n = []             # magnitude
earthquake_t = []             # origin time
earthquake_location_lat = []  # latitude
earthquake_location_lon = []  # longitude
earthquake_location = []      # location name
earthquake_url = []           # detail-page URL
earthquake_depth = []         # focal depth
earth_data = []               # assembled rows
geo_location = {}             # {location name: [lon, lat]}
geo_attrs = []                # location names for the chart
geo_values = []               # magnitudes for the chart


# Fetch the page source
def Get_html(url: str, params: dict) -> str:
    header = {'User-Agent': 'Mozilla/5.0'}
    r = requests.get(url, params=params, headers=header)
    print(r.url)
    if r.status_code == 200:
        r.encoding = r.apparent_encoding
        html = r.text
    else:
        print("Failed to fetch the page:", r.status_code)
        html = "<html></html>"  # empty document, so Get_data finds no rows
    return html
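
# Usage sketch (the meaning of "time" is an assumption based on the site's URL scheme):
#   Get_html("http://www.ceic.ac.cn/speedsearch", {"time": 6, "page": 1})
# returns the HTML of the first result page for the selected time range.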


# Parse the page and pull the fields out of the results table
def Get_data(html):
    html = etree.HTML(html)
    trs = html.xpath("//div[@class='title-content']/div[@class='speedquery']/div[@id='speed-search']/table["
                     "@class='speed-table1']/tr")
    for tr in trs:
        earthquake_m1 = tr.xpath("./td[1]/text()")
        earthquake_t1 = tr.xpath("./td[2]/text()")
        earthquake_location_lat1 = tr.xpath("./td[3]/text()")
        earthquake_location_lon1 = tr.xpath("./td[4]/text()")
        earthquake_depth1 = tr.xpath("./td[5]/text()")
        earthquake_location1 = tr.xpath("./td[6]/a/text()")
        earthquake_url1 = tr.xpath("./td[6]/a/@href")

        try:
            earthquake_n.append(earthquake_m1[0])
            earthquake_t.append(earthquake_t1[0])
            earthquake_location_lat.append(earthquake_location_lat1[0])
            earthquake_location_lon.append(earthquake_location_lon1[0])
            earthquake_depth.append(earthquake_depth1[0])
            earthquake_location.append(earthquake_location1[0])
            earthquake_url.append(earthquake_url1[0])
        except IndexError:
            # header row or a row with missing cells: skip it
            pass

    # assemble one 7-tuple per record:
    # (magnitude, time, latitude, longitude, depth, location name, detail URL);
    # the lists accumulate across pages, so rows from earlier pages are
    # re-appended here and de-duplicated later in main
    for i in range(len(earthquake_n)):
        earth_data.append((earthquake_n[i], earthquake_t[i], earthquake_location_lat[i], earthquake_location_lon[i],
                           earthquake_depth[i], earthquake_location[i], earthquake_url[i]))


# Create the table in the MySQL database
def Mysql_create_table(name):
    client = pymysql.connect(user="root", host="localhost", passwd="an6688", db="pydat")
    cursor = client.cursor()
    sql = ("create table if not exists table_%s" % name +
           "(earthquake_n VARCHAR(100), earthquake_t VARCHAR(100), earthquake_location_lat VARCHAR(100), "
           "earthquake_location_lon VARCHAR(100), earthquake_depth VARCHAR(100), earthquake_location VARCHAR(200), "
           "earthquake_url VARCHAR(100));")
    cursor.execute(sql)
    cursor.close()
    client.close()
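
# Example (assumes a local MySQL server with a database named "pydat" and the
# credentials above): Mysql_create_table("ceic") creates a table called table_ceic.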


# Bulk-insert the earthquake rows into the MySQL table
def Mysql_data(name, earth_data):
    client = pymysql.connect(user="root", host="localhost", passwd="an6688", db="pydat")
    cursor = client.cursor()
    sql = "insert into table_%s" % name + " values(%s,%s,%s,%s,%s,%s,%s)"
    cursor.executemany(sql, earth_data)
    client.commit()
    cursor.close()
    client.close()
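
# Example: Mysql_data("ceic", earth_data) inserts all scraped rows in a single
# executemany() call; the table must already exist (see Mysql_create_table).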
    

# Plot the earthquakes on a world map with pyecharts
def Geo_chart(geo_location, geo_attrs, geo_values):
    attr = geo_attrs  # location names
    value = geo_values  # magnitudes

    # build the chart with pyecharts
    geo = Geo(init_opts=opts.InitOpts(width='1200px', height='700px', theme=ThemeType.DARK, bg_color="#404a59"))

    geo.set_global_opts(
        visualmap_opts=opts.VisualMapOpts(is_piecewise=True, max_=10),
        title_opts=opts.TitleOpts(title='Global Historical Earthquake Distribution', pos_left='500px'))

    # world basemap with custom fill and border colours
    geo.add_schema(
        maptype='world',
        itemstyle_opts=opts.ItemStyleOpts(border_color='#fff', color='#323C48'),
    )
    # register each location's coordinates with the chart
    for name, (lon, lat) in geo_location.items():
        geo.add_coordinate(name, lon, lat)

    data = list(zip(attr, value))

    geo.add("", data,
            type_=ChartType.EFFECT_SCATTER,  # ripple-effect scatter
            label_opts=opts.LabelOpts(is_show=False),  # hide per-point labels
            )
    geo.render('./earthquake.html')
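
# geo.render() writes a self-contained HTML file (./earthquake.html here) that
# can be opened directly in a browser to explore the interactive map.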


if __name__ == "__main__":
    url = "http://www.ceic.ac.cn/speedsearch"
    pages = int(input("How many result pages to crawl? "))
    for i in range(1, pages + 1):
        params = {"time": 6, "page": i}
        html = Get_html(url, params)
        Get_data(html)
    df = pd.DataFrame(earth_data, columns=['magnitude', 'time', 'latitude', 'longitude', 'depth', 'location', 'url'])
    df.drop_duplicates(inplace=True)  # de-duplicate rows accumulated across pages
    df.to_excel('./earthquake' + datetime.datetime.now().strftime('%Y%m%d') + '.xlsx')  # save to Excel

    # build the map inputs: {location: [lon, lat]}, names, and magnitudes
    for index, row in df.iterrows():
        geo_location[row['location']] = [float(row['longitude']), float(row['latitude'])]
        geo_attrs.append(row['location'])
        geo_values.append(float(row['magnitude']))
    Geo_chart(geo_location, geo_attrs, geo_values)
    print('Done: data saved and map rendered')
    
    # Optionally store the data in MySQL (requires a running MySQL server;
    # update the connection credentials in the functions above first)
    # name = input("Table name: ")
    # Mysql_create_table(name)
    # Mysql_data(name, earth_data)
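
A note on dependencies (a sketch of the environment; exact versions are an assumption): the script targets the pyecharts v1.x API (pyecharts.options, pyecharts.charts.Geo) and also needs requests, lxml, pandas and pymysql installed; pandas additionally needs an Excel engine such as openpyxl to write the .xlsx file.
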
  • Closing
    If you enjoyed this post, a follow is appreciated; more updates are on the way ^ - ^
