2024 national administrative region crawler script (including streets/towns)

import os
import threading

import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED

# Lookup pages for the statistics bureau's division codes (2023 edition directory)
base_url = 'https://www.stats.gov.cn/sj/tjbz/tjyqhdmhcxhfdm/2023/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0'
}

# One crawl task per province; writes to the shared output file are serialized with a lock
executor = ThreadPoolExecutor(max_workers=40)
write_lock = threading.Lock()
os.makedirs('./areaData', exist_ok=True)  # output directory must exist before the first write


def start():
    # Level 1: provinces listed on the landing page (tr.provincetr)
    response = requests.get(base_url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    list_a = soup.find_all('tr', class_='provincetr')
    shen = []
    for item in list_a:
        td = item.find_all('td')
        for a in td:
            b = a.find('a')
            if b is None:  # skip empty cells
                continue
            shen.append({'href': b['href'], 'Name': b.text, 'Id': b['href'].replace('.html', '0000'), 'Pid': '1',
                         'FullName': f'中国,{b.text}', 'Level': '1', 'shenId': b['href'].replace('.html', ''),
                         'fileName': '全国行政区划数据'})

    # Crawl each province's cities on the thread pool and wait for all tasks to finish
    futures = [executor.submit(getCityData, shenItem) for shenItem in shen]
    wait(futures, return_when=ALL_COMPLETED)
    executor.shutdown()


def getCityData(data):
    # Level 2: prefecture-level cities on the province page (tr.citytr)
    fileName = data['fileName']
    writeToFile(data, fileName)
    shi = []
    url = base_url + data['href']
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    list_a = soup.find_all('tr', class_='citytr')
    for item in list_a:
        td = item.find_all('td')
        try:
            code = td[0].find('a')
            name = td[1].find('a')
            shi.append({'href': code['href'], 'Name': name.text, 'Id': code.text, 'Pid': data['Id'],
                        'FullName': f"{data['FullName']},{name.text}", 'Level': '2', 'shenId': data['shenId'],
                        'fileName': fileName})
        except (TypeError, AttributeError):
            # Row has no link (no lower-level page); mark it with a placeholder href
            shi.append({'href': 'xxxx', 'Name': td[1].text, 'Id': td[0].text, 'Pid': data['Id'],
                        'FullName': f"{data['FullName']},{td[1].text}", 'Level': '2', 'shenId': data['shenId'],
                        'fileName': fileName})
    for item in shi:
        getQuXianData(item)


def getQuXianData(data):
    # Level 3: counties/districts on the city page (tr.countytr)
    fileName = data['fileName']
    writeToFile(data, fileName)
    quXian = []
    if data['href'] == 'xxxx':
        return
    url = base_url + data['href']
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    list_a = soup.find_all('tr', class_='countytr')
    for item in list_a:
        td = item.find_all('td')
        try:
            code = td[0].find('a')
            name = td[1].find('a')
            quXian.append({'href': code['href'], 'Name': name.text, 'Id': code.text, 'Pid': data['Id'],
                           'FullName': f"{data['FullName']},{name.text}", 'Level': '3', 'shenId': data['shenId'],
                           'fileName': fileName})
        except (TypeError, AttributeError):
            # Row has no link (no street/town page below it); mark it with a placeholder href
            quXian.append({'href': 'xxxx', 'Name': td[1].text, 'Id': td[0].text, 'Pid': data['Id'],
                           'FullName': f"{data['FullName']},{td[1].text}", 'Level': '3', 'shenId': data['shenId'],
                           'fileName': fileName})

    for item in quXian:
        getJieDao(item)


def getJieDao(data):
    # Level 4: streets/towns on the county page (tr.towntr)
    fileName = data['fileName']
    writeToFile(data, fileName)
    jieDao = []
    if data['href'] == 'xxxx':
        return
    # County hrefs are relative to the province directory, hence the shenId prefix
    url = base_url + data['shenId'] + '/' + data['href']
    print(f'street/town  {url}')
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    list_a = soup.find_all('tr', class_='towntr')
    for item in list_a:
        td = item.find_all('td')
        code = td[0].find('a')
        name = td[1].find('a')
        if code is None or name is None:
            # Row has no link; fall back to the plain cell text
            jieDao.append({'Name': td[1].text, 'Id': td[0].text, 'Pid': data['Id'],
                           'FullName': f"{data['FullName']},{td[1].text}", 'Level': '4'})
            continue
        jieDao.append({'href': code['href'], 'Name': name.text, 'Id': code.text, 'Pid': data['Id'],
                       'FullName': f"{data['FullName']},{name.text}", 'Level': '4'})
    for jieDaoItem in jieDao:
        writeToFile(jieDaoItem, fileName)


def writeToFile(data, fileName):
    # Append one pipe-delimited record: Id|Pid|FullName|Level|Name
    line = '|'.join([data['Id'], data['Pid'], data['FullName'], data['Level'], data['Name']]) + '\n'
    # Crawl tasks run in parallel and share the output file, so serialize the append
    with write_lock:
        with open(f'./areaData/{fileName}.txt', 'a', encoding='utf-8') as file:
            file.write(line)


if __name__ == '__main__':
    start()
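
Each region ends up as one pipe-delimited line, Id|Pid|FullName|Level|Name, in ./areaData/全国行政区划数据.txt. A minimal sketch for reading that file back into a list of dicts; load_areas is just an illustrative helper name, not part of the crawler:

def load_areas(path='./areaData/全国行政区划数据.txt'):
    areas = []
    with open(path, encoding='utf-8') as f:
        for line in f:
            parts = line.rstrip('\n').split('|')
            if len(parts) < 5:
                continue  # skip blank or malformed lines
            area_id, pid, full_name, level, name = parts[:5]
            areas.append({'Id': area_id, 'Pid': pid, 'FullName': full_name,
                          'Level': int(level), 'Name': name})
    return areas

# Example: count how many level-4 (street/town) records were collected
# print(sum(1 for a in load_areas() if a['Level'] == 4))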
