# python3 抓取国家统计局2017年省市区街道数据
# (python3 crawler for the NBS 2017 province/city/district/town data)

# -*- coding: utf-8 -*-

import pandas as pd
import requests
import urllib.request
import json
from bs4 import BeautifulSoup
# Root of the NBS 2017 administrative-division code pages; all deeper page
# paths in this script are relative to this URL.
baseUrl = "http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2017/"
# Browser-like User-Agent so the site does not reject the requests.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}

def getPro(f):
    """Scrape the top-level province list page and recurse into each province.

    Args:
        f: an open, writable text file; each discovered town eventually
           produces one "province,city,district,town" line in it.

    Raises:
        requests.HTTPError: if the province page returns an error status.
    """
    # timeout prevents the crawl from hanging forever on a stalled connection.
    resp = requests.get(baseUrl, headers=headers, timeout=30)
    # Fail fast on HTTP errors instead of parsing an error page as data.
    resp.raise_for_status()
    # The NBS site serves GBK-encoded pages; decode explicitly and drop
    # undecodable bytes rather than trusting the guessed encoding.
    html = resp.content.decode("gbk", "ignore")
    soup = BeautifulSoup(html, "html.parser")
    proArr = soup.find_all("tr", class_="provincetr")
    for pro in proArr:
        for td in pro.find_all("td"):
            # Skip empty padding cells in the province table.
            if td.text != '':
                getCity(f, td.text, td.find("a")["href"])

def getCity(f, proName, url):
    """Scrape one province page and recurse into each city that has a link.

    Args:
        f:       open writable text file, passed through to deeper levels.
        proName: province display name.
        url:     province page path relative to baseUrl (e.g. "11.html").

    Raises:
        requests.HTTPError: if the province page returns an error status.
    """
    resp = requests.get(baseUrl + url, headers=headers, timeout=30)
    resp.raise_for_status()  # fail fast instead of parsing an error page
    html = resp.content.decode("gbk", "ignore")
    soup = BeautifulSoup(html, "html.parser")
    # Keep the province code prefix ("11" from "11.html"); deeper pages use
    # it to build paths relative to baseUrl.
    bodyUrl = url.split(".")[0]
    for city in soup.find_all('tr', class_="citytr"):
        # Column 0 holds the numeric code, column 1 the city name.
        cityName = city.find_all("td")[1].text
        link = city.find("a")
        if link is not None:
            getDistict(f, proName, cityName, bodyUrl, link["href"])

def getDistict(f, proName, cityName, bodyUrl, url):
    """Scrape one city page and recurse into each district that has a link.

    NOTE(review): the original (misspelled) public name "getDistict" is kept
    so existing callers continue to work.

    Args:
        f:        open writable text file, passed through to getTown.
        proName:  province display name.
        cityName: city display name.
        bodyUrl:  province code prefix used to build the town page path.
        url:      city page path relative to baseUrl.

    Raises:
        requests.HTTPError: if the city page returns an error status.
    """
    resp = requests.get(baseUrl + url, headers=headers, timeout=30)
    resp.raise_for_status()  # fail fast instead of parsing an error page
    html = resp.content.decode("gbk", "ignore")
    soup = BeautifulSoup(html, "html.parser")
    for dis in soup.find_all('tr', class_="countytr"):
        disName = dis.find_all("td")[1].text
        link = dis.find("a")
        # Some district rows carry no <a> (presumably districts without a
        # town-level sub-page) — skip those.
        if link is not None:
            getTown(f, proName, cityName, disName, bodyUrl + "/" + link["href"])


def getTown(f, proName, cityName, disName, url):
    """Scrape one district page and write one comma-joined line per town.

    Args:
        f:        open writable text file receiving the output lines.
        proName:  province display name.
        cityName: city display name.
        disName:  district display name.
        url:      district page path relative to baseUrl.

    Raises:
        requests.HTTPError: if the district page returns an error status.
    """
    resp = requests.get(baseUrl + url, headers=headers, timeout=30)
    resp.raise_for_status()  # fail fast instead of parsing an error page
    html = resp.content.decode("gbk", "ignore")
    soup = BeautifulSoup(html, "html.parser")
    for town in soup.find_all('tr', class_="towntr"):
        townName = town.find_all("td")[1].text
        # Same output as the original "%s%s..." template: names joined by
        # commas, one record per line.
        name = ",".join((proName, cityName, disName, townName))
        print(name)
        f.write(name)
        f.write("\n")



if __name__ == "__main__":
    # Write all crawled "province,city,district,town" records to a UTF-8
    # text file, starting from the province index page.
    with open("location.txt", 'wt', encoding='UTF-8') as out_file:
        getPro(out_file)

...

...

 
  • 3
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值