import requests
import time
import re
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.204 Safari/537.36',
    'Cookie': 'AD_RS_COOKIE=20080918; _trs_uv=kahvgie3_6_fc6v'
}
main_address = "http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2021/"
def analysis_html(address, label, label_class):
    # Fetch a page and return every `label` tag with class `label_class`.
    response = requests.get(address, headers=headers)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'lxml')
    labels = soup.find_all(label, class_=label_class)
    time.sleep(1)  # throttle every request a little
    return labels
def get_regionalization_array(labels, tag):
    # Flatten all `tag` children of the matched rows into one list.
    regionalization_array = []
    for label in labels:
        regionalization_array.extend(label.find_all(tag))
    return regionalization_array
def get_exact_info(regionalization_array, name, page, number):
    # Pull names, page links, and region codes out of the matched <a> tags.
    regionalization_name = []
    regionalization_number = []
    regionalization_page = []
    for regionalization in regionalization_array:
        if name:
            regionalization_name.append(regionalization.text)
        if page:
            regionalization_page.append(regionalization.get('href'))
        if number:
            # The code is the link's file name, e.g. "11.html" -> "11".
            regionalization_number.append(re.findall(r"(.+?)\.html", regionalization.get('href')))
    return regionalization_name, regionalization_page, regionalization_number
# Step 1: fetch the province list from the index page.
address = f'{main_address}index.html'
labels = analysis_html(address, 'tr', 'provincetr')
province_array = get_regionalization_array(labels, 'a')
province_name, province_page, province_number = get_exact_info(province_array, True, True, True)
# Step 2: collect city names, codes, and page links, grouped by province.
city_name_sort = []
city_number_sort = []
city_page_sort = []
for address in province_page:
    city_name = []
    city_number = []
    city_page = []
    address = f"{main_address}{address}"
    labels = analysis_html(address, 'tr', 'citytr')  # get the 'citytr' rows
    city_array = get_regionalization_array(labels, 'a')
    # Each city row holds two <a> tags: the code first, then the name.
    for city in city_array[1::2]:
        city_name.append(city.text)
        city_page.append(city.get('href'))
    city_name_sort.append(city_name)
    city_page_sort.append(city_page)
    for city in city_array[0::2]:
        city_number.append(city.text)
    city_number_sort.append(city_number)
# Step 3: collect county names and codes, grouped first by province, then by city.
name_prov_level = []
number_prov_level = []
for city_address in city_page_sort:
    county_name_sort = []
    county_number_sort = []
    for address in city_address:
        county_name = []
        county_number = []
        address = f"{main_address}{address}"
        labels = analysis_html(address, 'tr', 'countytr')  # get the 'countytr' rows
        # Match <td> instead of <a>: districts such as 市辖区 carry no link.
        county_array = get_regionalization_array(labels, 'td')
        for county in county_array[1::2]:
            county_name.append(county.text)
        county_name_sort.append(county_name)
        for county in county_array[0::2]:
            county_number.append(county.text)
        county_number_sort.append(county_number)
    name_prov_level.append(county_name_sort)
    number_prov_level.append(county_number_sort)
print("存储数据中...")
with open("province_name.txt", 'w',encoding='utf-8') as f:
f.write(str(province_name))
with open("province_number.txt", 'w',encoding='utf-8') as f:
f.write(str(province_number))
with open("city_name_sort.txt", 'w',encoding='utf-8') as f:
f.write(str(city_name_sort))
with open("city_number_sort.txt", 'w',encoding='utf-8') as f:
f.write(str(city_number_sort))
with open("name_prov_level.txt", 'w',encoding='utf-8') as f:
f.write(str(name_prov_level))
with open("number_prov_level.txt", 'w',encoding='utf-8') as f:
f.write(str(number_prov_level))
The run produces six .txt files, each storing its data as nested Python lists; a small query script can later load them to implement lookups (a sketch follows the sample data below).
Partial contents of the six files:
city_name_sort.txt:   [['市辖区'], ['市辖区'], ['石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', ...
city_number_sort.txt: [['110100000000'], ['120100000000'], ['130100000000', '130200000000', ...
name_prov_level.txt:  [[['东城区', '西城区', '朝阳区', '丰台区', '石景山区', '海淀区', '门头沟区', ...
number_prov_level.txt: [[['110101000000', '110102000000', '110105000000', '110106000000', '110107000000', '110108000000', '110109000000', '110111000000', ...
province_name.txt:    ['北京市', '天津市', '河北省', '山西省', '内蒙古自治区', '辽宁省', '吉林省', ...
province_number.txt:  [['11'], ['12'], ['13'], ['14'], ['15'], ['21'], ['22'], ['23'], ...
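
To make the lookup idea concrete, here is a minimal sketch of a query script. It assumes the six files written above exist; load_list and cities_of are hypothetical helper names, not part of the original scraper, and the nested lists are parsed back with ast.literal_eval since they were saved via str().

import ast

def load_list(path):
    # Hypothetical helper: the files hold str(list) output, so parse them
    # back safely with ast.literal_eval instead of eval().
    with open(path, encoding='utf-8') as f:
        return ast.literal_eval(f.read())

province_name = load_list("province_name.txt")
city_name_sort = load_list("city_name_sort.txt")
city_number_sort = load_list("city_number_sort.txt")

def cities_of(province):
    # Hypothetical helper: the lists are parallel, so a province's
    # index selects its city names and codes.
    i = province_name.index(province)
    return list(zip(city_name_sort[i], city_number_sort[i]))

print(cities_of('河北省'))  # e.g. [('石家庄市', '130100000000'), ...]
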
Reference: Zhihu - 李小狼也是Geek