初学者——Selenium爬取统计局年度人口分布公开数据代码
(仅爬虫练习,侵删)
数据来源:统计局官网
数据去向:EXCEL表格
数据处理工具:SELENIUM ,GOOGLE浏览器
注意事项:
1.代码中的浏览器驱动地址以及存储CSV地址需要更换本地地址;
2.存储数据:一张表 = 一个工作簿,目录层级关系在工作簿名字上进行展示;
话不多说,直接上代码:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from lxml import etree
from lxml import html
from bs4 import BeautifulSoup
import selenium.webdriver.support.ui as ui
import requests
import pandas as pd
import time
import json
import openpyxl as op
import urllib.request
import re
import hashlib
import datetime
# Append scraped data to the specified Excel workbook.
def write_to_excel(fileName, sheetname, title, data):
    """Append a new worksheet to an existing workbook.

    Opens the workbook at *fileName*, creates a sheet named *sheetname*,
    writes *title* as the header row followed by each row list in *data*,
    then saves the workbook.  Errors are reported but never raised, so a
    single bad sheet does not abort the whole crawl.
    """
    try:
        workbook = op.load_workbook(fileName)
        worksheet1 = workbook.create_sheet(sheetname)
        worksheet1.append(title)
        for row in data:
            worksheet1.append(row)
        workbook.save(fileName)  # persist and release the file
    except Exception as e:
        # BUG FIX: the original printed the fixed string
        # 'list index out of range' no matter what failed; report the
        # real error and which sheet it happened on instead.
        print('write_to_excel failed for sheet %r: %s' % (sheetname, e))
# Count child elements under the node located by an XPath expression.
def getElementCnt(resp, path, element):
    """Count occurrences of the marker *element* (e.g. ``'<li'``, ``'<th'``,
    ``'<tr'``) in the serialized HTML of the first node matching *path*.

    *resp* is an lxml element tree (``etree.HTML(...)``).  Returns 0 when
    the XPath matches nothing or serialization fails for any reason.
    """
    nodes = resp.xpath(path)
    try:
        markup = html.tostring(nodes[0])
        return str(markup).count(element)
    except IndexError:
        # XPath matched no node -> treat as "no children".
        # BUG FIX: the original compared the exception OBJECT to the
        # string 'list index out of range' with `is`, which is always
        # False; both branches returned 0, so returning 0 here is
        # behavior-preserving but correct.
        return 0
    except Exception:
        # Any other serialization problem also counts as 0 children.
        return 0
# Report whether the menu node at *path* has a next (deeper) level.
def judgeHasNextFloor(resp, path, element):
    """Return True when the node located by *path* contains at least one
    *element* marker (i.e. a deeper menu level exists), else False.

    BUG FIX: the original used ``a is not 0`` (identity comparison on
    ints) and returned the undefined names ``true``/``false``, which
    would raise NameError at runtime.
    """
    return getElementCnt(resp, path, element) != 0
# Strip the noise characters left over from str()-converting an lxml
# xpath result list: the surrounding brackets and the quote marks.
def cleanString(str1):
    """Return *str1* with every ``[``, ``]`` and ``'`` removed."""
    for noise in ("[", "]", "'"):
        str1 = str1.replace(noise, "")
    return str1
# Extract the column (header) labels of the data grid on the page.
def getHeadData(resp):
    """Return the list of column titles, left to right.

    *resp* is the lxml tree of the rendered query page.  The header cell
    count is derived from the serialized ``<thead>`` row so the loop can
    address each ``<th>`` by its 1-based XPath position.
    """
    headlists = []
    head_xpath = '/html/body/div[5]/div[2]/div/div[2]/div[2]/div[2]/div/div[1]/table/thead/tr'
    column_cnt = getElementCnt(resp, head_xpath, '<th')
    tbhead_list = resp.xpath(head_xpath)
    for tbhead in tbhead_list:
        # XPath positions are 1-based, hence range(1, n + 1) instead of
        # the original's `i += 1` inside a 0-based range loop.
        for i in range(1, column_cnt + 1):
            head_pri = tbhead.xpath('./th[' + str(i) + ']/strong/text()')
            # Consistency fix: reuse the shared cleanString helper
            # instead of re-inlining the same chain of replaces.
            headlists.append(cleanString(str(head_pri)))
    print("Columns:" + str(headlists))
    return headlists
# Extract every data row of the grid as a list of row lists.
def getRowsData(resp):
    """Return the table body as ``[[cell, ...], ...]``, one inner list
    per row, in display order.

    *resp* is the lxml tree of the rendered query page.  Row and column
    counts are derived from the serialized ``<tbody>``/``<thead>`` so
    each cell can be addressed by its 1-based XPath position.
    """
    bodylists = []
    body_xpath = '/html/body/div[5]/div[2]/div/div[2]/div[2]/div[2]/div/div[3]/table/tbody'
    head_xpath = '/html/body/div[5]/div[2]/div/div[2]/div[2]/div[2]/div/div[1]/table/thead/tr'
    tbbody_list = resp.xpath(body_xpath)
    row_cnt = getElementCnt(resp, body_xpath, '<tr')
    column_cnt = getElementCnt(resp, head_xpath, '<th')
    for tbbody in tbbody_list:
        for j in range(1, row_cnt + 1):  # rows (1-based XPath positions)
            bodylist = []
            for k in range(1, column_cnt + 1):  # columns
                body_pri = tbbody.xpath('./tr[' + str(j) + ']/td[' + str(k) + ']/text()')
                # Consistency fix: reuse the shared cleanString helper.
                bodylist.append(cleanString(str(body_pri)))
            bodylists.append(bodylist)
            print("Rows:" + str(bodylist))
    return bodylists
def printHeadAndData(resp, fileName, sheetname):
    """Scrape the header and body of the current data grid from *resp*
    and persist them as a new worksheet *sheetname* in *fileName*."""
    columns = getHeadData(resp)
    rows = getRowsData(resp)
    write_to_excel(fileName, sheetname, columns, rows)
# Launch the browser driver (path must point at a local chromedriver).
# NOTE(review): executable_path= is deprecated in Selenium 4 — this file
# appears to target Selenium 3; confirm the installed version.
driver = webdriver.Chrome(executable_path='D:/test/chromedriver.exe')
wait = ui.WebDriverWait(driver,10)
# Load the National Bureau of Statistics annual-data query page.
driver.get('https://data.stats.gov.cn/easyquery.htm?cn=C01')
# Click through Chrome's "connection not private" interstitial
# (the link text below is the Chinese "proceed anyway" anchor).
click_goon = driver.find_element_by_xpath("/html/body/div/div[3]/p[2]/a[text()='继续前往data.stats.gov.cn(不安全)']")
driver.execute_script("arguments[0].click();", click_goon)
driver.get('https://data.stats.gov.cn/easyquery.htm?cn=C01')
driver.switch_to.default_content()
time.sleep(2)
# Transpose the data grid (the anchor text is "转置" = "transpose"),
# so indicators become rows and years become columns.
converse=driver.find_element_by_xpath("/html/body/div[11]/ul/li[2]/a[text()='转置']")
driver.execute_script("arguments[0].click();", converse)
time.sleep(2)
# Attempt to switch the view to "last 20 years" — not working yet.
# ele = driver.find_element(By.XPATH,'(//*[@title="最近20年"])[1]')
# ActionChains(driver).move_to_element(ele).perform()  # hover
# to trigger the dropdown menu under the hovered element
# # select the first-level unit
# driver.find_element(By.XPATH,'//*[@id="treeZhiBiao_2_span"]').click()
# # select the second-level unit
# driver.find_element(By.XPATH,'//*[@id="treeZhiBiao_30_span"]').click()
# time.sleep(2)
# Parse the rendered page source into an lxml tree.
resp = driver.page_source
resp = etree.HTML(resp)
# Count the entries of the first-level menu (left-hand indicator tree).
floor1_cnt = getElementCnt(resp,'/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul','<li')
print("The number of data`s floors is "+str(floor1_cnt))
basepath = '/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul'
# Output workbook — change to a valid local path before running.
fileName = 'E:/User/datamining/c_data.xlsx'
# Accumulators for the menu names/ids discovered at each tree depth;
# converseMenu() below appends into these module-level lists.
floor1_id_list = []
floor1_name_list = []
floor2_id_list = []
floor2_name_list = []
floor3_name_list = []
floor3_id_list = []
floor4_name_list = []
floor4_id_list = []
# Walk the indicator menu tree: when a node has no children, record its
# id/name and scrape its table; otherwise descend one level deeper.
def converseMenu(cnt, basepath):
    """Depth-first walk of the left-hand indicator menu (up to 4 levels).

    For each leaf menu entry the current data grid is scraped into a
    worksheet whose name joins the ancestor menu names with ``_``.
    Returns ``[names, ids]`` for the entries recorded at the deepest
    visited levels, or False when *cnt* is 0.

    BUG FIXES vs. the original: every ``x is not 0`` int identity
    comparison is now ``!= 0`` (identity on ints is an implementation
    detail), and ``return false`` (undefined name, NameError) is now
    ``return False``.  Control flow and all strings are unchanged.
    """
    floor_list = []
    floor_name_list = []
    floor_id_list = []
    if cnt != 0:
        # XPath positions are 1-based, hence range(1, n + 1).
        for c in range(1, cnt + 1):
            # Expand the level-1 menu entry and re-parse the page.
            floor = driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/a/span[2]')
            driver.execute_script("arguments[0].click();", floor)
            time.sleep(2)
            resp2 = etree.HTML(driver.page_source)
            cnt2 = getElementCnt(resp2,'/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/ul','<li')
            time.sleep(1)
            print(floor.text +'的下一层菜单个数为 = '+str(cnt2))
            if cnt2 != 0:
                for c2 in range(1, cnt2 + 1):
                    # Expand the level-2 entry.
                    floor2 = driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/ul/li['+str(c2)+']/a/span[2]')
                    driver.execute_script("arguments[0].click();", floor2)
                    time.sleep(1)
                    resp3 = etree.HTML(driver.page_source)
                    cnt3 = getElementCnt(resp3,'/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/ul/li['+str(c2)+']/ul','<li')
                    print(floor.text+'_'+floor2.text +' 下一层菜单个数为 '+str(cnt3))
                    if cnt3 != 0:
                        for c3 in range(1, cnt3 + 1):
                            # Expand the level-3 entry.
                            floor3 = driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/ul/li['+str(c2)+']/ul/li['+str(c3)+']/a/span[2]')
                            driver.execute_script("arguments[0].click();", floor3)
                            time.sleep(1)
                            resp4 = etree.HTML(driver.page_source)
                            cnt4 = getElementCnt(resp4,'/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/ul/li['+str(c2)+']/ul/li['+str(c3)+']/ul','<li')
                            print(floor.text+'_'+floor2.text+'_'+floor3.text +' 下一层菜单个数为 '+str(cnt4))
                            if cnt4 != 0:
                                for c4 in range(1, cnt4 + 1):
                                    # Level-4 leaf: click it and scrape the grid.
                                    floor4 = driver.find_element_by_xpath('/html/body/div[5]/div[2]/div/div[1]/div/ul/li/ul/li['+str(c)+']/ul/li['+str(c2)+']/ul/li['+str(c3)+']/ul/li['+str(c4)+']/a/span[2]')
                                    driver.execute_script("arguments[0].click();", floor4)
                                    time.sleep(1)
                                    resp4_5 = etree.HTML(driver.page_source)
                                    printHeadAndData(resp4_5,fileName,floor.text+'_'+floor2.text+'_'+floor3.text+'_'+floor4.text)
                                    floor4_name = floor4.text
                                    floor4_id = floor4.get_attribute('id')
                                    floor_name_list.append(floor4_name)
                                    floor_id_list.append(floor4_id)
                                    floor4_name_list.append(floor4_name)
                                    floor4_id_list.append(floor4_id)
                            else:
                                # Level-3 leaf: scrape here.
                                resp3_4 = etree.HTML(driver.page_source)
                                printHeadAndData(resp3_4,fileName,floor.text+'_'+floor2.text+'_'+floor3.text)
                                floor3_name = floor3.text
                                floor3_id = floor3.get_attribute('id')
                                floor_name_list.append(floor3_name)
                                floor_id_list.append(floor3_id)
                                floor3_name_list.append(floor3_name)
                                floor3_id_list.append(floor3_id)
                    else:
                        # Level-2 leaf: scrape here.
                        resp2_3 = etree.HTML(driver.page_source)
                        printHeadAndData(resp2_3,fileName,floor.text+'_'+floor2.text)
                        floor2_name = floor2.text
                        floor2_id = floor2.get_attribute('id')
                        floor_name_list.append(floor2_name)
                        floor_id_list.append(floor2_id)
                        floor2_name_list.append(floor2_name)
                        floor2_id_list.append(floor2_id)
            else:
                # Level-1 leaf: scrape here.
                resp1_2 = etree.HTML(driver.page_source)
                printHeadAndData(resp1_2,fileName,floor.text)
                floor_name = floor.text
                floor_id = floor.get_attribute('id')
                floor_name_list.append(floor_name)
                floor_id_list.append(floor_id)
                floor1_name_list.append(floor_name)
                floor1_id_list.append(floor_id)
        floor_list.append(floor_name_list)
        floor_list.append(floor_id_list)
        return floor_list
    else:
        return False
# Kick off the walk from the first menu level, scraping every leaf
# into its own worksheet, then shut the browser down.
floor1_list = converseMenu(floor1_cnt,basepath)
# print("The click floors are as list: "+str(floor_list))
# print("The first floors are as list: "+str(floor1_name_list))
# print("The second floors are as list: "+str(floor2_name_list))
driver.close()
driver.quit()
爬虫页面显示:
EXCEL表格展示数据,目录间的上下级关系通过下划线“_”连接的,表格数量非常多:
欢迎小伙伴留言讨论哦~