Target site: https://www.phb123.com/city/renkou/rk.html
# Import the required packages
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Set the request headers so the crawler presents itself as a regular browser
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}
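Before crawling all nine pages, a single request can be used to check that the site accepts these headers; the status-code check below is an added illustration, not part of the original script:

# Sanity check (added): fetch the first ranking page and confirm a normal response
test_resp = requests.get('https://www.phb123.com/city/renkou/rk.html', headers=headers)
print(test_resp.status_code)  # expect 200 if the browser-like headers are accepted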
# Core scraping code; the paginated pages follow the pattern https://www.phb123.com/city/renkou/rk_9.html
listData = []  # rows of [rank, country, population]
counData = []  # country names only, used for matching later
for i in range(1, 10):
    if i == 1:
        url = 'https://www.phb123.com/city/renkou/rk.html'
    else:
        url = 'https://www.phb123.com/city/renkou/rk_%s.html' % i
    params = {"show_ram": 1}
    response = requests.get(url, params=params, headers=headers)  # request the page
    soup = BeautifulSoup(response.text, 'html.parser')  # parse the page source
    # .find locates the ranking table; .find_all collects all of its <tr> rows
    tr = soup.find('table', class_='rank-table').find_all('tr')
    # Skip the header: tr[0] is the header row, tr[1:] are the data rows
    for j in tr[1:]:
        td = j.find_all('td')  # cells of the current row
        rank = td[0].get_text().strip()     # world ranking
        country = td[1].get_text().strip()  # country name
        counData.append(country)
        number = td[2].get_text().strip()   # population
        listData.append([rank, country, number])
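Before saving, it is worth a quick check that the loop actually collected rows; the two print statements below are an added illustration, not part of the original script:

# Inspect the scraped data (added for illustration)
print(len(listData))   # number of scraped rows across all 9 pages
print(listData[:3])    # first few [rank, country, population] entries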
# Save the scraped ranking
dataRank = pd.DataFrame(listData, columns=["世界排名", "国家名称", "人口数量"])
dataRank.to_csv("世界人口排名2020.csv", index=False)
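To confirm the CSV was written correctly, it can be read back with pandas; this read-back check is an addition for illustration only:

# Read the saved ranking back and preview the first rows (added)
check = pd.read_csv("世界人口排名2020.csv")
print(check.head())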
# Read the per-country workbook and fill in the scraped population
dataCoun = pd.read_excel("各国人口数据.xlsx")
count = 0
for item in dataCoun.iterrows():
    counName = item[1]['国家名称']
    if counName in counData:
        # print(counName)
        count = count + 1
        # Copy the scraped population into column index 2 of the matching row
        dataCoun.iloc[item[0], 2] = dataRank[dataRank["国家名称"] == counName]["人口数量"].values.tolist()[0]
dataCoun.to_csv("各国人口数据.csv", index=False)
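As a side note, the row-by-row matching above can also be written as a single pandas merge, which avoids iterrows. The sketch below is an alternative, not the original approach, and it assumes the Excel file does not already contain a column named 人口数量 (otherwise pandas would suffix the overlapping columns):

# Alternative sketch: join the two tables on the country-name column
merged = dataCoun.merge(dataRank[["国家名称", "人口数量"]], on="国家名称", how="left")
merged.to_csv("各国人口数据_merge.csv", index=False)  # hypothetical output file name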
Results