Coding environment: Jupyter Notebook
Data: new-home listings for the Guangzhou area from Beike (gz.fang.ke.com)
Part 1: scraping the listing data
(Code along these lines is widely available online.)
import math  # used for the page-count ceiling below
import random
import requests
from bs4 import BeautifulSoup
from time import sleep
import pandas as pd
USER_AGENTS = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
# URL path segment for each Guangzhou district on Beike
region = {
    'nansha': 'nansha/',
    'liwan': 'liwan/',
    'yuexiu': 'yuexiu/',
    'haizhu': 'haizhu/',
    'tianhe': 'tianhe/',
    'baiyun': 'baiyun/',
    'huangpu': 'huangpugz/',  # Beike uses 'huangpugz' to distinguish Guangzhou's Huangpu district
    'panyu': 'panyu/',
    'huadu': 'huadu/',  # corrected from 'huadou': the district's pinyin (and Beike's path) is 'huadu'
    'zengcheng': 'zengcheng/',
    'conghua': 'conghua/'
}
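# For example, the index page for Tianhe is assembled inside spider() as
# main_url + region['tianhe'] -> https://gz.fang.ke.com/loupan/tianhe/,
# and page n of that district is fetched by appending f'pg{n}' to the URL.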
# Pick a fresh random User-Agent for each request; choosing one at module
# level (as the original did) would reuse the same UA for every page.
def random_headers():
    return {"User-Agent": random.choice(USER_AGENTS)}
def spider(regions):
    '''`regions` can be set to a single Guangzhou district, or adapted to crawl them all.'''
    main_url = 'https://gz.fang.ke.com/loupan/'
    for key, values in region.items():
        if key == regions:
            all_list = []
            url = main_url + values
            response = requests.get(url, timeout=10, headers=random_headers())
            html = response.content
            soup = BeautifulSoup(html, "lxml")
            # Page count from the listing total shown on the page (10 per page).
            # Use a ceiling instead of the original round(): round(1.4) == 1
            # would silently drop the last, partially filled page.
            total = int(soup.find('span', class_="value").string)
            page = math.ceil(total / 10)
            for i in range(1, page + 1):
                sleep(1.7)  # throttle so the crawl is gentle on the server
                page_url = main_url + values + f'pg{i}'
                response = requests.get(page_url, timeout=10, headers=random_headers())
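                # The original post breaks off at this point. What follows is a
                # minimal sketch of the rest of the loop body; the CSS classes
                # ('resblock-desc-wrapper', 'name', 'resblock-price') are
                # assumptions about Beike's markup, not taken from the source,
                # so adjust them to whatever the live page actually serves.
                page_soup = BeautifulSoup(response.content, "lxml")
                for item in page_soup.find_all('div', class_='resblock-desc-wrapper'):
                    name = item.find('a', class_='name')
                    price = item.find('div', class_='resblock-price')
                    all_list.append({
                        'name': name.get_text(strip=True) if name else None,
                        'price': price.get_text(strip=True) if price else None,
                    })
            # Hand the rows back as a DataFrame for the analysis in the next part.
            return pd.DataFrame(all_list)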