1 Data Collection
Using the request module from the urllib library, we crawl the second-hand housing listings for Shanghai published on Ganji.com, including the layout, area, unit price, and other fields. We then parse the fetched HTML with the BeautifulSoup library and finally save the data to a CSV file.
import urllib.request
from bs4 import BeautifulSoup
import pandas as pd
# Fetch one listing page and return its parsed records
def request_Data(url):
    # Build the urllib Request object
    req = urllib.request.Request(url)
    page_data_list = []
    with urllib.request.urlopen(req) as response:
        data = response.read()
        htmlstr = data.decode()
        # Parse the page and collect its records
        L = parse_HTMLData(htmlstr)
        page_data_list.extend(L)
    return page_data_list
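Some sites reject requests that carry urllib's default User-Agent and return an error page or empty content. If that happens here, one common workaround, sketched below with an illustrative header value, is to pass a browser-like User-Agent when building the Request:
# Hedged variant of the fetch above: same logic, plus a browser-like
# User-Agent header (the header value is illustrative, not required)
def request_Data_with_headers(url):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as response:
        htmlstr = response.read().decode()
    return parse_HTMLData(htmlstr)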
# Parse one page of HTML and extract the listing records
def parse_HTMLData(htmlstr):
    sp = BeautifulSoup(htmlstr, 'html.parser')
    # Get the list of house entries on the page
    house_list = sp.select('#f_mew_list > div.f-main.f-clear.f-w1190 > div.f-main-left.f-fl.f-w980 > div.f-main-list > div > div')
    # Records collected from the current page
    page_list = []
    for house in house_list:
        # One row of data per listing
        rows_list = []
        # Get the listing title
        title = house.select('dl > dd.dd-item.title > a')
        title = (title[0].text).strip()
        rows_list.append(title)
        # Get the detail spans; the values sit at even indices
        # (the odd indices appear to be separator spans)
        infos = house.select('dl > dd.dd-item.size > span')
        # Get the layout
        house_type = (infos[0].text).strip()
        rows_list.append(house_type)
        # Get the area
        house_area = (infos[2].text).strip()
        rows_list.append(house_area)
        # Get the orientation
        house_face = (infos[4].text).strip()
        rows_list.append(house_face)
        # Get the floor
        house_floor = (infos[6].text).strip()
        rows_list.append(house_floor)
        # Get the district the house is in
        addr_dist = house.select('dl > dd.dd-item.address > span > a.address-eara')
        addr_dist = (addr_dist[0].text).strip()
        rows_list.append(addr_dist)
        # Get the residential community the house is in
        addr_name = house.select('dl > dd.dd-item.address > span > a > span')
        addr_name = (addr_name[0].text).strip()
        rows_list.append(addr_name)
        # Get the total price
        total_price = house.select('dl > dd.dd-item.info > div.price')
        total_price = (total_price[0].text).strip()
        rows_list.append(total_price)
        # Get the unit price
        price = house.select('dl > dd.dd-item.info > div.time')
        price = (price[0].text).strip()
        rows_list.append(price)
        page_list.append(rows_list)
    return page_list
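Note that parse_HTMLData indexes infos[0], infos[2], infos[4], and infos[6] on the assumption that every listing renders the same fixed sequence of span tags; a listing with a missing field would raise an IndexError. A minimal defensive sketch (the helper name is hypothetical):
# Hypothetical helper: return the stripped text of tags[index],
# or a default value when the tag list is shorter than expected
def safe_text(tags, index, default=''):
    return tags[index].text.strip() if len(tags) > index else default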
url_temp = 'http://sh.ganji.com/ershoufang/pn{}/'
data_list = []
# Crawl the listing pages and accumulate the records
# (the page range is an assumption; adjust to however many pages are needed)
for i in range(1, 6):
    url = url_temp.format(i)
    data_list.extend(request_Data(url))
# Save every record to a CSV file (column names and filename are illustrative)
df = pd.DataFrame(data_list, columns=['title', 'layout', 'area', 'orientation',
                                      'floor', 'district', 'community',
                                      'total_price', 'unit_price'])
df.to_csv('house_data.csv', index=False, encoding='utf-8-sig')
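To sanity-check the crawl, the saved file can be read back with pandas (assuming the house_data.csv filename used above):
# Read the CSV back and inspect the record count and the first few rows
df_check = pd.read_csv('house_data.csv')
print(df_check.shape)
print(df_check.head())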