抓取数据的页面:
小区名字:
代码:
import json
import os
import time

import scrapy
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
class LianjiaSpider(scrapy.Spider):
    """Scrape second-hand housing listings from lianjia.com.

    The start URL is rendered in a real Chrome browser via Selenium (the site
    loads content dynamically), then the rendered HTML is parsed with Scrapy
    selectors and the extracted listings are written to a local JSON file.
    """

    name = "lianjia"
    allowed_domains = ["lianjia.com"]
    start_urls = ["https://sh.lianjia.com/ershoufang/sq5968/?sug=***小区名字"]

    def parse(self, response):
        """Render the start URL with Selenium, extract listings, save as JSON.

        NOTE(review): this bypasses Scrapy's downloader entirely and re-fetches
        the page with Selenium, so the ``response`` argument is ignored.
        """
        # Path to the chromedriver executable (NOT the Chrome browser binary).
        driver_path = r'E:\software\chrome\chromedriver127\chromedriver127\chromedriver-win64\chromedriver.exe'
        options = webdriver.ChromeOptions()
        # BUG FIX: the original did `options.binary_location = driver_path`,
        # but binary_location is the path of the Chrome *browser* executable.
        # Pointing it at chromedriver.exe makes Selenium try to launch the
        # driver as the browser. The driver path must go in a Service object.
        service = Service(executable_path=driver_path)
        driver = webdriver.Chrome(service=service, options=options)
        try:
            # Open the listing page and give dynamic content a moment to render.
            driver.get(self.start_urls[0])
            time.sleep(1)

            # Parse the Selenium-rendered HTML with a Scrapy selector.
            selector = scrapy.Selector(text=driver.page_source)

            # Extract one dict per listing card.
            listings = []
            for li in selector.css('ul.sellListContent li.clear'):
                listings.append({
                    'img': li.css('a img.lj-lazy::attr(src)').get(),
                    'title': li.css('div.title a::text').get(),
                    'detail_url': li.css('div.title a::attr(href)').get(),
                    'community': li.css('div.flood a[data-el="region"]::text').get(),
                    'area': li.css('div.address div.houseInfo::text').get(),
                    'follow_info': li.css('div.followInfo::text').get(),
                    'tag': li.css('div.tag span::text').getall(),
                    'total_price': li.css('div.priceInfo div.totalPrice span::text').get(),
                    'unit_price': li.css('div.priceInfo div.unitPrice span::text').get(),
                })

            # Persist the results as UTF-8 JSON in the current working directory.
            output_file = os.path.join(os.getcwd(), 'lianjia_listings.json')
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(listings, f, ensure_ascii=False, indent=4)
            self.log(f'Saved file {output_file}')
        finally:
            # Always close the browser, even if extraction fails. The original
            # leaked the driver on any exception and slept 20s for no reason.
            driver.quit()
导出的json: