Python web scraper:
# Scrape 1111 rental listings in Huai'an (淮安)
# Fields collected: community name, layout, area, price and address
# XPath is used to extract the data from each page
1. Import the modules
import requests
from lxml import etree
import threading
2. Define the huaian_chuzu_house(i) function to fetch one page of listings
def huaian_chuzu_house(i):
    url = 'https://m.lianjia.com/chuzu/ha/zufang/pg%s/?ajax=1' % i
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/'
                      '537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/'
                      '537.36 Core/1.63.6788.400 QQBrowser/10.3.2727.400',
        'Referer': 'https://m.lianjia.com/ha/',
        'Upgrade-Insecure-Requests': '1'
    }
    response = requests.get(url=url, headers=headers)
    html = response.text
3. Parse the page
    info_html = etree.HTML(html)
    xiaoqus = info_html.xpath('//div[@class="content__item__main"]')
    for xiaoqu in xiaoqus:
        # Extract the fields of one listing: title, area, address and price
        name = xiaoqu.xpath('.//p[@class="content__item__title"]/text()')[0].strip()
        area = xiaoqu.xpath('.//p[@class="content__item__content"]/text()')[0].strip().split('\n')[0].strip()
        address = xiaoqu.xpath('.//p[@class="content__item__content"]/text()')[0].strip().split('\n')[2].strip()
        price = xiaoqu.xpath('.//p[@class="content__item__bottom"]/text()')[0].strip()
        # Store the fields in a dictionary
        rets = {
            "户型": name,
            "面积": area,
            "地址": address,
            "价格元/月": price
        }
        # Print the record
        print(rets)
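The XPath expressions above depend on Lianjia's mobile page markup, which can change at any time. As a minimal, self-contained sketch of the same extraction pattern, the snippet below runs etree.HTML and relative .// queries against a hypothetical HTML fragment (not the real Lianjia markup):

from lxml import etree

# Hypothetical fragment mimicking the class names used above
sample = '''
<div class="content__item__main">
    <p class="content__item__title"> Demo Community 2室1厅 </p>
    <p class="content__item__bottom"> 1200 元/月 </p>
</div>
'''
doc = etree.HTML(sample)
for item in doc.xpath('//div[@class="content__item__main"]'):
    title = item.xpath('.//p[@class="content__item__title"]/text()')[0].strip()
    price = item.xpath('.//p[@class="content__item__bottom"]/text()')[0].strip()
    print(title, price)  # -> Demo Community 2室1厅 1200 元/月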
4. Create the main function
def main():
    for i in range(56):
        # Create a thread for each page: pass the function and its argument
        # instead of calling it here, so the request actually runs in the thread
        pl = threading.Thread(target=huaian_chuzu_house, args=(i,))
        pl.start()
        print(i)
5. Call the function to run the scraper
if __name__ == '__main__':
    main()
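As a variation on step 4, a thread pool from the standard library keeps the number of concurrent requests bounded instead of starting 56 threads at once. This is only a sketch: main_pool is a hypothetical replacement for main() and assumes huaian_chuzu_house is defined as above.

from concurrent.futures import ThreadPoolExecutor

def main_pool():
    # At most 5 pages are fetched concurrently; the with-block waits
    # for every submitted page before returning
    with ThreadPoolExecutor(max_workers=5) as pool:
        pool.map(huaian_chuzu_house, range(56))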
6. Screenshot of the results