# 链家爬虫
# 没啥教程就是简单的爬虫，加个正则。有疑问公众号后台留言给你处理。
# 公众号 -> python网络小蜘蛛
# -*- coding: utf-8 -*-
# @ModuleName:链家
# @Function(功能):
# @Author : 苏穆冰白月晨
# @Time : 2021/4/7 10:19
import requests, re
from fake_useragent import UserAgent
import time
import csv
# Default request headers sent with every Lianjia request.
# BUG FIX: the key was 'UserAgent'; the real HTTP header name is
# 'User-Agent' (with a hyphen). Without the hyphen requests sends its own
# default agent string and the random fake_useragent value is ignored.
headers = {
    'User-Agent': UserAgent().random,
    # NOTE(review): this cookie value ends with "...1617761947Host: imapi.lianjia.com" —
    # a 'Host' header appears to have been pasted into the Cookie string.
    # The values are stale session cookies anyway; kept verbatim, but they
    # should be refreshed (or dropped) before real use — TODO confirm.
    'Cookie': 'lianjia_uuid=f04bbf1f-5132-45bf-8ac3-a100531e4d4d; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1617159068; UM_distinctid=17886311e4437d-0d1133832a12c7-5771031-144000-17886311e45450; _smt_uid=6063e39e.5f664d99; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2217886312828574-02d1fbeeecf7ff-5771031-1327104-1788631282970c%22%2C%22%24device_id%22%3A%2217886312828574-02d1fbeeecf7ff-5771031-1327104-1788631282970c%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; _ga=GA1.2.907807975.1617159080; login_ucid=2000000161545349; lianjia_token=2.001030290d700bb864019d003c207cb384; lianjia_token_secure=2.001030290d700bb864019d003c207cb384; security_ticket=C8amt+uFCkgOhn/I6vCdEbvSEjibLtSIKf2aFnAyYOl9ZZAUN2m21h6yrYu1S+/b8+lBNzBeSbLLsH3Zpl1dVXkPMHObtz7EkVLOp0mov1HDDbtw66+9zanNwb6m8Lae3HDRvsYAKPZbjSrYD5nPtAoITG2wI88fZySlyNAN5Ss=; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1617161206; select_city=610100; lianjia_ssid=d353d143-1a0a-477a-aaad-230bbb5549ed; _gid=GA1.2.860179285.1617761947Host: imapi.lianjia.com'
}
def request_cookies():
    """Fetch fresh session cookies from the Lianjia front page, then crawl
    result pages 0 through 100 with them.

    Side effects: one GET to the site root, then delegates each page to
    ``request_data``.
    """
    base_url = 'https://xa.fang.lianjia.com/'
    session = requests.session()
    # Cookies handed back by the landing page are reused for every listing page.
    page_cookies = session.get(base_url, headers=headers).cookies
    page = 0
    while page <= 100:
        request_data(page_cookies, page)
        page += 1
def request_data(cookies, a):
    """Download listing page number ``a`` and parse its first 10 entries.

    Parameters:
        cookies: cookie jar obtained from the site root.
        a: zero-based page index appended to the ``/loupan/pg`` URL.
    """
    page_url = f'https://xa.fang.lianjia.com/loupan/pg{a}'
    html = requests.get(page_url, cookies=cookies, headers=headers).text
    # Each result page is assumed to carry 10 listings — TODO confirm.
    for idx in range(10):
        response_re(html, idx)
def response_re(response: str, i: int) -> None:
    """Extract the i-th listing's fields from one Lianjia result page via regex.

    Parameters:
        response: raw HTML text of a search-result page.
        i: listing index within the page (0-9 per the caller's loop).

    Side effects: writes one row through the module-level ``csv_writer`` and
    prints the parsed record. Raises IndexError when a pattern yields fewer
    matches than expected.

    NOTE(review): the multi-line patterns below embed literal newlines and
    no leading whitespace, so they only match markup laid out exactly that
    way — fragile against any change in the page's formatting.
    """
    # Property-type badge text (e.g. the resblock-type span).
    guize_leixing = """<span class="resblock-type" style="background:.*?">(.*?)</span>"""
    leixing = re.findall(guize_leixing, response)[i]
    guize_shoukuang = """<span class="sale-status" style="background: #.*?">(.*?)</span>"""
    # The +2 offset skips the first two sale-status spans — presumably
    # non-listing badges earlier in the page; TODO confirm this alignment.
    shoukuang = re.findall(guize_shoukuang, response)[i+2]
    # Listing title text from the name anchor.
    guize_zhuti = """<a href=".*?" class="name " target="_blank" .*?>(.*?)</a>"""
    zhuti = re.findall(guize_zhuti, response)[i]
    # Detail-page link: href captured from the same anchor, prefixed with the
    # site root. NOTE(review): if the href is already absolute or starts with
    # '/', this produces a doubled separator — verify against real pages.
    guize_zilianjie = """<a href="(.*?)" class="name " target="_blank" .*?>.*?</a>"""
    zilianjie = r'https://xa.fang.lianjia.com/' + re.findall(guize_zilianjie, response)[i]
    # Average price number, suffixed with a unit label.
    guize_junjia = """<span class="number">(.*?)</span>"""
    junjia = re.findall(guize_junjia, response)[i] + '元/㎡(均价)'
    # Floor-area span inside the resblock-area div.
    guize_mianji = """<div class="resblock-area">
<span>(.*?)</span>
</div>"""
    mianji = re.findall(guize_mianji, response)[i]
    # Location is split over three spans/anchors in one resblock-location div;
    # three near-identical patterns each capture a different segment.
    guize_dizhi1 = """<div class="resblock-location">
<span>(.*?)</span>
<i class="split">/</i>
<span>.*?</span>
<i class="split">/</i>
<.*?>.*?</a>
</div>"""
    dizhi1 = re.findall(guize_dizhi1, response)[i]
    guize_dizhi2 = """<div class="resblock-location">
<span>.*?</span>
<i class="split">/</i>
<span>(.*?)</span>
<i class="split">/</i>
<.*?>.*?</a>
</div>"""
    dizhi2 = re.findall(guize_dizhi2, response)[i]
    guize_dizhi3 = """<div class="resblock-location">
<span>.*?</span>
<i class="split">/</i>
<span>.*?</span>
<i class="split">/</i>
<.*?>(.*?)</a>
</div>"""
    dizhi3 = re.findall(guize_dizhi3, response)[i]
    # Assemble "西安,<district>区,<segment2>,<segment3>".
    dizhi = "西安" + "," + dizhi1 + "区" + "," +dizhi2 + "," + dizhi3
    # Record kept as a dict only for the console printout below.
    data = {
        "主题" : zhuti,
        "销售状况": shoukuang,
        '类型': leixing,
        "详情地址": zilianjie,
        "均价" : junjia,
        "面积" : mianji,
        "地址" : dizhi,
    }
    # csv_writer is a module-level global created in the __main__ block.
    csv_writer.writerow([zhuti, shoukuang, leixing, zilianjie, junjia, mianji, dizhi])
    print(data)
if __name__ == '__main__':
    # BUG FIX: the original opened the CSV and never closed it, so buffered
    # rows could be lost on an exception. A context manager guarantees flush
    # and close. newline='' is required by the csv module on Windows.
    # csv_writer is assigned at module level here so response_re can reach it.
    with open('lianjia.csv', 'w', encoding='utf-8', newline='') as f:
        csv_writer = csv.writer(f)
        # Header row matching the field order written by response_re.
        csv_writer.writerow(["主题", "销售状况", '类型',"详情地址","均价","面积","地址"])
        request_cookies()
# 效果展示