Scraping listing information and photos from an internal system

Overview of the steps:
1. Log in through the browser, copy the cookies, and build the request headers (a small cookie-handling sketch follows this list).
2. Fetch the listing-list pages with requests.
3. Parse each page with BeautifulSoup to extract the listing title, room number, detail-page URL, and related fields.
4. Fetch each listing's detail page with requests.
5. Parse the detail page with BeautifulSoup to get the photo URLs.
6. Save the info fields and the listing photos to local files.
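
Step 1 in practice: after logging in through the browser, copy the raw Cookie request header from the developer tools and turn it into the dict that requests expects. This is only a minimal sketch; the cookie names and values below are made-up placeholders, and the real ones come from your own logged-in session.

# Hypothetical raw "Cookie" header copied from the browser's developer tools after logging in.
raw_cookie = 'ASP.NET_SessionId=abc123; userToken=xyz789'

# Split it into name/value pairs; the resulting dict can be passed as the
# cookies= argument of requests.get().
session_cookies = dict(pair.split('=', 1) for pair in raw_cookie.split('; ') if '=' in pair)
print(session_cookies)  # {'ASP.NET_SessionId': 'abc123', 'userToken': 'xyz789'}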

import requests
from bs4 import BeautifulSoup
import re
import traceback
import os

# Desktop User-Agent plus the logged-in session cookies copied from the browser
# (the cookie value is deliberately left blank here).
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
cookies = {'Cookie': ''}

def gethtml(url):
    """Fetch a URL with the logged-in session cookies and return the Response."""
    try:
        r = requests.get(url, headers=headers, cookies=cookies, timeout=30)
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r
    except Exception:
        traceback.print_exc()
        print('Fetch failed')

def getinfo(html):
    """Parse one listing-list page and yield a dict of fields per room row."""
    soup = BeautifulSoup(html.text, 'lxml')
    tags = soup.select('tbody > tr')
    for tag in tags:
        try:
            tds = tag.find_all('td')
            roomNO = tds[0].text.strip()
            roomtitle = tds[1].find('a').text.strip()
            # The link in the title cell points to the listing's detail page.
            roompic_url = "http://house.i8oa.com/{}".format(tds[1].a['href'])
            roomprice = tds[2].span.text.strip()
            real_price = tds[2].find_all(class_='label label-danger m-left-xs')[0].text.strip()
            roomtype = tds[2].find_all(class_='label label-danger m-left-xs')[1].text.strip()
            info = f'{roomNO}\t{roomtitle}\t{roomprice}\t{real_price}\t{roomtype}\t{roompic_url}\n'
            yield {'info': info, 'roompic_url': roompic_url, 'roomtitle': roomtitle, 'roomNO': roomNO}
        except Exception:
            # Skip rows that don't match the expected column layout.
            continue

    

def savepic(roompic_url, roomtitle, roomNO):
    """Download every photo on a listing's detail page into its own folder."""
    resp = gethtml(roompic_url)
    if resp is None:
        return
    soup = BeautifulSoup(resp.text, 'lxml')
    # Each photo sits inside a <figure> whose <a href> points at the full-size image.
    pic_url_tags = soup.find_all('figure')
    num = 0
    for pic_url_tag in pic_url_tags:
        try:
            pic_url = pic_url_tag.a['href']
            pic_content = gethtml(pic_url).content
            dirname = roomtitle + "房" + roomNO
            root = "D:/抓取资源/房源信息/{}/".format(dirname)
            num += 1
            picname = f"{dirname}第{num}张{pic_url.split('/')[-1]}"
            fpath = root + picname
            print(fpath)
            os.makedirs(root, exist_ok=True)
            # 'wb' rather than 'ab': re-running the script overwrites an existing
            # image instead of appending to (and corrupting) it.
            with open(fpath, 'wb') as f:
                f.write(pic_content)
                print("Photo saved")
        except Exception:
            traceback.print_exc()
            print("Photo save failed")

def saveinfo(info, filename):
    """Append one tab-separated line of listing info to the summary file."""
    root = "D:/抓取资源/房源信息/"
    path = root + filename
    try:
        os.makedirs(root, exist_ok=True)
        # Write as UTF-8 so the Chinese field values round-trip regardless of
        # the system's default encoding.
        with open(path, 'a', encoding='utf-8') as f:
            f.write(info)
            print("Info saved")
    except Exception:
        traceback.print_exc()
        print("Info save failed")

def main():
    filename = '麻雀房源0.text'
    # Write the column header once so the summary file starts with field names.
    saveinfo('房号\t标题\t房价\t底价\t房型\t房屋照片\n', filename)
    for i in range(1, 48):
        url = 'http://house.i8oa.com/RoomManagement.aspx?houseState=736&Checkinstatus=754&usernamelo=SP779011&page={}'.format(i)
        html = gethtml(url)
        if html is None:
            continue
        for r in getinfo(html):
            saveinfo(r['info'], filename)
            savepic(r['roompic_url'], r['roomtitle'], r['roomNO'])
        # Simple text progress bar across the 47 listing pages.
        prog = i * 100 / 47
        print("\rProgress: {}{:.2f}%".format('***' * int(prog / 10), prog), end="")


if __name__ == '__main__':
    main()
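
To sanity-check the scraped summary afterwards, the tab-separated file can be loaded back in one call. A minimal sketch, assuming pandas is installed and the path matches the one used in saveinfo above:

import pandas as pd

# Load the tab-separated summary written by saveinfo and show the first rooms.
df = pd.read_csv('D:/抓取资源/房源信息/麻雀房源0.text', sep='\t', encoding='utf-8')
print(df.head())
print(len(df), 'rows scraped')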

For listing data from more of the major property sites, feel free to message me.
