Scraping New-Home Data from the Beike Platform and Visualizing It

  • First, scrape the data
import requests
import time
import random
from lxml import etree
import csv
# Request headers: a pool of user-agent strings to rotate through
user_agents = [ "Mozilla/5.0 (Windows NT 5.1; rv:30.0) Gecko/20100101 Firefox/30.0",
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
                "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
                "Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0",
                "Mozilla/5.0 (X11; U; Linux Core i7-4980HQ; de; rv:32.0; compatible; JobboerseBot; Gecko/20100101 Firefox/38.0",
                "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0"]
def get_html():
    # Fetch each listing page (pages 1 through 100)
    for page in range(1, 101):
        url = "https://wh.fang.ke.com/loupan/hannan-caidian-jiangxia-huangbei-xinzhou-jiangan-jianghan-wuchang-qiaokou-hanyang-hongshan-dongxihu-qingshan-donghugaoxin-zhuankoukaifaqu/pg{}/#donghugaoxin".format(page)
        # Pick a random user agent for each request
        header = {"user-agent": random.choice(user_agents)}
        try:
            r = requests.get(url, headers=header, timeout=10).text
        except Exception as e:
            print(e)
        else:
            get_data(r)
            time.sleep(2)  # pause between pages to avoid hammering the server
            
def get_data(r):
    # Parse the page data
    html = etree.HTML(r)
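    # --- The original post is truncated at this point (the remainder sat
    # behind the page's paywall). Everything below is a minimal sketch of how
    # the function might continue, not the author's original code: the XPath
    # selectors and the CSV schema (name, district, price) are assumptions
    # for illustration -- inspect the live listing page for the real markup.
    items = html.xpath('//ul[@class="resblock-list-wrapper"]/li')  # assumed selector
    rows = []
    for item in items:
        name = item.xpath('.//a[@class="name"]/text()')                      # assumed
        district = item.xpath('.//span[@class="resblock-location"]/text()')  # assumed
        price = item.xpath('.//span[@class="number"]/text()')                # assumed
        rows.append([
            name[0].strip() if name else "",
            district[0].strip() if district else "",
            price[0].strip() if price else "",
        ])
    # Append the parsed rows to a CSV file (assumed output path)
    with open("loupan.csv", "a", newline="", encoding="utf-8") as f:
        csv.writer(f).writerows(rows)

if __name__ == "__main__":
    get_html()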
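The visualization half promised by the title is missing from this copy of the post. As a sketch of one way it could be done, assuming the hypothetical name/district/price CSV schema from the parsing sketch above, the snippet below plots the average listed price per district with pandas and matplotlib (both libraries are assumptions, not necessarily the author's original tooling):

import pandas as pd
import matplotlib.pyplot as plt

# Assumed schema from the scraping sketch: name, district, price
df = pd.read_csv("loupan.csv", names=["name", "district", "price"])
df["price"] = pd.to_numeric(df["price"], errors="coerce")  # coerce non-numeric prices to NaN

# Average listed price per district, highest first
avg = (df.dropna(subset=["price"])
         .groupby("district")["price"]
         .mean()
         .sort_values(ascending=False))

plt.rcParams["font.sans-serif"] = ["SimHei"]  # so Chinese district names render
avg.plot(kind="bar", figsize=(10, 5), title="Average new-home price by district")
plt.ylabel("price")
plt.tight_layout()
plt.savefig("avg_price_by_district.png")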