Scraping House Prices with Proxy IPs

import requests as req
import time
import pandas as pd
from bs4 import BeautifulSoup
from sqlalchemy import create_engine
import os

# path segment appended to each city domain to reach the second-hand listing pages
#domain = "https://sz.esf.fang.com/"
city = "house/"


def getHouseInfo(url, ipproxies):
    '''
    Fetch one listing page through the given proxy and parse its key attributes into a dict.
    '''
    info = {}
    soup = BeautifulSoup(req.get(url, proxies=ipproxies).text, "html.parser")
    resinfo = soup.select(".tab-cont-right .trl-item1")
    print(url)

    # Layout, floor area, unit price, orientation, floor and decoration
    for item in resinfo:
        tmp = item.text.strip().split("\n")
        name = tmp[1].strip()
        if "朝向" in name:
            name = name.strip("进门")
        if "楼层" in name:
            name = name[0:2]
        if "地上层数" in name:
            name = "楼层"
        if "装修程度" in name:
            name = "装修"
        info[name] = tmp[0].strip()

    # Neighbourhood name, total price, district and town
    info["小区名字"] = soup.select(".rcont .blue")[0].text.strip()
    info["总价"] = soup.select(".tab-cont-right .trl-item")[0].text
    info['区'] = soup.select(".rcont .blue")[1].text.strip()
    info['镇'] = soup.select(".rcont .blue")[2].text.strip()
    print(info)
    print("##################")
    return info
# Get the total number of result pages for one city
def getTotalPage(domain, city):
    res = req.get(domain + city + "i31")
    soup = BeautifulSoup(res.text, "html.parser")
    endPage = soup.select(".page_al a").pop()['href']
    pageNum = endPage.strip("/").split("/")[1].strip("i3")
    print("loading..... " + pageNum + " pages of data in total .....")
    return pageNum


# Crawl one page of listings and return them as a DataFrame
def pageFun(domain, city, i, ipproxies):
    pageUrl = domain + city + "i3" + i
    print(pageUrl + " loading... page " + i + " .....")
    res = req.get(pageUrl, proxies=ipproxies)
    soup = BeautifulSoup(res.text, "html.parser")
    houses = soup.select(".shop_list dl")
    pageInfoList = []
    for house in houses:
        try:
            info = getHouseInfo(domain + house.select("a")[0]['href'], ipproxies)
            pageInfoList.append(info)
            print(info)
        except Exception as e:
            print("----> exception, skipping this listing:", e)

    df = pd.DataFrame(pageInfoList)
    return df

def makeFile(path):
    if not os.path.exists(path):
        os.makedirs(path)


def getIPPool():
    '''
    Build a proxy pool from the xicidaili free-proxy listing.
    '''
    ip_url = 'http://www.xicidaili.com/'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)\
        AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36', }
    ip_response = req.get(ip_url, headers=headers).text
    ip_soup = BeautifulSoup(ip_response, 'lxml')

    tag_tr = ip_soup.find_all('tr')
    IPPool = []
    for tr in tag_tr:
        ip_info = {
            'ip1': '',
            'port2': '',
            'location3': '',
            'type5': '',
            'live_time6': '',
            'verify_time7': ''
        }
        # A proxy row on xicidaili has exactly 8 <td> cells
        if len(tr.find_all('td')) == 8:
            try:
                ip_info['ip1'] = tr.find_all('td')[1].contents[0]
                ip_info['port2'] = tr.find_all('td')[2].contents[0]
                ip_info['location3'] = tr.find_all('td')[3].contents[0]
                ip_info['type5'] = tr.find_all('td')[5].contents[0]
                ip_info['live_time6'] = tr.find_all('td')[6].contents[0]
                ip_info['verify_time7'] = tr.find_all('td')[7].contents[0]
                IPPool.append(ip_info)
            except:
                pass
    return IPPool

def getIPPool2():
    '''
    Build a proxy pool from the first 10 listing pages of 89ip.cn.
    '''
    IPPool = []
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)\
                AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36', }
    for i in range(0, 10):
        ip_url = 'http://www.89ip.cn/index_' + str(i) + '.html/'  # first 10 listing pages
        ip_response = req.get(ip_url, headers=headers).text
        ip_soup = BeautifulSoup(ip_response, 'lxml')

        tag_tr = ip_soup.find_all('tr')

        for tr in tag_tr:
            ip_info = {
                'ip1': '',
                'port2': '',
                'location3': '',
                'type5': '',
                'live_time6': '',
                'verify_time7': ''
            }
            # A proxy row on 89ip.cn parses into 11 child nodes
            if len(tr) == 11:
                if not tr.find_all('td'):
                    continue
                try:
                    # the cell text is padded with tabs/newlines, so take the fourth tab-separated field
                    ip_info['ip1'] = tr.find_all('td')[0].contents[0].split('\t')[3]
                    ip_info['port2'] = tr.find_all('td')[1].contents[0].split('\t')[3]
                    ip_info['location3'] = tr.find_all('td')[2].contents[0]
                    ip_info['type5'] = 'noType'
                    ip_info['live_time6'] = tr.find_all('td')[4].contents[0]
                    ip_info['verify_time7'] = tr.find_all('td')[4].contents[0]
                    IPPool.append(ip_info)
                except:
                    pass
    print(IPPool)
    return IPPool
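
# --- Optional: validate the harvested proxies ---------------------------------
# Neither pool builder above checks that the proxies actually respond. The helper
# below is only a sketch, not part of the original script: the checkProxies name,
# the test URL and the timeout are assumptions you can adapt. It could be called as
#   ip_pool = checkProxies(ip_pool)
# right after building the pool in writePriceToData, to avoid wasting requests on
# dead proxies.
def checkProxies(ip_pool, test_url='https://www.baidu.com/', timeout=5):
    alive = []
    for ip in ip_pool:
        proxies = {'http': 'http://' + ip['ip1'] + ':' + ip['port2']}
        try:
            # any response within the timeout counts as a working proxy
            req.get(test_url, proxies=proxies, timeout=timeout)
            alive.append(ip)
        except Exception:
            pass
    return alive
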
def writePriceToData(domain, city):
    '''
    Crawl every listing page for one city and write the results to a CSV file.
    :param domain: base URL of the city's second-hand housing site
    :param city: path segment of the listing pages
    :return: 0 on success, -1 if no proxy IPs could be fetched
    '''
    timeFormat = time.strftime('%Y%m%d %H%M%S', time.localtime())
    path = 'F:\\python\\housePrice'
    makeFile(path)
    fileName = path + '\\' + domain.split('.')[0].split('/')[-1] + timeFormat + '.csv'
    all_Price = pd.DataFrame()

    # Prefer the xicidaili pool, fall back to 89ip, otherwise give up
    ip_pool = getIPPool()
    if len(ip_pool) != 0:
        print('Using the xicidaili proxy pool!')
    else:
        ip_pool = getIPPool2()
        if len(ip_pool) != 0:
            print('Using the 89ip proxy pool!')
        else:
            print('Failed to fetch any proxy IPs, please check that the proxy sites are still reachable!')
            return -1
    print(ip_pool)

    for i in range(1, int(getTotalPage(domain, city)) + 1):
        # Try the proxies one by one until the page is fetched successfully
        for ip in ip_pool:
            ipProxies = {
                'http': 'http://' + ip['ip1'] + ':' + ip['port2']
            }
            print(ipProxies)
            try:
                df_onePage = pageFun(domain, city, str(i), ipProxies)
                all_Price = pd.concat([all_Price, df_onePage])
                break
            except Exception as e:
                print("Exception", e)
                continue
    all_Price.to_csv(fileName, encoding='utf-8-sig')
    # pd.io.sql.to_sql(df_onePage, "city_house_price", connect, schema="houseinfo", if_exists="append")
    return 0
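
# --- Optional: persist results to MySQL ----------------------------------------
# create_engine is imported from sqlalchemy but never used, and the commented-out
# pd.io.sql.to_sql call above hints at writing each page to a database. The helper
# below is only a sketch: the pymysql driver, host, credentials and schema name are
# assumptions, not part of the original script. It could be called per page, e.g.
#   writePriceToDB(df_onePage)
def writePriceToDB(df):
    engine = create_engine(
        'mysql+pymysql://user:password@127.0.0.1:3306/houseinfo?charset=utf8mb4')
    # append the listings to a table, mirroring the commented-out to_sql call
    df.to_sql('city_house_price', engine, if_exists='append', index=False)
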
domains = ['https://sz.esf.fang.com/', 'https://gz.esf.fang.com/']
#domains = ['https://sh.esf.fang.com/', 'https://hz.esf.fang.com/', 'https://zz.esf.fang.com/', 'https://zhumadian.esf.fang.com/', 'https://xinyang.esf.fang.com/']


for domain in domains:
    print(domain)
    writePriceToData(domain, city)

 
