How to Keep Your IP from Being Banned When Scraping Large Amounts of Data with Python

Foreword

The text and images in this article come from the internet and are for learning and exchange only; they are not for any commercial use. Copyright remains with the original author. If there is any problem, please contact us promptly so it can be dealt with.


I scraped some data from Zhubajie (zbj.com). Probably because I pulled a bit too much, my IP got banned and I had to pass a manual verification to get it unbanned, which obviously stopped me from scraping any more data.

Below is the Zhubajie spider I wrote that got my IP banned:

# coding=utf-8
import requests
from lxml import etree


def getUrl():
    for i in range(33):
        url = 'http://task.zbj.com/t-ppsj/p{}s5.html'.format(i + 1)
        spiderPage(url)


def spiderPage(url):
    if url is None:
        return None
    htmlText = requests.get(url).text
    selector = etree.HTML(htmlText)
    tds = selector.xpath('//*[@class="tab-switch tab-progress"]/table/tr')
    try:
        for td in tds:
            price = td.xpath('./td/p/em/text()')
            href = td.xpath('./td/p/a/@href')
            title = td.xpath('./td/p/a/text()')
            subTitle = td.xpath('./td/p/text()')
            deadline = td.xpath('./td/span/text()')
            # Conditional expression: value_if_true if condition else value_if_false
            price = price[0] if len(price) > 0 else ''
            title = title[0] if len(title) > 0 else ''
            href = href[0] if len(href) > 0 else ''
            subTitle = subTitle[0] if len(subTitle) > 0 else ''
            deadline = deadline[0] if len(deadline) > 0 else ''
            print(price, title, href, subTitle, deadline)
            print('---------------------------------------------------------------------------------------')
            spiderDetail(href)
    except Exception:
        print('Error')


def spiderDetail(url):
    if url is None:
        return None
    try:
        htmlText = requests.get(url).text
        selector = etree.HTML(htmlText)
        aboutHref = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/a/@href')
        price = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/text()')
        title = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/h2/text()')
        contentDetail = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/div[1]/text()')
        publishDate = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/p/text()')
        aboutHref = aboutHref[0] if len(aboutHref) > 0 else ''
        price = price[0] if len(price) > 0 else ''
        title = title[0] if len(title) > 0 else ''
        contentDetail = contentDetail[0] if len(contentDetail) > 0 else ''
        publishDate = publishDate[0] if len(publishDate) > 0 else ''
        print(aboutHref, price, title, contentDetail, publishDate)
    except Exception:
        print('Error')


if __name__ == '__main__':
    getUrl()

So how do you keep a site from banning your IP while you scrape? I looked into a few tricks.

1. Modify the request headers

My earlier spider sent no headers at all. Here I add a User-Agent header so the requests look like they come from a browser:

user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'
headers = {'User-Agent': user_agent}
htmlText = requests.get(url, headers=headers).text
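A single hard-coded User-Agent is still easy for a site to fingerprint, so one small refinement is to rotate through a pool of browser strings. Here is a minimal sketch; the pool below is just an illustrative list of my own, not something from the original code:

import random
import requests

# A small, purely illustrative pool of desktop browser User-Agent strings
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
]

def fetch(url):
    # Pick a different User-Agent for every request
    headers = {'User-Agent': random.choice(USER_AGENTS)}
    return requests.get(url, headers=headers, timeout=5).text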

2. Use proxy IPs

Once the site has banned your own IP, the only way to keep scraping is through proxies, so try to route every request through a proxy IP: if one proxy gets banned, there are always more.

# Proxy addresses are taken from the domestic high-anonymity proxy site: http://www.xicidaili.com/nn/
# Scraping just the first page of addresses is usually enough
from bs4 import BeautifulSoup
import requests
import random


def get_ip_list(url, headers):
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):
        ip_info = ips[i]
        tds = ip_info.find_all('td')
        ip_list.append(tds[1].text + ':' + tds[2].text)
    return ip_list


def get_random_ip(ip_list):
    proxy_list = []
    for ip in ip_list:
        proxy_list.append('http://' + ip)
    proxy_ip = random.choice(proxy_list)
    proxies = {'http': proxy_ip}
    return proxies


if __name__ == '__main__':
    url = 'http://www.xicidaili.com/nn/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }
    ip_list = get_ip_list(url, headers=headers)
    proxies = get_random_ip(ip_list)
    print(proxies)

This generates a random proxy for you, and you can take the code above and use it as-is.

With it I generated a batch of proxy addresses (some of them may well be dead, but as long as my own IP doesn't get banned, I'm happy). Then I can attach a proxy IP to my requests.
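Since some of the harvested addresses will inevitably be dead, it can save a lot of failed requests to check them first. Below is a minimal sketch of one way to do that; it is not part of the original article, and the test URL http://httpbin.org/ip and the 3-second timeout are my own assumptions:

import requests

def filter_working_proxies(ip_list, test_url='http://httpbin.org/ip', timeout=3):
    # Keep only the proxies that can actually complete a request
    working = []
    for ip in ip_list:
        proxies = {'http': 'http://' + ip}
        try:
            requests.get(test_url, proxies=proxies, timeout=timeout)
            working.append(ip)
        except requests.RequestException:
            # Dead, slow, or already-banned proxy: skip it
            pass
    return working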

Attach a proxy IP to the request. Note that requests accepts one proxy per URL scheme, so a proxies dict with two 'http' keys would silently keep only the last entry; keep the candidates in a list and choose one per request instead:

# Two of the harvested proxies; requests takes one proxy per scheme, so choose one per request
proxy_pool = ['http://124.72.109.183:8118', 'http://49.85.1.79:31666']
proxies = {'http': random.choice(proxy_pool)}

user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'
headers = {'User-Agent': user_agent}
htmlText = requests.get(url, headers=headers, timeout=3, proxies=proxies).text
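Even a proxy that worked a moment ago can die mid-run, so in practice I would draw a fresh proxy for each attempt and retry a couple of times before giving up. A rough sketch that builds on the ip_list and get_random_ip helpers from the snippet above; the retry count and timeout values are assumptions of mine:

import requests

def fetch_with_proxy(url, ip_list, headers, retries=3, timeout=3):
    # Try the request through a randomly chosen proxy, switching proxies on failure
    for _ in range(retries):
        proxies = get_random_ip(ip_list)  # helper defined in the proxy-harvesting snippet above
        try:
            return requests.get(url, headers=headers, proxies=proxies, timeout=timeout).text
        except requests.RequestException:
            continue  # this proxy failed, try another one
    # Last resort: fall back to a direct request without a proxy
    return requests.get(url, headers=headers, timeout=timeout).text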

The final, complete code:

# coding=utf-8
import requests
import time
from lxml import etree


def getUrl():
    for i in range(33):
        url = 'http://task.zbj.com/t-ppsj/p{}s5.html'.format(i + 1)
        spiderPage(url)
        time.sleep(1)  # short pause between pages so requests are not fired back-to-back


def spiderPage(url):
    if url is None:
        return None
    try:
        proxies = {
            'http': 'http://221.202.248.52:80',
        }
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4295.400'
        headers = {'User-Agent': user_agent}
        htmlText = requests.get(url, headers=headers, proxies=proxies).text
        selector = etree.HTML(htmlText)
        tds = selector.xpath('//*[@class="tab-switch tab-progress"]/table/tr')
        for td in tds:
            price = td.xpath('./td/p/em/text()')
            href = td.xpath('./td/p/a/@href')
            title = td.xpath('./td/p/a/text()')
            subTitle = td.xpath('./td/p/text()')
            deadline = td.xpath('./td/span/text()')
            # Conditional expression: value_if_true if condition else value_if_false
            price = price[0] if len(price) > 0 else ''
            title = title[0] if len(title) > 0 else ''
            href = href[0] if len(href) > 0 else ''
            subTitle = subTitle[0] if len(subTitle) > 0 else ''
            deadline = deadline[0] if len(deadline) > 0 else ''
            print(price, title, href, subTitle, deadline)
            print('---------------------------------------------------------------------------------------')
            spiderDetail(href)
    except Exception as e:
        print('Error:', e)


def spiderDetail(url):
    if url is None:
        return None
    try:
        htmlText = requests.get(url).text
        selector = etree.HTML(htmlText)
        aboutHref = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/a/@href')
        price = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/div/p[1]/text()')
        title = selector.xpath('//*[@id="utopia_widget_10"]/div[1]/div/div/h2/text()')
        contentDetail = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/div[1]/text()')
        publishDate = selector.xpath('//*[@id="utopia_widget_10"]/div[2]/div/div[1]/p/text()')
        aboutHref = aboutHref[0] if len(aboutHref) > 0 else ''
        price = price[0] if len(price) > 0 else ''
        title = title[0] if len(title) > 0 else ''
        contentDetail = contentDetail[0] if len(contentDetail) > 0 else ''
        publishDate = publishDate[0] if len(publishDate) > 0 else ''
        print(aboutHref, price, title, contentDetail, publishDate)
    except Exception as e:
        print('Error:', e)


if __name__ == '__main__':
    getUrl()

All of the data came out, and my IP was never banned. Of course, these are far from the only ways to avoid an IP ban; there is more to explore!
