Scraping PPT background images from 千库网 (i588ku.com) with Python

But the images disappear as soon as you click through.

First, let's test whether the site has any anti-scraping measures:

import requests
from bs4 import BeautifulSoup
import os

html = requests.get('https://i588ku.com/beijing/0-0-default-0-8-0-0-0-0-1/')
print(html.text)

The output is a 404 page; adding a User-Agent header fixes it.
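A quick way to confirm this is to compare status codes with and without the header (a minimal sketch; the exact codes are what I'd expect given the behavior described above):

import requests

url = 'https://i588ku.com/beijing/0-0-default-0-8-0-0-0-0-1/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}

# Without a UA header the site rejects the request
print(requests.get(url).status_code)                   # e.g. 404
# With a browser-like UA header it serves the real page
print(requests.get(url, headers=headers).status_code)  # e.g. 200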

Looking at the page source, each image card is a div with three classes, e.g. fl marony-item bglist_5993476. The first two classes are shared by every card, while the third embeds a per-image ID, so we select on the first two only and ignore the third (a short demo follows).
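To see why matching only the shared classes works, here is a minimal, self-contained sketch; the HTML is made up to mirror the card structure described above:

from bs4 import BeautifulSoup

# Hypothetical markup mirroring the real cards: the bglist_* class varies per image
html = '''
<div class="fl marony-item bglist_5993476"><div><a href="//i588ku.com/ycbeijing/5993476.html">a</a></div></div>
<div class="fl marony-item bglist_5991004"><div><a href="//i588ku.com/ycbeijing/5991004.html">b</a></div></div>
'''

soup = BeautifulSoup(html, 'lxml')
# 'div.fl.marony-item' requires both shared classes and simply ignores bglist_*
for a in soup.select('div.fl.marony-item div a'):
    print(a['href'])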

Applying that selector to the real page gives us the detail-page URLs:

import requests
from bs4 import BeautifulSoup
import os

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}

html = requests.get('https://i588ku.com/beijing/0-0-default-0-8-0-0-0-0-1/', headers=headers)
soup = BeautifulSoup(html.text, 'lxml')
Urlimags = soup.select('div.fl.marony-item div a')
for Urlimag in Urlimags:
    print(Urlimag['href'])

The output is:

//i588ku.com/ycbeijing/5993476.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5991004.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5990729.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5991308.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5990409.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5989982.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5978978.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5993625.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5990728.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5951314.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5992353.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5993626.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5992302.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5820069.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5804406.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5960482.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5881533.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5986104.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5956726.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5986063.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5978787.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5954475.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5959200.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5973667.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5850381.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5898111.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5924657.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5975496.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5928655.html
//i588ku.com/comnew/vip/
//i588ku.com/ycbeijing/5963925.html
//i588ku.com/comnew/vip/

The //i588ku.com/comnew/vip/ entries are ads; filter them out:

for Urlimag in Urlimags:
    if 'vip' in Urlimag['href']:
        continue
    print('http:' + Urlimag['href'])
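Note that these hrefs are protocol-relative (they start with //). Prepending 'http:' works fine here; a slightly more robust alternative, sketched below, is urllib.parse.urljoin, which resolves protocol-relative and relative links against the page URL:

from urllib.parse import urljoin

base = 'https://i588ku.com/beijing/0-0-default-0-8-0-0-0-0-1/'
# A network-path reference ('//...') inherits the scheme of the base URL
print(urljoin(base, '//i588ku.com/ycbeijing/5993476.html'))
# -> https://i588ku.com/ycbeijing/5993476.html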

Next, visit each detail page, grab the full-size image, and save it locally (os is used to create the output folder):

import requests
from bs4 import BeautifulSoup
import os

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}

html = requests.get('https://i588ku.com/beijing/0-0-default-0-8-0-0-0-0-1/', headers=headers)
soup = BeautifulSoup(html.text, 'lxml')
Urlimags = soup.select('div.fl.marony-item div a')
for Urlimag in Urlimags:
    if 'vip' in Urlimag['href']:  # skip the ad links
        continue
    # print('http:' + Urlimag['href'])
    imgurl = requests.get('http:' + Urlimag['href'], headers=headers)
    imgsoup = BeautifulSoup(imgurl.text, 'lxml')
    imgdatas = imgsoup.select_one('.img-box img')  # full-size image on the detail page
    title = imgdatas['alt']
    print('无水印:', 'https:' + imgdatas['src'])  # '无水印' = 'watermark-free'
    if not os.path.exists('千图网图片'):
        os.mkdir('千图网图片')
    with open('千图网图片/{}.jpg'.format(title), 'wb') as f:
        f.write(requests.get('https:' + imgdatas['src'], headers=headers).content)
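One caveat: title comes from the image's alt text, which may contain characters that are invalid in filenames (such as / on Linux or \ : * ? " < > | on Windows), making open() fail. A minimal sanitizer; the helper name and regex are my own, not from the original code:

import re

def safe_filename(title):
    # Replace characters that are illegal in Windows/Linux filenames
    return re.sub(r'[\\/:*?"<>|]', '_', title).strip()

# Usage: open('千图网图片/{}.jpg'.format(safe_filename(title)), 'wb')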

Finally, the complete script. The last number in the listing URL is the page index, so loop over it to crawl the first ten pages:

import requests
from bs4 import BeautifulSoup
import os

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}

for i in range(1, 11):
    print('正在下载第{}页'.format(i))  # 'Downloading page {}'
    html = requests.get('https://i588ku.com/beijing/0-0-default-0-8-0-0-0-0-{}/'.format(i), headers=headers)
    soup = BeautifulSoup(html.text, 'lxml')
    Urlimags = soup.select('div.fl.marony-item div a')
    for Urlimag in Urlimags:
        if 'vip' in Urlimag['href']:  # skip the ad links
            continue
        # print('http:' + Urlimag['href'])
        imgurl = requests.get('http:' + Urlimag['href'], headers=headers)
        imgsoup = BeautifulSoup(imgurl.text, 'lxml')
        imgdatas = imgsoup.select_one('.img-box img')
        title = imgdatas['alt']
        print('无水印:', 'https:' + imgdatas['src'])
        if not os.path.exists('千图网图片'):
            os.mkdir('千图网图片')
        with open('千图网图片/{}.jpg'.format(title), 'wb') as f:
            f.write(requests.get('https:' + imgdatas['src'], headers=headers).content)
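As a final sketch of an improvement not in the original post: pausing between requests and retrying on failure makes the crawl politer and keeps one flaky detail page from killing the whole run. A hypothetical fetch helper you could use in place of the bare requests.get calls above:

import time
import requests

headers = {'User-Agent': 'Mozilla/5.0'}  # abbreviated UA for the sketch

def fetch(url, retries=3, delay=1.0):
    # Retry a GET a few times, pausing between attempts,
    # so one failed request doesn't abort the whole crawl.
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            resp.raise_for_status()
            return resp
        except requests.RequestException as e:
            print('retry {}/{} for {}: {}'.format(attempt + 1, retries, url, e))
            time.sleep(delay)
    return None

# Usage inside the loop above (hypothetical):
# resp = fetch('http:' + Urlimag['href'])
# if resp is None:
#     continue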
