A Simple Python Image Crawler (downloading wallpapers with requests + lxml)

The script below walks the wallpaper list pages of netbian.com for one category, follows each link to its detail page, and saves the full-size image into a local folder.

import os

import requests
from lxml import etree

# Wallpaper category on netbian.com and the local output directory
Kinds = "dongman"
Down_way = "D://壁纸//" + Kinds + "//"

def down_index(start, stop):
    # Crawl the list pages from `start` to `stop` and download every wallpaper found on them.
    if start == 1:
        # The first list page has no index suffix in its URL
        r = requests.get("http://netbian.com/" + Kinds)
        html = etree.HTML(r.text)
        result_index = html.xpath('//a[@target="_blank"]')
        times = 0
        for in_url in result_index:
            # A serialized anchor looks like <a href="/desk/12345.htm" ...>,
            # so characters [15:20] hold the five-digit wallpaper id
            number = etree.tostring(in_url).decode('utf-8')[15:20]
            if number.isdigit():
                save_pic(number)
                times += 1
            if times == 36:
                # A list page shows at most 36 wallpapers
                break
        print("+++++++++++++++++++++++++++Finish |-1-|++++++++++++++++++++++++")
        start = 2
    for Index in range(start, stop + 1):
        r = requests.get("http://netbian.com/" + Kinds + "/index_{}.htm".format(Index))
        html = etree.HTML(r.text)
        result_index = html.xpath('//a[@target="_blank"]')
        times = 0
        for in_url in result_index:
            number = etree.tostring(in_url).decode('utf-8')[15:20]
            if number.isdigit():
                save_pic(number)
                times += 1
            if times == 36:
                break
        print("++++++++++++++++++++++++++++Finish|-{}-|++++++++++++++++++++++++".format(Index))

def save_pic(number):
    # Open the wallpaper's detail page and save the full-size image to disk.
    r = requests.get("http://netbian.com/desk/{}.htm".format(number))
    html = etree.HTML(r.text)
    re_pic = html.xpath('//div[@class="pic"]')
    for p_url in re_pic:
        # The image URL sits at a fixed offset inside the serialized <div class="pic">
        url2 = etree.tostring(p_url).decode('utf-8')[82:156]
        r = requests.get(url2)
        p = Down_way + str(number) + ".jpg"
        print(p)
        if not os.path.exists(p):
            # `with` closes the file automatically
            with open(p, 'wb') as f:
                f.write(r.content)

def main():
    print("Images will be saved to " + Down_way)
    if not os.path.exists(Down_way):
        # makedirs also creates the parent "D://壁纸//" folder if it is missing
        os.makedirs(Down_way)
    try:
        start = int(input("Start page (default 1): "))
    except ValueError:
        start = 1
    try:
        stop = int(input("End page (default 3): "))
    except ValueError:
        stop = 3
    down_index(start, stop)
    print("==========================All images downloaded========================")


if __name__ == "__main__":
    main()
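
The id slice [15:20] and the image-URL slice [82:156] above depend on the exact markup netbian.com happens to serve, so they break as soon as the HTML shifts by a single character. A rough sketch of a more robust alternative, assuming the detail links still look like /desk/12345.htm and the full-size image is the <img> inside <div class="pic">, pulls the same values out of the element attributes instead (list_wallpaper_ids and image_url are hypothetical helper names, not part of the original script):

# Sketch: attribute-based extraction instead of fixed string offsets.
import re
import requests
from lxml import etree

def list_wallpaper_ids(list_url):
    # Collect the numeric ids of all detail pages linked from one list page
    html = etree.HTML(requests.get(list_url).text)
    ids = []
    for href in html.xpath('//a[@target="_blank"]/@href'):
        m = re.match(r'/desk/(\d+)\.htm$', href)
        if m:
            ids.append(m.group(1))
    return ids

def image_url(number):
    # Return the src of the image inside <div class="pic"> on the detail page, if any
    detail = "http://netbian.com/desk/{}.htm".format(number)
    html = etree.HTML(requests.get(detail).text)
    srcs = html.xpath('//div[@class="pic"]//img/@src')
    return srcs[0] if srcs else None

Either helper could replace the string slicing in down_index and save_pic with only small changes to the surrounding code.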
