Python: a simple image crawler for jandan.net (煎蛋网)

import urllib.request
import os

def get_url(url):
	# Fetch the gallery front page and return the current comment-page number as a string.
	req = urllib.request.Request(url)
	req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
	response = urllib.request.urlopen(req)
	html = response.read().decode('utf-8', 'ignore')

	# The markup contains <span class="current-comment-page">[123]</span>;
	# skipping the 20 characters of 'current-comment-page' plus '">[' (3 more) lands on the number,
	# and the closing ']' marks its end.
	a = html.find('current-comment-page') + 23
	b = html.find(']', a)

	return html[a:b]
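
The fixed offset of 23 above is easy to break if jandan.net changes its markup. A regular expression is a more tolerant way to pull out the same number; the sketch below is a hypothetical helper (get_page_number is not part of the original script) and assumes the counter is still rendered as current-comment-page">[N]:

import re

def get_page_number(html):
	# Hypothetical alternative to the fixed-offset slicing in get_url():
	# capture the digits between the brackets of current-comment-page">[123].
	match = re.search(r'current-comment-page">\[(\d+)\]', html)
	return match.group(1) if match else None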

def find_url(page_url):
	# Fetch one gallery page and collect the image URLs it contains.
	req = urllib.request.Request(page_url)
	req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
	response = urllib.request.urlopen(req)
	html = response.read().decode('utf-8', 'ignore')

	url_list = []

	# Scan for every 'img src="..."' attribute and keep the ones that end in '.jpg'.
	a = html.find('img src=')

	while a != -1:
		# Look for '.jpg' within the next 255 characters, i.e. inside this tag.
		b = html.find('.jpg', a, a + 255)
		if b != -1:
			# a + 9 skips 'img src="' (8 characters plus the opening quote);
			# b + 4 keeps the '.jpg' extension.
			url_list.append(html[a + 9:b + 4])
		else:
			# No .jpg in range: step past this 'img src=' and keep searching.
			b = a + 9
		a = html.find('img src=', b)

	return url_list
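
The find() loop above works but is fragile. A regular expression over the same downloaded html collects the links in one call; the sketch below is a hypothetical variant (find_url_re is not in the original) and, like the original, only keeps .jpg links:

import re

def find_url_re(html):
	# Hypothetical variant of find_url(): grab every src attribute that ends in .jpg.
	# It expects the already-downloaded page source rather than a URL.
	return re.findall(r'img src="([^"]+?\.jpg)"', html)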


def save_pic(folder, img_list):
	# Download every image in img_list into the current directory
	# (download() has already chdir'd into `folder`, which is kept only for clarity).
	for each in img_list:
		filename = each.split('/')[-1]
		# The scraped URLs are protocol-relative ('//...'), so prepend a scheme.
		img_url = 'http:' + each
		req = urllib.request.Request(img_url)
		req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
		response = urllib.request.urlopen(req)
		img = response.read()
		# Open the file only after the fetch succeeds, so a failed request
		# does not leave an empty file behind.
		with open(filename, 'wb') as f:
			f.write(img)
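
One dead image link is enough to make urlopen() raise and abort the whole run. Below is a minimal sketch of skipping failed downloads, assuming the same protocol-relative URLs as above (save_pic_safe is a hypothetical variant, not part of the original script):

import urllib.error
import urllib.request

def save_pic_safe(folder, img_list):
	# Hypothetical variant of save_pic() that skips images that fail to download.
	for each in img_list:
		filename = each.split('/')[-1]
		try:
			req = urllib.request.Request('http:' + each)
			req.add_header('User-Agent', 'Mozilla/5.0')
			img = urllib.request.urlopen(req).read()
		except urllib.error.URLError:
			continue  # skip this image and move on
		with open(filename, 'wb') as f:
			f.write(img)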
	
	
	

def download(folder='demo', pages=10):
	# Create the target folder (reuse it if it already exists) and work inside it.
	os.makedirs(folder, exist_ok=True)
	os.chdir(folder)

	url = 'http://jandan.net/ooxx/'

	# Start from the newest comment page and walk backwards, one page per iteration.
	page_num = int(get_url(url))
	for i in range(pages):
		page_url = url + 'page-' + str(page_num - i) + '#comments'
		img_list = find_url(page_url)
		save_pic(folder, img_list)

	
if __name__=='__main__':
	download()
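
The same request/User-Agent/read boilerplate is repeated in get_url(), find_url() and save_pic(). One possible tidy-up is a single helper; the sketch below (url_open is a hypothetical name, not in the original) returns raw bytes so callers can decode() them for HTML or write them straight to disk for images:

import urllib.request

def url_open(url):
	# Hypothetical helper: fetch a URL with the browser User-Agent and return raw bytes.
	req = urllib.request.Request(url)
	req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')
	return urllib.request.urlopen(req).read()

With it, get_url() could use html = url_open(url).decode('utf-8', 'ignore') and save_pic() could use img = url_open('http:' + each). To grab a few pages into a custom folder, call e.g. download('jandan_pics', 5).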