爬取图片 (image-scraping template)
使用前需修改三处 / edit three things before running:
1.改网址 (the target URL)
2.改正则 (the regex that matches image URLs)
3.改本地保存地址 (the local save path)
# -*- coding: utf-8 -*-
#导入所需第三方包。两个包自带。urllib.request用于请求网页,re用于正则表达式
#import request from urllib 和下面的意思一样,但是使用时可以少写'urllib.'
import urllib.request
import re
import time
# Fetch a page's HTML (same role as the requests package).
def getHtml(url):
    """Fetch *url* and return the raw response body as bytes.

    Sends the module-level ``header`` dict (browser User-Agent) because some
    sites reject urllib's default UA. Note: urllib returns bytes from
    ``read()``; requests would expose decoded text via ``.text``.
    """
    request = urllib.request.Request(url, headers=header)
    # Context manager closes the connection — the original leaked the
    # response object.
    with urllib.request.urlopen(request) as response:
        return response.read()
# Extract image URLs from the HTML and download them.
def getImg(html):
    """Find every ``https://...jpg`` URL in *html* (bytes) and download each.

    Downloads go to the hard-coded Windows folder (step 3 in the header
    notes), named ``<i>-<i>.jpg`` with a running counter. Returns None;
    downloading is the side effect.
    """
    html = html.decode('utf-8')
    # 2. Adjust this regex for the target site.
    src = r'https://[^\s]*?\.jpg'
    # Compile once, then collect every match in the page.
    imgre = re.compile(src)
    imglist = re.findall(imgre, html)
    # De-duplicate the collected URLs (set membership), back to a list.
    imglist = list(set(imglist))
    # 3. Adjust the save path. The path MUST be a raw string: the original
    # 'E:\PythonTest\%s-%s.jpg' relied on the invalid escapes \P and \%
    # passing through — a DeprecationWarning today and a SyntaxError in
    # future Python versions.
    for x, imgurl in enumerate(imglist):
        urllib.request.urlretrieve(imgurl, r'E:\PythonTest\%s-%s.jpg' % (x, x))
# Browser-like User-Agent; some servers refuse urllib's default one.
header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
# 1. Change the target URL here. The scheme is required: urllib raises
# ValueError("unknown url type") for a bare host like "www.baidu.com".
html = getHtml("https://www.baidu.com")
getImg(html)
print('OK')
利用了网页下一页的功能爬txt (crawls a text novel by following the "next page" link)
基于python3.9.5 (written for Python 3.9.5)
使用前需修改 / edit before running:
1.改urlstart (the starting page URL)
2.根据具体逻辑修改xieru方法 (adapt the xieru function to the site's layout)
技术点 (techniques used):
etree / xpath:
https://www.w3school.com.cn/xpath/xpath_syntax.asp
python list:
https://www.runoob.com/python/python-lists.html
# -*- coding: utf-8 -*-
import requests
from lxml import etree
import time
# 1. Change to the first page's full URL.
urlstart = 'www.baidu.com'
# Site root prepended to the relative href of each rel="next" link.
# `xieru` references this name, but the original script never defined it —
# a guaranteed NameError the moment a next-page link was followed.
# Set it to the site's base URL (no trailing slash if hrefs start with "/").
urlbase = 'www.baidu.com'
# Browser-like User-Agent; some servers refuse the default one.
header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
def xieru(url, flag):
    """Scrape a novel starting at *url*, appending its text to E://1.txt.

    flag == 1: write the chapter title before the body (start of a chapter);
    otherwise append body text only (continuation pages). Follows the page's
    rel="next" link: "下一章" (next chapter) restarts with flag=1, "下一页"
    (next page) continues with flag=0; anything else — or no link — stops.

    Rewritten as a loop: the original recursed once per page and would hit
    Python's recursion limit on long books, and it crashed with IndexError
    on the final page (no rel="next" link) before writing that page's text.
    """
    while True:
        # get(url).text fetches the page as a decoded string.
        page = requests.get(url, headers=header).text
        # Parse into an xpath-queryable HTML tree.
        selector = etree.HTML(page)
        paragraphs = list(selector.xpath('//div[@id="content"]/p/text()'))
        # Drop the trailing "为优化阅读体验…" / "继续阅读…" boilerplate pair when
        # present; the length guard avoids the original's IndexError on
        # pages with fewer than two paragraphs.
        if (len(paragraphs) >= 2
                and paragraphs[-2].startswith("为优化阅读体验")
                and paragraphs[-1].startswith("继续阅读")):
            paragraphs = paragraphs[:-2]
        # Strip the © character the site embeds in its text.
        title = '\r\n'.join(selector.xpath('//p[@class="title"]/text()')).replace(u'\xa9', u'')
        detail = '\r\n'.join(paragraphs).replace(u'\xa9', u'')
        next_titles = selector.xpath('//a[@rel="next"]/text()')
        next_hrefs = selector.xpath('//a[@rel="next"]/@href')
        # `with` guarantees the file is closed even if a write fails.
        with open('E://1.txt', "a", encoding='utf-8') as fi:
            if flag == 1:
                fi.write('\r\n' + title + '\r\n' + detail)
            else:
                fi.write('\r\n' + detail)
        # Be polite to the server between requests.
        time.sleep(1)
        # No "next" link: done (the original indexed [0] on an empty list here).
        if not next_titles or not next_hrefs:
            return
        print(title + ' ' + next_hrefs[0])
        if next_titles[0] == "下一章":
            flag = 1  # new chapter: write its title on the next pass
        elif next_titles[0] == "下一页":
            flag = 0  # same chapter continues: body only
        else:
            return
        url = urlbase + next_hrefs[0]
# Kick off the scrape from the first page; flag=1 writes the chapter title first.
xieru(urlstart,1)