爬取并下载
上面网页的所有图片并存到本地的一个文件夹。(语言不限)(如果不能下载,请把图片链接爬取出来并写入文件)
最后提交成品:源代码一份。发送至邮箱:904727147@qq.com。完成之后加我个人微信:18362983803
以下是一个示例(python提取知乎当前最热的问答内容并写入本地)
#-*- coding: utf-8 -*-
import urllib.request
import re
from _io import open
def yunpan_search(output_path='/root/Desktop/zhihu.txt'):
    """Fetch the Zhihu "explore" page, extract the hot Q&A bodies, and save them.

    Runs in two phases:
      1. Download the page and write every matched answer body (separated by
         blank lines) to *output_path*.
      2. Re-read that file, strip ASCII word characters and literal "&;" runs
         from each line, and overwrite the file with the cleaned text.

    Args:
        output_path: Destination text file. Defaults to the original
            hard-coded path, so existing callers are unaffected.
    """
    url = "https://www.zhihu.com/explore"
    req = urllib.request.Request(url, headers={
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
    })
    # Context manager guarantees the HTTP response is closed even on error.
    with urllib.request.urlopen(req) as resp:
        html = resp.read().decode('utf-8')

    # Answer bodies sit between the hidden <textarea> tag and the date span.
    rex = '(?<=<textarea class="content hidden">\n).*?(?=<span class="answer-date-link-wrap">)'
    matches = re.findall(rex, html, re.S)

    # Phase 1: dump the raw matches, one blank line between entries.
    with open(output_path, 'w', encoding='utf-8') as f:
        for answer in matches:
            f.write(answer)
            f.write('\n\n')
    print("抓取成功!")

    # BUG FIX: the original used re.L (LOCALE), which is illegal with str
    # patterns in Python 3 and raised ValueError here.  re.ASCII reproduces
    # the intended Python-2 meaning of \w: [a-zA-Z0-9_] only, so Chinese
    # text survives the substitution.
    word_pat = re.compile(r'\w*', re.ASCII)
    entity_pat = re.compile(r"(&;)*")

    # Phase 2: clean the dump in place (strip ASCII words, then "&;" runs).
    with open(output_path, 'r+', encoding='utf-8') as f:
        cleaned = [entity_pat.sub('', word_pat.sub('', line)) for line in f]
        f.seek(0)
        f.truncate(0)
        f.writelines(cleaned)
    print("处理成功!")
# Run the scraper only when this file is executed directly, not on import.
if __name__=='__main__':
    yunpan_search()