You find a cool web page and want to keep a local copy. What do you do?
Hunt for an offline-download tool? I searched for quite a while and found nothing suitable,
so I decided to write one myself, figuring it would come in handy later.
The first method is to download with wget. It is simple, but there is little room for advanced customization (sending custom request headers, for example), so some pages cannot be crawled this way at all. A basic single-page invocation:
wget \
--page-requisites \
--convert-links \
--domains www.17sucai.com \
--no-parent \
http://www.17sucai.com/preview/949344/2018-03-19/LoginHTML/demo.html
After the download, the files sit under a directory tree that mirrors the site's URL path (www.17sucai.com/preview/...).
What each option does (a fuller whole-site command combining them is sketched right after this list):
--recursive: download the entire website.
--domains website.org: don't follow links outside website.org.
--no-parent: don't follow links above the starting directory.
--page-requisites: get all the elements that compose the page (images, CSS and so on).
--html-extension: save files with the .html extension.
--convert-links: convert links so that they work locally, offline.
--restrict-file-names=windows: modify filenames so that they also work on Windows.
--no-clobber: don't overwrite existing files (useful when a download is interrupted and resumed).
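For reference, mirroring a whole site rather than a single page combines these options roughly as follows (website.org and the tutorials/html/ path are placeholders for the real site):
wget \
--recursive \
--no-clobber \
--page-requisites \
--html-extension \
--convert-links \
--restrict-file-names=windows \
--domains website.org \
--no-parent \
http://website.org/tutorials/html/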
Here is a second method, in Python.
It takes a bit more code, but it can handle flexible requirements such as sending custom request headers.
# -*- coding: utf-8 -*-
import os
import re
import string
import urllib.parse
import urllib.request
from urllib.parse import quote

# Request headers: some sites refuse unknown clients, so we identify
# ourselves as Baiduspider. NOTE: Host must match the site you are
# fetching; change it when you point the script at a different domain.
headers = {"Host": "riji.bozhong.com",
           "Connection": "keep-alive",
           "Cache-Control": "max-age=0",
           "User-Agent": "Baiduspider+(+http://www.baidu.com/search/spider.htm)",
           "Upgrade-Insecure-Requests": "1",
           "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
           "Accept-Language": "zh,zh-CN;q=0.9,en-US;q=0.8,en;q=0.7"}
def sendwithhead(url, data, headers):
    """Fetch url (optionally with custom headers) and return the body
    as text, or '' on failure."""
    ret = ""
    try:
        if headers is None:
            request = urllib.request.Request(url, data)
        else:
            request = urllib.request.Request(url, data, headers)
        ret = urllib.request.urlopen(request).read().decode('utf-8')
    except Exception as e:
        print(e)
    return ret
def write(mydir, filename, content):
    """Write text content to mydir/filename, creating the directory if needed."""
    if not os.path.exists(mydir):
        os.makedirs(mydir)
    with open(os.path.join(mydir, filename), 'w', encoding="utf-8") as output:
        output.write(content)
def saveurl(url, mydir, filename, headers):
    """Download a text resource, skipping files that already exist and are non-empty."""
    mypath = os.path.join(mydir, filename)
    if not os.path.exists(mypath) or os.path.getsize(mypath) == 0:
        url = quote(url, safe=string.printable)  # percent-encode any non-ASCII characters
        content = sendwithhead(url, None, headers)
        write(mydir, filename, content)
def saveimg(url, localpath):
    """Download a binary resource (e.g. an image) straight to localpath."""
    try:
        if not os.path.exists(localpath) or os.path.getsize(localpath) == 0:
            dirname = os.path.dirname(localpath)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            url = quote(url, safe=string.printable)
            urllib.request.urlretrieve(url, localpath)
    except Exception as e:
        print(e)
def read(filepath):
    """Read a local text file and return its contents."""
    with open(filepath, encoding="utf8") as file_object:
        return file_object.read()
def getUrl(html):
    """Collect the URLs of scripts, images, stylesheets and lazy-loaded
    images (data-src attributes) referenced by the page."""
    patternjs = '<script.*?src="(.*?)"'
    patternimg = '<img.*?src="(.*?)"'
    patterncss = '<link.*?href="(.*?)"'
    patternimg2 = '<div.*?data-src="(.*?)"'
    href = re.compile(patternjs, re.S | re.I).findall(html)
    href += re.compile(patternimg, re.S | re.I).findall(html)
    href += re.compile(patterncss, re.S | re.I).findall(html)
    href += re.compile(patternimg2, re.S | re.I).findall(html)
    return href
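# NOTE: these regexes are a quick heuristic and will miss unquoted or
# single-quoted attributes; for messier pages, a real HTML parser (the
# standard library's html.parser, for instance) is more reliable.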
# Download the page itself, then every resource it references.
rootpath = "C:/Users/xwm/Desktop/wangye/"
url = "http://www.17sucai.com/preview/949344/2018-03-19/LoginHTML/demo.html"
filename = url.split("/")[-1]
saveurl(url, rootpath, filename, headers)
html = read(rootpath + filename)
urls = getUrl(html)
for item in urls:
    print(item)
    # Resolve relative references against the page's URL.
    myurl = urllib.parse.urljoin(url, item)
    if myurl.startswith("http"):
        o = urllib.parse.urlparse(item)
        filename = myurl.split("/")[-1]
        filedir = o.path.replace(filename, "")
        # Mirror the original path structure under rootpath so that
        # relative links in the saved page keep working.
        if item.endswith((".jpg", ".png", ".gif")):
            saveimg(myurl, rootpath + o.path)
        else:
            saveurl(myurl, rootpath + filedir, filename, None)
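One thing this script does not yet do (and wget's --convert-links does) is rewrite absolute resource URLs inside the saved HTML so they resolve offline. A minimal sketch of that step, assuming the page was saved directly under rootpath as above; the fixlinks helper is a name of my own, for illustration:
def fixlinks(html, pageurl):
    """Rewrite absolute src/href URLs to paths relative to rootpath,
    matching where saveimg/saveurl placed the files. Illustrative sketch."""
    def to_local(match):
        attr, link = match.group(1), match.group(2)
        path = urllib.parse.urlparse(urllib.parse.urljoin(pageurl, link)).path
        return '%s="%s"' % (attr, path.lstrip("/"))
    return re.sub(r'(src|href)="(http[^"]*)"', to_local, html)

# Re-save the page with the rewritten links, e.g.:
# write(rootpath, filename, fixlinks(html, url))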