requests: web page download library
Sending a request
requests.get/post(url, params, data, headers, timeout, verify, allow_redirects, cookies)
url: the target page URL to download;
params: a dict of URL query parameters appended after ?, e.g. ?id=123&name=xiaoming;
data: a dict or string, usually used to submit data with a POST request;
headers: request headers such as User-Agent and Referer;
timeout: timeout in seconds;
verify: True/False, whether to verify the HTTPS certificate; defaults to True, and a certificate path can be supplied instead;
allow_redirects: True/False, whether requests should follow redirects; defaults to True;
cookies: local cookie data to send with the request (a combined example follows below).
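A minimal sketch putting several of these parameters together; the URL and header values here are placeholders, not from the original notes:
import requests

# placeholder target; swap in a real URL
r = requests.get(
    'http://httpbin.org/get',
    params={'id': 123, 'name': 'xiaoming'},   # appended as ?id=123&name=xiaoming
    headers={'User-Agent': 'Mozilla/5.0'},    # custom request header
    timeout=10,                               # give up after 10 seconds
    allow_redirects=True,                     # follow redirects (the default)
)
print(r.url)  # the final URL including the query string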
Receiving the response
r = requests.get/post(url)
# check the status code; 200 means success
r.status_code
# check the current encoding (assign to it to change the encoding)
r.encoding
# view the returned page content as text
r.text
# view the HTTP headers of the response
r.headers
# view the URL that was actually requested
r.url
# get the content as bytes, e.g. when downloading an image (see the sketch below)
r.content
# cookie data the server asks the client to store locally
r.cookies
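A small sketch of the image-download case mentioned above; the image URL and file name are placeholders:
import requests

# placeholder image URL; any direct link to an image works
r = requests.get('http://example.com/image.png', timeout=10)
with open('image.png', 'wb') as f:  # 'wb' because r.content is bytes
    f.write(r.content)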
Basic usage
import requests
url='http://www.crazyant.net'
r=requests.get(url)
print(r.status_code)
print(r.headers)
print(r.encoding)
print(r.text)
print(r.cookies)
URL manager
class UrlManager():
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None or len(url) == 0:
            return
        if url in self.new_urls or url in self.old_urls:
            return
        self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def get_url(self):
        if self.has_new_url():
            url = self.new_urls.pop()
            self.old_urls.add(url)
            return url
        else:
            return None

    def has_new_url(self):
        return len(self.new_urls) > 0


if __name__ == "__main__":
    url_manager = UrlManager()
    url_manager.add_new_url("url1")
    url_manager.add_new_urls(["url1", "url2"])
    print(url_manager.new_urls, url_manager.old_urls)
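The demo above only exercises the add methods; a sketch of how get_url and has_new_url might drive a crawl loop (the download/parse step is only indicated by a comment):
url_manager = UrlManager()
url_manager.add_new_url('http://www.crazyant.net')
while url_manager.has_new_url():
    url = url_manager.get_url()  # moves the URL from new_urls to old_urls
    print('crawling', url)
    # ... download the page, extract new links, then:
    # url_manager.add_new_urls(extracted_links)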
HTML parser: Beautiful Soup
Beautiful Soup
Used to extract data from HTML documents
Creating a BeautifulSoup object
from bs4 import BeautifulSoup
# create a BeautifulSoup object from an HTML string
soup = BeautifulSoup(
    html_doc,              # the HTML document string
    'html.parser',         # the HTML parser to use
    from_encoding='utf8'   # the document encoding (only relevant when html_doc is bytes)
)
Searching for nodes (find_all, find)
# signature: find_all(name, attrs, string)
# find all nodes with tag a
soup.find_all('a')
# find all <a> nodes whose link has the form /view/123.html
soup.find_all('a', href='/view/123.html')
# find all <div> nodes with class abc and text python
soup.find_all('div', class_='abc', string='python')
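Note that href='/view/123.html' only matches that exact value; to match every link of the /view/<number>.html form, a compiled regular expression can be passed for the attribute instead (a sketch, assuming links of that form exist in the page):
import re
# match any href like /view/123.html
soup.find_all('a', href=re.compile(r'^/view/\d+\.html$'))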
Accessing node information
# given a node: <a href='1.html'>Python</a>
# get the tag name of the node
node.name
# get the href attribute of the <a> node
node['href']
# get the link text of the <a> node
node.get_text()
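Putting the three steps together on a small inline HTML string; the string itself is made up for illustration:
from bs4 import BeautifulSoup

html_doc = "<div><a href='1.html'>Python</a><a href='2.html'>Java</a></div>"
soup = BeautifulSoup(html_doc, 'html.parser')
for node in soup.find_all('a'):
    print(node.name, node['href'], node.get_text())
# output:
# a 1.html Python
# a 2.html Java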
Analyzing a page
from bs4 import BeautifulSoup

with open('./test.html', encoding='utf-8') as fin:
    html_doc = fin.read()

soup = BeautifulSoup(html_doc, 'html.parser')
# first locate the div that contains the links
div_node = soup.find('div', id="content")
links = div_node.find_all('a')
for link in links:
    print(link.name, link['href'], link.get_text())
Hands-on examples
Crawl the full list of articles on a blog site
Root domain: http://www.crazyant.net
Article page URL pattern: http://www.crazyant.net/2261.html
# Tip: attach a cookie dict to a requests call
import requests

cookies = {
    'captchaKey': '14a54079a1',
    'capthaExpire': '1548852352'
}
r = requests.get(
    'http://url',
    cookies=cookies
)
# Tip: fuzzy matching with a regular expression
import re

url1 = 'http://www.crazyant.net/2261.html'
url2 = 'http://www.crazyant.net/2261.html#comments'
url3 = 'http://www.baidu.com'

pattern = r'^http://www\.crazyant\.net/\d+\.html$'
print(re.match(pattern, url1))  # match object: url1 fits the pattern
print(re.match(pattern, url2))  # None: the trailing #comments breaks the $ anchor
print(re.match(pattern, url3))  # None: different domain
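A possible way to combine requests, Beautiful Soup and the regular expression above into the article-list crawler described at the start of this section; the notes do not describe the page structure of crazyant.net, so collecting every <a> tag and filtering by the URL pattern is an assumption:
import re
import requests
from bs4 import BeautifulSoup

root_url = 'http://www.crazyant.net'
pattern = r'^http://www\.crazyant\.net/\d+\.html$'

r = requests.get(root_url, timeout=10)
soup = BeautifulSoup(r.text, 'html.parser')

article_urls = set()
for link in soup.find_all('a'):
    href = link.get('href', '')   # '' when the tag has no href attribute
    if re.match(pattern, href):
        article_urls.add(href)

for url in sorted(article_urls):
    print(url)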
Crawler for the 89wx novel site
import requests
from bs4 import BeautifulSoup


def get_novel_chapters():
    # fetch the table of contents and return (url, title) pairs
    rooturl = 'http://www.89wx.cc/40/40293/'
    r = requests.get(rooturl)
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, 'html.parser')
    data = []
    for dd in soup.find_all('dd'):
        link = dd.find('a')
        if not link:
            continue
        data.append(('http://www.89wx.cc%s' % link['href'], link.get_text()))
    return data


def get_chapter_content(url):
    # fetch a single chapter page and return its text
    r = requests.get(url)
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, 'html.parser')
    return soup.find('div', id='content').get_text()


# print crawling progress
novel_chapters = get_novel_chapters()
total_cnt = len(novel_chapters)
idx = 0
for chapter in novel_chapters:
    idx += 1
    url, title = chapter
    print(idx, total_cnt)
    with open('%s.txt' % title, 'w', encoding='utf-8') as fout:  # write the file in utf-8
        fout.write(get_chapter_content(url))
    break  # stop after the first chapter; remove this to download them all