Web scraping: requests and BeautifulSoup, modules / methods / parameters explained

import requests

1. Methods

requests.get
requests.post 
requests.put 
requests.delete 
...
requests.request(method='POST')
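
These shortcuts all delegate to requests.request; a minimal sketch of the equivalence (httpbin.org serves as a stand-in test endpoint):

r1 = requests.get('http://httpbin.org/get')
r2 = requests.request(method='GET', url='http://httpbin.org/get')
print(r1.status_code, r2.status_code)  # both send the same GET request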

2. Parameters

2.1  url
2.2  headers
2.3  cookies
2.4  params     
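	A combined sketch for 2.1 through 2.4 (header, cookie, and parameter values are made up; httpbin.org echoes the request back):

	ret = requests.get(
	    url='http://httpbin.org/get',             # 2.1 the target URL
	    headers={'User-Agent': 'my-spider/1.0'},  # 2.2 request headers
	    cookies={'session': 'abc123'},            # 2.3 cookies sent along
	    params={'k1': 'v1', 'k2': 'v2'},          # 2.4 appended as the query string
	)
	print(ret.url)  # http://httpbin.org/get?k1=v1&k2=v2
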
2.5  data, sends the request body (form-encoded)
	requests.post(
	....
	data = {'user': 'mouni', 'pwd': '123'}
	)
	
	On the wire the request looks like this:
		POST /index HTTP/1.1\r\nHost: c1.com\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\nuser=mouni&pwd=123

        
2.6  json, sends the request body (JSON-encoded)
        requests.post(
            ...,
            json={'user':'alex','pwd':'123'}
        )
	On the wire the request looks like this:
        POST /index HTTP/1.1\r\nHost: c1.com\r\nContent-Type: application/json\r\n\r\n{"user": "alex", "pwd": "123"}
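
        The practical difference from data: json serializes the body and sets Content-Type: application/json, while data form-encodes it. A quick check (httpbin.org as a stand-in echo endpoint):

        r_form = requests.post('http://httpbin.org/post', data={'user': 'mouni'})
        r_json = requests.post('http://httpbin.org/post', json={'user': 'mouni'})
        print(r_form.request.headers['Content-Type'])  # application/x-www-form-urlencoded
        print(r_json.request.headers['Content-Type'])  # application/json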
        
2.7 Proxies (proxies)
    # Without authentication
        proxy_dict = {
            "http": "61.172.249.96:80",
            "https": "http://61.185.219.126:3128",
        }
        ret = requests.get("https://www.proxy360.cn/Proxy", proxies=proxy_dict)
        
    
    # Proxy with authentication
        from requests.auth import HTTPProxyAuth
        
        proxyDict = {
            'http': '77.75.105.165',
            'https': '77.75.106.165'
        }
        auth = HTTPProxyAuth('username', 'password')
        
        r = requests.get("http://www.google.com", data={'xxx': 'ffff'}, proxies=proxyDict, auth=auth)
        print(r.text)
-----------------------------------------------------------------------------------------
2.8 File upload (files)
    # Send a file
        file_dict = {
            'f1': open('xxxx.log', 'rb')
        }
        requests.request(
            method='POST',
            url='http://127.0.0.1:8000/test/',
            files=file_dict
        )
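
        requests also accepts a tuple per field to control the reported filename and content type (a sketch; field and file names are placeholders):

        file_dict = {
            'f1': ('report.log', open('xxxx.log', 'rb'), 'text/plain')
        }
        requests.post('http://127.0.0.1:8000/test/', files=file_dict)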
        
2.9 Authentication (auth)

    Internally:
        The username and password are joined with a colon, Base64-encoded, and sent to the backend in a request header:
        
            - "user:password"
            - base64("user:password")
            - "Basic base64("user:password")"
            - Request header:
                Authorization: "Basic base64("user:password")"
        
    from requests.auth import HTTPBasicAuth, HTTPDigestAuth

    ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
    print(ret.text)
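
    To see what HTTPBasicAuth puts on the wire, the same header can be built by hand (an equivalent sketch of the scheme described above):

    import base64

    token = base64.b64encode(b'wupeiqi:sdfasdfasdf').decode()
    ret = requests.get('https://api.github.com/user',
                       headers={'Authorization': 'Basic ' + token})
    print(ret.status_code)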
    
2.10 Timeout (timeout)
    # A single value is the number of seconds to wait for a response
    # ret = requests.get('http://google.com/', timeout=1)
    # print(ret)

    # A tuple is (connect timeout, read timeout), in seconds
    # ret = requests.get('http://google.com/', timeout=(5, 1))
    # print(ret)
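
    When the limit is exceeded, requests raises requests.exceptions.Timeout; a minimal sketch of catching it:

    try:
        ret = requests.get('http://google.com/', timeout=1)
        print(ret)
    except requests.exceptions.Timeout:
        print('request timed out')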
    
2.11 Redirects (allow_redirects)
    ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
    print(ret.text)
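
    With allow_redirects=False a redirecting endpoint returns the 30x response itself; the target shows up in the Location header (a sketch, assuming the test URL redirects):

    ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
    print(ret.status_code)              # e.g. 301 or 302 instead of the final page
    print(ret.headers.get('Location'))  # where the redirect would have gone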
    
2.12 Streaming large downloads (stream)
    from contextlib import closing
    with closing(requests.get('http://httpbin.org/get', stream=True)) as r1:
        # Process the response here, piece by piece.
        for i in r1.iter_content():
            print(i)
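
    For a real large download, iterating in fixed-size chunks and writing straight to disk keeps the whole body out of memory (a sketch; URL and filename are placeholders):

    with requests.get('http://httpbin.org/get', stream=True) as r:
        with open('download.bin', 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)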
        
2.13 Certificates (cert)
    - Baidu, Tencent => no certificate needs to be supplied (the system handles it for you)
    - Custom certificate
        requests.get('http://127.0.0.1:8000/test/', cert="xxxx/xxx/xxx.pem")
        requests.get('http://127.0.0.1:8000/test/', cert=("xxxx/xxx/xxx.pem", "xxx.xxx.xx.key"))
2.14 Skipping certificate verification (verify=False): disables server-certificate checks, e.g. for self-signed test certificates; see the sketch below.
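
    For example, against a test server using a self-signed certificate (a sketch):

    ret = requests.get('https://127.0.0.1:8000/test/', verify=False)
    print(ret.text)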

BeautifulSoup
is a module that accepts an HTML or XML string, parses it, and then lets you use the methods it provides to quickly locate specified elements, making it simple to find a given element inside an HTML or XML document.

from bs4 import BeautifulSoup
 
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
asdf
    <div class="title">
        <b>The Dormouse's story总共</b>
        <h1>f</h1>
    </div>
<div class="story">Once upon a time there were three little sisters; and their names were
    <a  class="sister0" id="link1">Els<span>f</span>ie</a>,
    <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
    <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</div>
ad<br/>sf
<p class="story">...</p>
</body>
</html>
"""
 
soup = BeautifulSoup(html_doc, features="lxml")
# Find the first a tag
tag1 = soup.find(name='a')
# Find all a tags
tag2 = soup.find_all(name='a')
# Find the tag with id=link2
tag3 = soup.select('#link2')

Installation

pip3 install beautifulsoup4
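
The examples above parse with features="lxml", which additionally requires the lxml parser:

pip3 install lxml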

1. name, the tag's name

	 tag = soup.find('a')
	 name = tag.name  # get
	 print(name)
	 tag.name = 'span'  # set
	 print(soup)

2. attrs, the tag's attributes

	 tag = soup.find('a')
	 attrs = tag.attrs  # get
	 print(attrs)
	 tag.attrs = {'ik': 123}  # set, replaces all attributes
	 tag.attrs['id'] = 'iiiii'  # set a single attribute
	 print(soup)

3. children, all direct children

	body = soup.find('body')
	v = body.children

4. descendants, all descendants (children, grandchildren, and so on)

	 body = soup.find('body')
	 v = body.descendants

5. clear, empties all of the tag's children (the tag itself is kept)

	 tag = soup.find('body')
	 tag.clear()
	 print(soup)

6. decompose, recursively removes the tag and everything inside it

body = soup.find('body')
body.decompose()
print(soup)

7. extract, recursively removes the tag and everything inside it, and returns the removed tag

body = soup.find('body')
v = body.extract()
print(soup)

8. decode, converts to a string (including the current tag); decode_contents (excluding the current tag)

body = soup.find('body')
v = body.decode()           # string including <body> itself
v = body.decode_contents()  # string of the children only
print(v)

9. encode, converts to bytes (including the current tag); encode_contents (excluding the current tag)

body = soup.find('body')
v = body.encode()           # bytes including <body> itself
v = body.encode_contents()  # bytes of the children only
print(v)

10. find, gets the first matching tag

tag = soup.find('a')
print(tag)
tag = soup.find(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
tag = soup.find(name='a', class_='sister', recursive=True, text='Lacie')  # equivalent shorthand
print(tag)

11. find_all, gets all matching tags

tags = soup.find_all('a')
print(tags)

tags = soup.find_all('a',limit=1)
print(tags)

tags = soup.find_all(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# tags = soup.find(name='a', class_='sister', recursive=True, text='Lacie')
print(tags)

####### Lists #######

v = soup.find_all(name=['a','div'])
print(v)

v = soup.find_all(class_=['sister0', 'sister'])
print(v)

v = soup.find_all(text=['Tillie'])
print(v, type(v[0]))


v = soup.find_all(id=['link1','link2'])
print(v)

v = soup.find_all(href=['link1','link2'])
print(v)

####### Regular expressions #######

import re

rep = re.compile('p')   # matches tag names containing 'p'
rep = re.compile('^p')  # matches tag names starting with 'p'
v = soup.find_all(name=rep)
print(v)
rep = re.compile('sister.*')
v = soup.find_all(class_=rep)
print(v)

rep = re.compile('http://www.oldboy.com/static/.*')
v = soup.find_all(href=rep)
print(v)

####### Filtering with a function #######
def func(tag):
    return tag.has_attr('class') and tag.has_attr('id')
v = soup.find_all(name=func)
print(v)


## get, reads a tag attribute
tag = soup.find('a')
v = tag.get('id')
print(v)

12. has_attr, checks whether the tag has the given attribute

tag = soup.find('a')
v = tag.has_attr('id')
print(v)

13. get_text, gets the text inside a tag

tag = soup.find('a')
v = tag.get_text()  # an optional first argument acts as a separator between strings
print(v)

14. index, finds a tag's index position within another tag

tag = soup.find('body')
v = tag.index(tag.find('div'))
print(v)

tag = soup.find('body')
for i, v in enumerate(tag):
    print(i, v)

15. is_empty_element, whether the tag is an empty or self-closing element,

i.e. one of: 'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base'

tag = soup.find('br')
v = tag.is_empty_element
print(v)

16. Related nodes of the current tag

soup.next
soup.next_element
soup.next_elements
soup.next_sibling
soup.next_siblings


tag.previous
tag.previous_element
tag.previous_elements
tag.previous_sibling
tag.previous_siblings


tag.parent
tag.parents
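
A quick sketch of the singular vs. plural forms, using the sample document above (the singular returns one node, the plural returns a generator):

tag = soup.find(id='link1')
print(tag.next_sibling)          # the text node immediately after the tag
print(list(tag.next_siblings))   # every following node at the same level
print(tag.parent.name)           # div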

17. Searching a tag's related nodes

tag.find_next(...)
tag.find_all_next(...)
tag.find_next_sibling(...)
tag.find_next_siblings(...)

tag.find_previous(...)
tag.find_all_previous(...)
tag.find_previous_sibling(...)
tag.find_previous_siblings(...)

tag.find_parent(...)
tag.find_parents(...)

These accept the same parameters as find_all; for example, see the sketch below.
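
Starting from the first a tag in the sample document:

tag = soup.find('a')
print(tag.find_next('a'))       # the next a tag in document order
print(tag.find_parent('div'))   # the nearest enclosing div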

18. select / select_one, CSS selectors

soup.select("title")
 
soup.select("p:nth-of-type(3)")
 
soup.select("body a")
 
soup.select("html head title")
 
tag = soup.select("span,a")
 
soup.select("head > title")
 
soup.select("p > a")
 
soup.select("p > a:nth-of-type(2)")
 
soup.select("p > #link1")
 
soup.select("body > a")
 
soup.select("#link1 ~ .sister")
 
soup.select("#link1 + .sister")
 
soup.select(".sister")
 
soup.select("[class~=sister]")
 
soup.select("#link1")
 
soup.select("a#link2")
 
soup.select('a[href]')
 
soup.select('a[href="http://example.com/elsie"]')
 
soup.select('a[href^="http://example.com/"]')
 
soup.select('a[href$="tillie"]')
 
soup.select('a[href*=".com/el"]')
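
select always returns a list; select_one returns only the first match, or None:

print(soup.select('.sister'))       # a list of all matching tags
print(soup.select_one('.sister'))   # the first match only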
 
 
from bs4.element import Tag
 
def default_candidate_generator(tag):
	for child in tag.descendants:
		if not isinstance(child, Tag):
			continue
		if not child.has_attr('href'):
			continue
		yield child
 
tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator)
print(type(tags), tags)
 
# The same generator works together with limit:
tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator, limit=1)
print(type(tags), tags)

19. Tag contents

tag = soup.find('span')
print(tag.string)          # get
tag.string = 'new content' # set
print(soup)

tag = soup.find('body')
print(tag.string)
tag.string = 'xxx'
print(soup)

tag = soup.find('body')
v = tag.stripped_strings  # a generator over the stripped text of all descendants
print(list(v))            # materialize it to see the strings

20. append, appends a tag inside the current tag

tag = soup.find('body')
tag.append(soup.find('a'))
print(soup)

from bs4.element import Tag
obj = Tag(name='i',attrs={'id': 'it'})
obj.string = 'I am new here'
tag = soup.find('body')
tag.append(obj)
print(soup)

21. insert, inserts a tag at the given position inside the current tag

from bs4.element import Tag
obj = Tag(name='i', attrs={'id': 'it'})
obj.string = 'I am new here'
tag = soup.find('body')
tag.insert(2, obj)
print(soup)

22. insert_after / insert_before, inserts after or before the current tag

from bs4.element import Tag
obj = Tag(name='i', attrs={'id': 'it'})
obj.string = 'I am new here'
tag = soup.find('body')
# tag.insert_before(obj)
tag.insert_after(obj)
print(soup)

23. replace_with, replaces the current tag with the given tag

from bs4.element import Tag
obj = Tag(name='i', attrs={'id': 'it'})
obj.string = 'I am new here'
tag = soup.find('div')
tag.replace_with(obj)
print(soup)

24. setup, sets navigation relationships between tags (only the navigation pointers change; the document itself is not moved)

tag = soup.find('div')
a = soup.find('a')
tag.setup(previous_sibling=a)
print(tag.previous_sibling)

25. wrap, wraps the current tag in the given tag

from bs4.element import Tag
obj1 = Tag(name='div', attrs={'id': 'it'})
obj1.string = 'I am new here'

tag = soup.find('a')
v = tag.wrap(obj1)
print(soup)

tag = soup.find('a')
v = tag.wrap(soup.find('p'))
print(soup)

26. unwrap, removes the current tag, keeping its contents

tag = soup.find('a')
v = tag.unwrap()
print(soup)

Adapted from https://www.cnblogs.com/wupeiqi/articles/6283017.html
