安装
pip install requests
pip install lxml
requests的基本使用
- 基本GET请求
import requests
url = "http://www.baidu.com/"
# Request headers: `headers` is a dict; any header field (e.g. cookies) can be added
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = requests.get(url=url, headers=headers)
# response.text: body as str (Unicode), decoded with the encoding requests guessed
print(response.text)
# response.content: raw byte stream; .decode() defaults to utf-8
print(response.content.decode())
# Final URL of the request, e.g. https://www.baidu.com/
print(response.url)
# Encoding detected from the response headers, e.g. utf-8
print(response.encoding)
# HTTP status code, e.g. 200
print(response.status_code)
- 基本POST请求
import requests
import json
url = "http://fy.iciba.com/ajax.php?a=fy"
# Request headers: `headers` is a dict; any header field (e.g. cookies) can be added
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# Form data sent as the POST body
data = {
"f": "auto",
"t": "auto",
"w": "你好"
}
response = requests.post(url=url, headers=headers, data=data)
data_json = response.content.decode()
# Parse the JSON string into a Python dict
data_dict = json.loads(data_json)
print(data_dict)
-
代理(proxies参数)
可以代理ip,避免被识别出爬虫封ip地址
import requests
# The proxy is chosen per URL scheme (http vs https)
proxy = {
"http": "http://12.34.56.79:9527",
"https": "http://12.34.56.79:9527",
}
# Authenticated (private) proxy syntax — NOTE: this assignment replaces the dict above;
# only the second `proxy` is actually used by the request below
proxy = {
'http': 'http://user:pwd@101.231.104.82:80',
'https': 'https://user:pwd@101.231.104.82:80',
# 'https': 'https://1.192.246.63:9999',
}
response = requests.get("http://www.baidu.com", proxies=proxy)
print(response.text)
-
Cookies 和 Session
Cookies 如果一个响应中包含了cookie,那么我们可以利用 cookies参数拿到:
import requests
response = requests.get("http://www.baidu.com/")
# response.cookies is a RequestsCookieJar object
cookiejar = response.cookies
# Convert the CookieJar into a plain dict
cookiedict = requests.utils.dict_from_cookiejar(cookiejar)
print(cookiejar)
print(cookiedict)
"""
输出:
<RequestsCookieJar[<Cookie BDORZ=27315 for .baidu.com/>]>
{'BDORZ': '27315'}
"""
session
在 requests 里,session对象是一个非常常用的对象,这个对象代表一次用户会话:从客户端浏览器连接服务器开始,到客户端浏览器与服务器断开。
会话能让我们在跨请求时候保持某些参数,比如在同一个 Session 实例发出的所有请求之间保持 cookie 。
import requests
# 1. A Session object persists cookies (and other settings) across requests.
#    requests.Session() is the documented class; the lowercase requests.session()
#    used originally is only a legacy alias.
session = requests.Session()
# 2. Request headers
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# 3. Login credentials
data = {"email":"XXXXXX@163.com", "password":"password"}
# 4. POST the credentials; the login cookie is stored on the session.
#    BUGFIX: the original built `headers` but never passed it to any request.
session.post("http://www.renren.com/PLogin.do", data=data, headers=headers)
# 5. The session now carries the login cookie, so pages behind login are reachable
response = session.get("http://www.renren.com/410043129/profile", headers=headers)
# 6. Print the response body
print(response.text)
- 处理HTTPS请求 SSL证书验证
如果我们想跳过SSL的证书验证,把 verify 设置为 False 就可以正常请求了。
# NOTE: verify=False disables SSL certificate validation (insecure; urllib3 emits InsecureRequestWarning)
r = requests.get("https://www.12306.cn/mormhweb/", verify = False)
数据提取-lxml模块
# Use the etree module from the lxml library
from lxml import etree
text = '''
<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a> # 注意,此处缺少一个 </li> 闭合标签
</ul>
</div>
'''
# etree.HTML parses the string into an HTML document tree (repairing broken markup)
html = etree.HTML(text)
# Serialize the document tree back to a byte string
result = etree.tostring(html)
print(result)
输出结果:
<html>
<body>
<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
</body>
</html>
lxml 可以自动修正 html 代码,例子里不仅补全了 li 标签,还添加了 body,html 标签。
再利用 etree.parse() 方法来读取文件。
from lxml import etree
# Read an external file hello.html
# NOTE(review): etree.parse() uses the XML parser by default, so hello.html must be
# well-formed; pass parser=etree.HTMLParser() for lenient HTML parsing — confirm the file
html = etree.parse('./hello.html')
result = etree.tostring(html, pretty_print=True)
print(result)
- 使用xpath语法,提取数据
from lxml import etree
text = ''' <div>
<ul>
<li class="item-1"><a>first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a>
</ul>
</div> '''
html = etree.HTML(text)
print(html)
# Uncomment to inspect the serialized document held by the Element
# print(etree.tostring(html).decode())
# Get the href of each <a> under a <li class="item-1">
# (the first such <a> has no href attribute, so only link2/link4 are returned)
ret = html.xpath("//li[@class=\"item-1\"]/a/@href")
print(ret)
requests和lxml模块,爬虫实例
import requests
from lxml import etree
import json
class QiuShiBaiKe():
    """Crawler for qiushibaike.com text jokes.

    Fetches list pages 1-13, extracts one dict per joke and appends each
    dict as a JSON line to 糗事百科.txt.
    """

    def __init__(self):
        # {} is filled with the page number in run()
        self.url = "https://www.qiushibaike.com/text/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}

    def parse_url(self, url):
        """Send a GET request and return the utf-8 decoded response body."""
        print(url)
        html_str = requests.get(url, headers=self.headers)
        return html_str.content.decode()

    def draw_data(self, response):
        """Extract a list of item dicts from one list-page HTML string."""
        html = etree.HTML(response)
        content_list = []
        # One <div class="article block untagged mb15"> per joke
        div_list = html.xpath("//div[contains(@class,'article block untagged mb15')]")
        for div in div_list:
            item = {}
            # Avatar image (protocol-relative URL in the page, so prepend "https:")
            imgs = div.xpath(".//img/@src")
            item["title_img"] = "https:" + imgs[0] if imgs else None
            # Author name
            names = div.xpath(".//img/@alt")
            item["name"] = names[0] if names else None
            # Gender is encoded in the class attribute, e.g. "... manIcon"
            genders = div.xpath(".//div[contains(@class,'articleGender')]/@class")
            item["gender"] = genders[0].split(" ")[-1].replace("Icon", " ") if genders else None
            # Age is the text of the same articleGender div
            ages = div.xpath(".//div[contains(@class,'articleGender')]/text()")
            item["age"] = ages[0] if ages else None
            # Joke text, with newlines flattened to spaces.
            # BUGFIX: the original set item["content"] to None when the xpath was
            # empty and then iterated over it anyway -> TypeError.
            texts = div.xpath(".//div[@class='content']/span/text()")
            item["content"] = [t.replace("\n", " ") for t in texts] if texts else None
            # Vote / comment counters share the same <i class="number"> markup.
            # BUGFIX: the original indexed [0]/[1] after testing a *different*
            # node set, which could raise IndexError; guard on the list itself.
            numbers = div.xpath(".//i[@class='number']/text()")
            item["laughable"] = numbers[0] if len(numbers) > 0 else None
            item["comment"] = numbers[1] if len(numbers) > 1 else None
            # Detail-page URL ("contentHerf" is the site's own class-name typo)
            hrefs = div.xpath(".//a[@class='contentHerf']/@href")
            item["content_url"] = "https://www.qiushibaike.com" + hrefs[0] if hrefs else None
            # Best comment.
            # BUGFIX: the original extracted with an absolute path (//div...),
            # searching the whole document instead of this div; use .// like the guard did.
            best = div.xpath(".//div[@class='main-text']/text()")
            item["best_comment"] = best[0] if best else None
            content_list.append(item)
        print(content_list)
        return content_list

    def save_data(self, content_list):
        """Append each item as one JSON line to the output file."""
        with open("糗事百科.txt", 'a', encoding="utf-8") as f:
            for content in content_list:
                f.write(json.dumps(content, ensure_ascii=False))
                f.write("\n")
            print("保存成功")

    def run(self):
        # 1. Build the URL for each list page
        for i in range(1, 14):
            print(i)
            url = self.url.format(i)
            # 2. Send the request, get the response
            response = self.parse_url(url)
            # 3. Extract the data
            content_list = self.draw_data(response)
            # 4. Save the data
            self.save_data(content_list)