Learning Diary for Python - 19
I. Usage of requests
- requests is a third-party Python library for sending network (HTTP) requests, and an important tool for crawlers to fetch data from the web.
- Sending a request to a target address (URL)
requests.get(url, *, headers, proxies, timeout) - sends a network request to the given address; the return value is the response object from the server
import requests

url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,数据分析,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
response = requests.get(url, headers=headers)
| Parameter | Description |
| --- | --- |
| url | string; the address to request, either a page URL of the target site or a data API endpoint |
| headers | dict; request headers, e.g. user-agent to impersonate a browser, or cookie to stay logged in |
| proxies | dict; proxy IP settings (use when your own IP has been blocked) |
| timeout | number; how many seconds to wait before the request times out |
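The last two parameters in the table are not used in the example above; here is a minimal sketch of both. The proxy address is a made-up placeholder, not a working proxy:

import requests

# Hypothetical proxy address - substitute one from a real proxy provider
proxies = {
    'http': 'http://113.121.22.92:9999',
    'https': 'http://113.121.22.92:9999',
}
# timeout=5: raise requests.exceptions.Timeout if no response arrives within 5 seconds
response = requests.get('https://www.baidu.com', proxies=proxies, timeout=5)
print(response.status_code)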
- Getting data from the response
# 1. Get the response headers
# print(response.headers)
# 2. Set the response's encoding (use this when the fetched result comes back garbled)
response.encoding = 'gbk'
# 3. Get the body of the response
# 1) as a string (for scraping web pages)
# print(response.text)
# 2) as binary data (for scraping images, video, audio)
# print(response.content)
# 3) as parsed JSON (for scraping API data)
# print(response.json())
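When the right encoding is not known ahead of time (here 'gbk' was found by trial), requests can guess one from the body itself. A small sketch, assuming response is the 51job response created above:

# apparent_encoding is requests' guess at the body's charset;
# assigning it makes response.text decode with that guess
response.encoding = response.apparent_encoding
print(response.encoding)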
4. Downloading an image
import requests

# 1. Send a request to the image's address
url = 'https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png'
response = requests.get(url)
# 2. Save the image data to a local file (binary data, hence mode 'wb')
with open('files/a.png', 'wb') as f:
    f.write(response.content)
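Writing response.content is fine for small files, but for the video and audio mentioned earlier it holds the whole file in memory at once. A sketch of requests' streaming download (reusing the same image URL purely for illustration):

import requests

url = 'https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png'
# stream=True delays downloading the body until it is iterated
response = requests.get(url, stream=True)
with open('files/b.png', 'wb') as f:
    # read and write 8 KB at a time instead of all at once
    for chunk in response.iter_content(chunk_size=8192):
        f.write(chunk)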
5. JSON APIs
import requests
import os

url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
response = requests.get(url)
count = 0
for hero in response.json()['hero']:
    print(hero['name'])
    # download each hero's champion-select voice line
    a_url = hero['selectAudio']
    res = requests.get(a_url)
    # name the local file after the last segment of the audio URL
    with open(f'files/{os.path.basename(a_url)}', 'wb') as f:
        f.write(res.content)
    count += 1
    # stop after the first 10 heroes
    if count == 10:
        break
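A side note on the manual counter: slicing the hero list up front behaves the same and reads a little cleaner:

# Equivalent to the count/break pattern above: take only the first 10 heroes
for hero in response.json()['hero'][:10]:
    print(hero['name'])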
II. Usage of bs4
# Note: the package to install is beautifulsoup4 (pip install beautifulsoup4), not bs4
from bs4 import BeautifulSoup
# 1. Create a BeautifulSoup object
# BeautifulSoup(html_code, 'lxml') - returns an object representing the whole page
html = open('files/05css选择器(重要).html').read()
soup = BeautifulSoup(html, 'lxml')
# 2. Get tags
# 1) Search the whole page
# BeautifulSoup_object.select(css_selector) - returns every tag the CSS selector matches in the whole page, as a list of tag objects
# BeautifulSoup_object.select_one(css_selector) - returns the first tag the CSS selector matches, as a single tag object
result = soup.select('p')
print(result)
result = soup.select('.c1')
print(result)
result = soup.select('div>p')
print(result)
result = soup.select_one('p')
print(result)
result = soup.select_one('div>p')
print(result)
# 2) Search inside a specific tag
# tag_object.select(css_selector) - matches the selector only among the given tag's descendants
# tag_object.select_one(css_selector)
div1 = soup.select_one('#box')
result = div1.select('p')
print(result)
# 3. Get a tag's text content and attribute values
# 1) Getting the text content:
# tag_object.text
result = soup.select_one('#p2').text
print(result)  # 我是段落2
# .string returns the text only when the tag contains a single string; otherwise None
result = soup.select_one('#p2').string
print(result)
# .contents returns the tag's direct children as a list
result = soup.select_one('#p2').contents
print(result)
# Get the text content of every p tag
for p in soup.select('p'):
    print(p.text)
# 2) Getting attribute values
# tag_object.attrs[attribute_name]
result = soup.select_one('#a1').attrs
print(result) # {'id': 'a1', 'href': 'https://www.baidu.com'}
result = soup.select_one('#a1').attrs['href']
print(result) # 'https://www.baidu.com'
result = soup.select_one('img').attrs['src']
print(result)
result = soup.select_one('img').attrs['alt']
print(result)
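The snippets above depend on a local HTML file that is not shown. Here is a self-contained sketch with a minimal, assumed HTML string shaped to match the selectors used (#box, #p2, #a1):

from bs4 import BeautifulSoup

# Assumed stand-in for files/05css选择器(重要).html - just enough structure for the selectors above
html = '''
<div id="box">
    <p id="p2">我是段落2</p>
    <a id="a1" href="https://www.baidu.com">百度</a>
    <img src="a.png" alt="logo">
</div>
'''
soup = BeautifulSoup(html, 'lxml')
print(soup.select('div>p'))                   # [<p id="p2">我是段落2</p>]
print(soup.select_one('#p2').text)            # 我是段落2
print(soup.select_one('#a1').attrs['href'])   # https://www.baidu.com
print(soup.select_one('img').attrs['src'])    # a.png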
III. Scraping exercise
import requests
from bs4 import BeautifulSoup
import csv
import re

all_data = []
# The Top 250 list is paginated, 25 movies per page (start=0, 25, 50, ...)
for i in range(0, 250, 25):
    # The original note only kept the path; the full Douban Top 250 URL is assumed here
    url = f'https://movie.douban.com/top250?start={i}&filter='
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    all_li = soup.select('.grid_view>li')
    n = 0
    for li in all_li:
        n += 1
        name = li.select_one('.title').text
        # One text block mixes director, cast, year, country and genre together
        result = li.select_one('.bd>p').text.strip().replace(' ', '')
        director = re.search(r'导演:(.+?)\s', result).group(1)
        # Some entries list no cast, so this match can fail
        actor_match = re.search(r'主演:(.+?)\s', result)
        actor = actor_match.group(1) if actor_match is not None else '...'
        release_time = re.search(r'\n(\d+.*?)\s*/', result).group(1)
        country = re.search(r'\n\d+.*?/(\D+?)\s*/', result).group(1).strip().replace(' ', '')
        movie_type = re.search(r'/.+?/\s*(.+?)$', result).group(1)
        score = li.select_one('.rating_num').text
        comment_num = li.select('.star>span')[-1].text
        comment_num = re.search(r'\d+', comment_num).group()
        img_url = li.select_one('.pic img').attrs['src']
        print(n)
        all_data.append([name, director, actor, release_time, country, movie_type, score, comment_num, img_url])

with open('data/电影.csv', 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['电影名称', '导演', '主演', '上映时间', '国家', '类型', '评分', '评论人数', '封面地址'])
    writer.writerows(all_data)
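One practical caveat for this exercise: the ten page requests fire back-to-back, which real sites may throttle or block. A minimal sketch of pacing the loop (the 1-second delay is an arbitrary choice, not something the site documents):

import time

for i in range(0, 250, 25):
    # ...send the request and parse the page as above...
    time.sleep(1)  # pause between page requests to stay polite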