Day022 - requests and bs4

requests

  • A third-party Python library for making network requests over HTTP
import requests

Sending requests

  • requests.get(url, *, headers, params, proxies) - send a GET request

  • requests.post(url, *, headers, params, proxies) - send a POST request

  • Parameters:

    • url - the request address (a website's URL, an API address, an image address, etc.)
    • headers - set the request headers (used when setting the cookie and User-Agent)
    • params - set the query parameters
    • proxies - set a proxy (a sketch follows the code below)
# 1.1 Send a GET request with the parameters spliced directly into the url
requests.get('https://www.xxx.com?key=1234567890&num=10')

# 1.2 Pass the parameters through params
params = {
    'key': '1234567890',
    'num': '10'
}
requests.get('https://www.xxx.com', params=params)
requests.post('https://www.xxx.com', params=params)
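
A minimal sketch of the proxies parameter mentioned above; the proxy address here is a placeholder, not a working proxy:

# 1.3 Route the request through a proxy server (address is a placeholder)
proxies = {
    'http': 'http://127.0.0.1:8888',
    'https': 'http://127.0.0.1:8888'
}
requests.get('https://www.xxx.com', proxies=proxies)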

Adding request headers

Adding a User-Agent

header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67'}
response = requests.get('https://www.51job.com', headers=header)
response.encoding = 'gbk'
print(response.text)

Adding a cookie

header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67',
    'cookie': '_zap'
}
response = requests.get('https://www.zhihu.com', headers=header)
print(response.text)

Getting response information

response = requests.get('http://www.yingjiesheng.com/')  # get the response object

Setting the encoding (only needed when the text comes out garbled)

response.encoding = 'GBK'  # set the encoding
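
If the right encoding is not known in advance, requests can also guess it from the response content; a small sketch using the library's apparent_encoding attribute:
response.encoding = response.apparent_encoding  # let requests guess the encoding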

Getting the response headers

head = response.headers  # get the response headers
# print(head)
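
The headers object behaves like a case-insensitive dictionary, so individual fields can be read directly; for example:
print(response.headers['Content-Type'])
print(response.headers.get('content-type'))  # same field, different casing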

Getting the response body

Getting the text value
  • Used when requesting a web page, to get the page's source code
res_text = response.text
# print(res_text)
Getting the JSON parse result
  • Used for API endpoints that return JSON data
res_json = response.json()
# print(res_json)
Getting the content value
  • Gets the raw binary data; used for downloading images, videos, and audio
res_con = response.content
# print(res_con)
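
Before reading any of the three body values, it is worth checking that the request actually succeeded; a minimal sketch:

# 200 means the request succeeded
if response.status_code == 200:
    print(response.text)
else:
    print('request failed:', response.status_code)
# or raise an exception on any 4xx/5xx status:
# response.raise_for_status()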

JSON parsing

# Fetch data from the Toutiao hot-board JSON API
url = 'https://www.toutiao.com/hot-event/hot-board/?origin=toutiao_pc&_signature=_02B4Z6wo00d01lV7XNAAAIDC1XmmkccfxtZVX1hAAPRc4q1lffyoHnSH6QUjeMpESOysibusQzMT4SFKQRYqPcysxXX0rosf5v95jp--h4Twooxn1Q7PvJH1hggqpvwVxGkcisci55B1y4d-13'
response = requests.get(url)
res_json = response.json()['data']
for news in res_json:
    print(news['Title'])

Downloading images

from re import findall

import requests


def download_image(img_url):
    # Request the image data over the network
    response = requests.get(img_url)
    # Get the binary data
    data = response.content
    # Save the data to a local file, naming it after the last part of the url
    with open(f'files/{img_url.split("/")[-1].split("!")[0]}', 'wb') as f:
        f.write(data)
    print('Download finished!')


if __name__ == '__main__':
    response = requests.get('https://www.58pic.com/tupian/qixi-0-0.html')
    result = findall(r'(?s)<img src="(\S+?)">', response.text)

    for x in result:
        download_image(f'https:{x}')
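
content reads the whole file into memory at once; for large files such as videos, requests can also stream the body in chunks. A sketch of that approach (download_large_file is an illustrative helper, not part of the example above):

def download_large_file(url, path):
    # stream=True keeps the body from being downloaded all at once
    response = requests.get(url, stream=True)
    with open(path, 'wb') as f:
        # Write the body to disk 64 KB at a time
        for chunk in response.iter_content(chunk_size=1024 * 64):
            f.write(chunk)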

Fetching images from 58pic (千图网)

from re import findall

import requests


def download_image(img_url):
    # Request the image data over the network
    response = requests.get(img_url)
    # Get the binary data
    data = response.content
    # Save the data to a local file
    with open(f'files/{img_url.split("/")[-1]}', 'wb') as f:
        f.write(data)


if __name__ == '__main__':
    url = ''        # fill in the address of the page that lists the images
    regular = r''   # fill in the regex that captures the image addresses
    response = requests.get(url)
    result = findall(regular, response.text)

    for x in result:
        download_image(x)

Using bs4

from bs4 import BeautifulSoup
  1. Prepare the web page data to parse (in practice it is fetched with requests or selenium)
data = open('text1.html', encoding='utf-8').read()
  2. Create a BeautifulSoup object (it can automatically repair broken html structures in the data)
soup = BeautifulSoup(data, 'lxml')
# print(soup)
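The 'lxml' argument requires the third-party lxml package; if it is not installed, the parser that ships with Python works as well:
soup = BeautifulSoup(data, 'html.parser')  # standard-library fallback parser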
  3. Get tags and tag content through the BeautifulSoup object
  • Getting tags
    • BeautifulSoup object.select(css selector) - get all tags matched by the css selector; returns a list whose elements are the matched tag objects
    • BeautifulSoup object.select_one(css selector) - get the first tag matched by the css selector; returns a single tag object
    • tag object.select/select_one(css selector) - run the css selector inside that tag only
result = soup.select('p')
print(result)  # [<p id="p1">我是段落1</p>, <p id="p2">我是段落2</p>]

result = soup.select_one('p')
print(result)  # <p id="p1">我是段落1</p>

result = soup.select('#p1')
print(result)  # [<p id="p1">我是段落1</p>]

result = soup.select_one('#p1')
print(result)  # <p id="p1">我是段落1</p>
  • Getting tag content
    • tag object.string - the text inside the tag; only works when the content is pure text, otherwise the result is None
    • tag object.get_text() - all of the text information inside the tag, including nested tags
    • tag object.contents - a list of the tag's direct children (strings and tag objects)
# tag object.string
p2 = soup.select_one('div>p')
print(p2)  # <p id="p2">我是段落2</p>
print(p2.string)  # 我是段落2

s1 = soup.select_one('#s1')
print(s1)  # <span id="s1">我是<b>span1</b></span>
print(s1.string)  # None

# tag object.get_text()
print(p2.get_text())  # 我是段落2
print(s1.get_text())  # 我是span1

# tag object.contents
print(p2.contents)  # ['我是段落2']
result = s1.contents
print(result)  # ['我是', <b>span1</b>]
print(result[-1].get_text())  # span1
  • Getting tag attributes
a1 = soup.select_one('div>a')
print(a1)  # <a href="https://www.baidu.com">我是超链接2</a>
print(a1.attrs['href'])  # https://www.baidu.com

img = soup.select_one('img')
print(img)  # <img alt="" src="http://www.gaoimg.com/uploads/allimg/210801/1-210P1151401S1.jpg"/>
print(img.attrs['src'])  # http://www.gaoimg.com/uploads/allimg/210801/1-210P1151401S1.jpg
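
A tag object also supports dictionary-style access directly, which is equivalent to going through attrs:

print(a1['href'])  # https://www.baidu.com
print(img['src'])  # http://www.gaoimg.com/uploads/allimg/210801/1-210P1151401S1.jpg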

Scraping Douban Movie Top 250

import csv
import requests
from bs4 import BeautifulSoup

# Create the csv file that the information is saved into
# ('w' so each run starts a fresh file that matches the sample output below)
f = open('files/doubantop250.csv', 'w', encoding='utf-8', newline='')
writer = csv.writer(f)
# Write the header row
writer.writerow(['电影名', '评分', '评分人数', '电影海报', '短评'])

# Set the request header information
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36 Edg/92.0.902.67',
    'Cookie': 'your own cookie string'
}

# Fetch and parse every page of the list
for x in range(0, 226, 25):
    url = f'https://movie.douban.com/top250?start={x}&filter='
    print(url)
    res = requests.get(url, headers=header)
    soup = BeautifulSoup(res.text, 'lxml')
    # Locate the blocks that hold the movie information
    all_movie_li = soup.select('#content>div>div.article>ol>li')
    # Walk through each movie's block and pull out the wanted fields
    for movie_info in all_movie_li:
        mov_name = movie_info.select_one('.pic>a>img').attrs['alt']
        mov_star = movie_info.select_one('.rating_num').get_text()
        # Some movies have no quote; catch the error and record None instead
        try:
            mov_quote = movie_info.select_one('.inq').get_text()
        except AttributeError:
            mov_quote = None
        mov_rev = movie_info.select('.info>.bd>.star>span')[3].get_text()
        mov_img = movie_info.select_one('.pic>a>img').attrs['src']
        # Write one row per movie
        writer.writerow([mov_name, mov_star, mov_rev, mov_img, mov_quote])

f.close()
# Sample of the resulting CSV
电影名,评分,评分人数,电影海报,短评
肖申克的救赎,9.7,2418141人评价,https://img2.doubanio.com/view/photo/s_ratio_poster/public/p480747492.jpg,希望让人自由。
霸王别姬,9.6,1799374人评价,https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2561716440.jpg,风华绝代。
阿甘正传,9.5,1819919人评价,https://img2.doubanio.com/view/photo/s_ratio_poster/public/p2372307693.jpg,一部美国近现代史。