Day 20: requests and bs4


Parsing Beike rental listings with regex

import csv
import requests
from re import findall

# 1. Fetch the page HTML
response = requests.get('https://cd.zu.ke.com/zufang')
result = response.text
# print(result)

# 2. Parse the data
names = findall(r'(?s)<a class="twoline".+?>(.+?)</a>', result)
names = [x.strip() for x in names]
# print(names)

prices = findall(r'<span class="content__list--item-price"><em>(\d+)</em>', result)
# print(prices)
house = zip(names, prices)   # pair each name with its price
# print(list(house))

# 3. Save to CSV ('w' instead of 'a', so reruns don't stack duplicate header rows)
f = open('files/租房.csv', 'w', encoding='utf-8', newline='')
writer = csv.writer(f)
writer.writerow(['名称', '价格'])
writer.writerows(house)
f.close()
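
The `(?s)` inline flag makes `.` match newlines as well, which matters here because each listing's name spans several lines in the page source. A minimal illustration (the HTML snippet is made up for demonstration):

from re import findall

html = '<a class="twoline" href="/zufang/x.html">\n  整租·某小区 2室1厅\n</a>'
print(findall(r'(?s)<a class="twoline".+?>(.+?)</a>', html))
# ['\n  整租·某小区 2室1厅\n']  ->  hence the .strip() calls above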

Image download

import requests


def download_image(url: str):
    # 1. Request the image URL
    response = requests.get(url)

    # 2. Get the binary image data
    data = response.content

    # 3. Write the data to a local image file
    with open('files/qx.jpeg', 'wb') as f:
        f.write(data)


if __name__ == '__main__':
    download_image('https://p2.itc.cn/images01/20220811/6d640bcab37e4e5ea17e7c747f8f4a53.jpeg')
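
A slightly more defensive variant (a sketch of my own, not part of the original) writes the file only when the request succeeds:

def download_image_safe(url: str, path: str = 'files/qx.jpeg'):
    response = requests.get(url)
    if response.status_code == 200:      # only save on success
        with open(path, 'wb') as f:
            f.write(response.content)
    else:
        print('download failed with status:', response.status_code)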

Batch image download

import requests
from re import findall
from uuid import uuid1   # generates a unique id value


def download_image(url: str):
    response = requests.get(url)
    with open(f'files/{uuid1()}.jpg','wb') as f:
        f.write(response.content)


# 1. Fetch the whole page
response = requests.get('https://cd.zu.ke.com/zufang')
content = response.text
# print(content)

# 2. Parse out every listing's image URL
all_images = findall(r'(?s)<a\s+class="content__list--item--aside".+?>\s+<img.+?data-src="(.+?)"', content)
# print(all_images)

# 3. Download all the images
for x in all_images:
    download_image(x)
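
uuid1() builds its value from the machine's MAC address and the current timestamp, so every call yields a distinct filename:

from uuid import uuid1
print(uuid1())   # prints a different 36-character id on every call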

Browser disguise (User-Agent spoofing)

import requests

headers = {
    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

response = requests.get('https://movie.douban.com/top250',headers=headers)

print(response.text)
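
Douban typically rejects the default python-requests User-Agent, so without the header the response is not the normal page. A quick check that the disguise works (my own addition, not from the original):

plain = requests.get('https://movie.douban.com/top250')
disguised = requests.get('https://movie.douban.com/top250', headers=headers)
print(plain.status_code, disguised.status_code)   # expect a non-200 status vs 200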

Data parsing with bs4

# 1. What bs4 is for
"""
A third-party library dedicated to parsing web page data (it locates data with CSS selectors).
The package installs under the name 'beautifulsoup4' but is imported as 'bs4'.

Note: using bs4 for parsing also requires the third-party library 'lxml' (the parser backend).
"""
# Import the parser class
from bs4 import BeautifulSoup
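
# The examples below parse 'files/test.html'. The original file isn't shown, so here is a
# minimal version consistent with the printed results below (the img's src/alt values are placeholders):
from pathlib import Path
Path('files').mkdir(exist_ok=True)
Path('files/test.html').write_text('''
<div id="box1">
    <p>肖生克的救赎</p>
    <p>霸王别姬</p>
    <p>阿甘正传</p>
    <img src="images/film.png" alt="poster">
</div>
<div id="box2">
    <p>我是段落1</p>
</div>
''', encoding='utf-8')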

# 2. How to use bs4
# 1) Prepare the data to parse (get the page HTML)
html = open('files/test.html',encoding='utf-8').read()

# 2) Build a BeautifulSoup object from the page source
# soup represents the page's html tag (i.e. the whole page)
soup = BeautifulSoup(html,'lxml')

# 3) Getting tags
# soup.select(css_selector)      -  all tags matched by the CSS selector, as a list of tag objects
# soup.select_one(css_selector)  -  the first tag matched by the CSS selector, as a single tag object
result = soup.select('#box1 p')
print(result)       # [<p>肖生克的救赎</p>, <p>霸王别姬</p>, <p>阿甘正传</p>]

result = soup.select_one('#box1 p')
print(result)       # <p>肖生克的救赎</p>

# tag.select(css_selector)       -       all tags matched by the selector inside this tag, as a list of tag objects
# tag.select_one(css_selector)   -       the first tag matched by the selector inside this tag
result = soup.select('p')
print(result)       # [<p>肖生克的救赎</p>, <p>霸王别姬</p>, <p>阿甘正传</p>, <p>我是段落1</p>]

box2 = soup.select_one('#box2')
result = box2.select('p')
print(result)       # [<p>我是段落1</p>]

# 4) Getting tag text and tag attributes
p = soup.select_one('p')            #  <p>肖生克的救赎</p>
img = soup.select_one('img')

# a. Tag text: tag.text
print(p.text)       # '肖生克的救赎'

# b. Tag attribute values: tag.attrs[attribute_name]
print(img.attrs['src'], img.attrs['alt'])
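
attrs behaves like a plain dict, so the usual dict operations apply; .get is handy when an attribute may be absent:

print(img.attrs)             # every attribute of the tag as a dict
print(img.attrs.get('id'))   # None instead of a KeyError if 'id' is missing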

Parsing one page of Douban movies with bs4

import csv
from bs4 import BeautifulSoup
import requests


# 1. Fetch the page HTML
def get_net_data(url: str):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text


# 2. Parse the page data
def analysis_data(html: str):
    # parse the HTML
    soup = BeautifulSoup(html,'lxml')
    all_film_div = soup.select('.grid_view>li>.item')
    all_data = []
    for div in all_film_div:
        name = div.select_one('.title').text
        info = div.select_one('.bd>p').text.strip().split('\n')[-1].strip()
        # print(info)
        time, country, category = info.split('/')   # assumes exactly three '/'-separated fields
        # print(time,country,category)
        score = div.select_one('.rating_num').text
        # print(score)
        comment_count = div.select('.star>span')[-1].text[:-3]
        # print(comment_count)
        intro = div.select_one('.inq').text
        # print(intro)
        all_data.append([name,time.strip(),country.strip(),category.strip(),score,comment_count,intro])

    f = open('files/豆瓣电影第一页数据.csv', 'w', encoding='utf-8', newline='')
    writer = csv.writer(f)
    writer.writerow(['电影名', '上映时间', '国家', '类型', '评分', '评论数', '简介'])
    writer.writerows(all_data)
    f.close()


if __name__ == '__main__':
    result = get_net_data('https://movie.douban.com/top250')
    analysis_data(result)

Douban movies (all pages)

import csv
from bs4 import BeautifulSoup
import requests


# 1. Fetch the page HTML
def get_net_data(url: str):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text


# 2. Parse the page data
def analysis_data(html: str):
    # parse the HTML
    soup = BeautifulSoup(html, 'lxml')
    all_film_div = soup.select('.grid_view>li>.item')
    all_data = []
    for div in all_film_div:
        name = div.select_one('.title').text
        info = div.select_one('.bd>p').text.strip().split('\n')[-1].strip()
        # some entries have more than three '/'-separated fields, so unpack
        # by index instead of `time, country, category = info.split('/')`
        info_list = info.split('/')
        time = info_list[0]
        country = info_list[-2]
        category = info_list[-1]
        score = div.select_one('.rating_num').text
        comment_count = div.select('.star>span')[-1].text[:-3]
        intro_span = div.select_one('.inq')   # some films have no one-line intro
        if intro_span:
            intro = intro_span.text
        else:
            intro = ''
        all_data.append([name, score, time.strip(), country.strip(), category.strip(), comment_count, intro])

    writer.writerows(all_data)   # `writer` is the csv writer created in __main__
    print('Page saved!')


if __name__ == '__main__':
    f = open('files/豆瓣电影.csv', 'w', encoding='utf-8', newline='')
    writer = csv.writer(f)
    writer.writerow(['电影名', '评分', '上映时间', '国家', '类型', '评论数', '简介'])

    # Top 250 spans 10 pages; `start` takes the values 0, 25, ..., 225
    for page in range(0, 250, 25):
        url = f'https://movie.douban.com/top250?start={page}&filter='
        result = get_net_data(url)
        analysis_data(result)
    f.close()
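
When looping over all ten pages, it is polite to pause briefly between requests; a variant of the loop with a delay (my addition, not in the original):

import time

for page in range(0, 250, 25):
    url = f'https://movie.douban.com/top250?start={page}&filter='
    analysis_data(get_net_data(url))
    time.sleep(1)   # wait about a second between pages to avoid hammering the server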