Week 4-5 Study Notes: Web Scraping

Python Web Scraping

1. Downloading Images

Send a request for the image data:

import requests

response = requests.get('https://file02.16sucai.com/d/file/2014/0829/372edfeb74c3119b666237bd4af92be5.jpg')

Get the image's binary data:

data = response.content

Write the data to an image file:

with open('files/test1.jpeg', 'wb') as f:
    f.write(data)

The same steps wrapped into a reusable function:

from uuid import uuid1


def download_image(url: str):
    # Fetch the image and save its bytes under a unique uuid1-based name.
    response = requests.get(url)
    with open(f'files/{uuid1()}.jpeg', 'wb') as f:
        f.write(response.content)


Example: downloading every listing image from one page of a rental site.

import re

response = requests.get('https://cd.zu.ke.com/zufang')
res = response.text
# print(res)

# Each listing's cover image is lazy-loaded, so its real URL sits in the
# data-src attribute; (?s) lets '.' match newlines across the tag.
all_img = re.findall(r'(?s)<a\s+class="content__list--item--aside".+?>\s+<img.+?data-src="(.+?)"', res)

for x in all_img:
    download_image(x)
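
A slightly more defensive variant of download_image (my own sketch, not part of the course notes): it creates the output folder if missing, sets a timeout, and skips failed URLs instead of crashing mid-loop.

import os
from uuid import uuid1

import requests


def download_image_safe(url: str, folder: str = 'files'):
    # Hypothetical hardened version of download_image above.
    os.makedirs(folder, exist_ok=True)  # make sure the output folder exists
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()     # treat HTTP error codes as failures
    except requests.RequestException as error:
        print(f'Download failed for {url}: {error}')
        return
    with open(f'{folder}/{uuid1()}.jpeg', 'wb') as f:
        f.write(response.content)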

2. Disguising as a Browser
import requests

headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
}
response = requests.get('https://movie.douban.com/top250', headers=headers)
print(response)
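
A quick way to see why the header matters (a sketch; Douban has typically answered non-browser clients with a 418 status, though the exact behavior may change):

import requests

# Reuses the 'headers' dict defined above.
bare = requests.get('https://movie.douban.com/top250')
print(bare.status_code)       # e.g. 418: the request was refused

disguised = requests.get('https://movie.douban.com/top250', headers=headers)
print(disguised.status_code)  # 200: the page loads normally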

3. What bs4 Is For

A third-party library dedicated to parsing web page data (it locates data with CSS selectors).
The package is installed as 'beautifulsoup4' but imported as 'bs4'.

Note: parsing with bs4 also depends on the third-party library 'lxml'.
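
For reference (not in the original notes), both packages install with pip:

pip install beautifulsoup4 lxml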

from bs4 import BeautifulSoup

How to use bs4

Create a BeautifulSoup object from the page's source code:

html = open('files/test.html', encoding='utf-8').read()

The soup object represents the entire page:

soup = BeautifulSoup(html, 'lxml')

Getting tags
soup.select(css_selector) - returns every tag matched by the CSS selector, as a list of tag objects
soup.select_one(css_selector) - returns the first tag matched by the CSS selector, as a tag object

result = soup.select('#box1 p')
print(result)       # [<p>肖生克的救赎</p>, <p>霸王别姬</p>, <p>阿甘正传</p>]

result = soup.select_one('#box1 p')
print(result)       # <p>肖生克的救赎</p>

tag.select(css_selector) - searches only inside the given tag; returns every matched tag as a list of tag objects
tag.select_one(css_selector) - searches only inside the given tag; returns the first matched tag as a tag object

result = soup.select('p')
print(result)       # [<p>肖生克的救赎</p>, <p>霸王别姬</p>, <p>阿甘正传</p>, <p>我是段落1</p>]

box2 = soup.select_one('#box2')
result = box2.select('p')
print(result)       # [<p>我是段落1</p>]

Getting tag content and tag attributes
p = soup.select_one('p')            #  <p>肖生克的救赎</p>
img = soup.select_one('img')

a. Get a tag's text content: tag.text
print(p.text)       # '肖生克的救赎'

b. Get a tag's attribute value: tag.attrs[attribute_name]
print(img.attrs['src'], img.attrs['alt'])
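
Putting it together on a small inline page (this HTML is a hypothetical stand-in for files/test.html, which the notes never show, chosen to match the outputs above):

from bs4 import BeautifulSoup

# Hypothetical markup consistent with the example outputs above.
html = '''
<div id="box1">
    <p>肖生克的救赎</p>
    <p>霸王别姬</p>
    <p>阿甘正传</p>
    <img src="cover.jpg" alt="poster">
</div>
<div id="box2">
    <p>我是段落1</p>
</div>
'''
soup = BeautifulSoup(html, 'lxml')

print(soup.select('#box1 p'))              # all three <p> tags inside #box1
print(soup.select_one('#box1 p').text)     # 肖生克的救赎
img = soup.select_one('img')
print(img.attrs['src'], img.attrs['alt'])  # cover.jpg poster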

Single-page scraping

import csv
import requests
from bs4 import BeautifulSoup


def get_net_data(url: str):
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text


def analysis_data(html: str):
    all_res = []
    soup = BeautifulSoup(html, 'lxml')
    all_div = soup.select('.grid_view>li>.item')
    for div in all_div:
        name = div.select_one('.title').text
        # The first line of '.bd>p' is "导演: xxx ... 主演: xxx"; keep only the director part.
        director = div.select_one('.bd>p').text.strip().split('\n')[0].split('主演')[0].strip()
        score = div.select_one('.rating_num').text
        # The last span under '.star' reads like "123456人评价"; strip the 3-character suffix.
        count = div.select('.star>span')[-1].text[:-3]
        intro = div.select_one('.quote>span').text
        all_res.append([name, director, score, count, intro])

    with open('files/sj.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['电影名', '导演', '评分', '人数', '简介'])
        writer.writerows(all_res)


if __name__ == '__main__':
    result = get_net_data('https://movie.douban.com/top250')
    analysis_data(result)

Multi-page scraping

import requests
import csv
from bs4 import BeautifulSoup


# 1. Fetch the page HTML
def get_net_data(url: str):
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    return response.text


# 2. Parse the page data
def analysis_data(html: str):
    # Parse the HTML
    soup = BeautifulSoup(html, 'lxml')
    all_film_div = soup.select('.grid_view>li>.item')
    all_data = []
    for div in all_film_div:
        name = div.select_one('.title').text
        # The last line of '.bd>p' looks like "1994 / 美国 / 犯罪 剧情".
        info = div.select_one('.bd>p').text.strip().split('\n')[-1].strip()
        # time, country, category = info.split('/')
        info_list = info.split('/')
        release_time = info_list[0]
        country = info_list[-2]
        category = info_list[-1]
        score = div.select_one('.rating_num').text
        comment_count = div.select('.star>span')[-1].text[:-3]
        # Not every movie has a one-line quote, so guard against None.
        intro_span = div.select_one('.inq')
        if intro_span:
            intro = intro_span.text
        else:
            intro = ''
        all_data.append([name, score, release_time.strip(), country.strip(), category.strip(), comment_count, intro])

    # 'writer' is the module-level csv.writer created in the __main__ block below.
    writer.writerows(all_data)
    print('Saved!')


if __name__ == '__main__':
    f = open('files/豆瓣电影.csv', 'w', encoding='utf-8', newline='')
    writer = csv.writer(f)
    writer.writerow(['电影名', '评分', '上映时间', '国家', '类型', '评论数', '简介'])

    # Top250 has 10 pages of 25 movies each: start = 0, 25, ..., 225.
    for page in range(0, 250, 25):
        url = f'https://movie.douban.com/top250?start={page}&filter='
        result = get_net_data(url)
        analysis_data(result)
    f.close()
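
One practical refinement that is not in the original notes: pause briefly between page requests to go easy on the server and lower the chance of being blocked. A minimal sketch, assuming a one-second delay is acceptable:

import time

for page in range(0, 250, 25):
    url = f'https://movie.douban.com/top250?start={page}&filter='
    analysis_data(get_net_data(url))
    time.sleep(1)  # assumption: one second between requests is polite enough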