Web Scraper Notes: Chapter 3

This is note s2 in my scraping series; writing these up is how I push myself to keep studying.
If you're new to the series, start with s1: read Web Scraper Series Notes s1 first.

Chapter 3 of the video covers data parsing, which is the heart of web scraping.

1 Regular expressions


Requirement: scrape the hot-image pages of Qiushibaike and save every picture to a designated folder.

Along the way we learn to analyze the page structure step by step and to create a folder from code.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import requests

if __name__ == '__main__':
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    if not os.path.exists('./糗图libs'):
        os.mkdir('./糗图libs')  # create the output folder
    ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'  # non-greedy capture of the src value

Let's pause here and look at the page source.
Each image link sits inside a div with class "thumb", wrapped in the src attribute of the img tag, which gives the regular expression ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'. A for loop then collects every image link from the first two pages:

    for i in range(1,3):
        i = str(i)
        url = 'https://www.qiushibaike.com/imgrank/page/'+i+'/'
        page_text = requests.get(url=url,headers=headers).text  # fetch the page HTML
        res_list = re.findall(ex,page_text,re.S)  # re.S lets "." match newlines as well
        for res in res_list:
            img_url = 'https:' + res  # prepend the scheme to complete the URL
            img_data = requests.get(url = img_url).content
            name = img_url.split('/')[-1]   # last path segment becomes the file name
            imgpath = './糗图libs/' + name   # remember the trailing slash in the folder path
            with open(imgpath,'wb') as fp:
                fp.write(img_data)
                # print(name + ' downloaded successfully')
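
To see what the capture group actually returns, here is a minimal sketch run against a hand-written snippet (the sample HTML below is simplified for illustration, not copied from the live site):

import re

sample = '<div class="thumb"><a href="/article/1"><img src="//pic.qiushibaike.com/system/pictures/demo.jpg" alt="demo"></a></div>'
ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
print(re.findall(ex, sample, re.S))  # ['//pic.qiushibaike.com/system/pictures/demo.jpg']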

2 Parsing with bs4


import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    base_url = 'http://www.shicimingju.com/book/sanguoyanyi.html'  # table-of-contents page for the book
    page_text = requests.get(url = base_url,headers=headers)
    page_text.encoding = 'utf-8'  # override the encoding when requests guesses it wrong
    soup = BeautifulSoup(page_text.text, 'lxml')
    a_eles = soup.select('.book-mulu > ul > li')   # CSS select(); returns a list of li tags
    fp = open('./三国.txt', 'w', encoding='utf-8')
    for li in a_eles:
        title = li.a.string
        detail_url = 'http://www.shicimingju.com'+ li.a['href']
        detail_text = requests.get(url=detail_url, headers=headers)
        detail_text.encoding = 'utf-8'
        detail_soup = BeautifulSoup(detail_text.text, 'lxml')
        content = detail_soup.find('div', class_='chapter_content').text  # chapter body text

        fp.write(title+":"+content+'\n')
        print(title + ' downloaded')
    fp.close()  # close the output file once every chapter has been written
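
As a self-contained illustration of the three bs4 accessors used above (select(), .string and ['href']), here is a minimal sketch on hand-written HTML rather than the real page:

from bs4 import BeautifulSoup

html = '<div class="book-mulu"><ul><li><a href="/book/sanguoyanyi/1.html">第一回</a></li></ul></div>'
soup = BeautifulSoup(html, 'lxml')
for li in soup.select('.book-mulu > ul > li'):
    print(li.a.string, li.a['href'])   # 第一回 /book/sanguoyanyi/1.html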

3 Parsing with xpath


1 Requirement: parse second-hand housing listings from 58.com

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File    : xpath58二手房.py

# Parse the listing titles from the index page, then the price and description from each detail page
import requests
from lxml import etree

if __name__ == "__main__":
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    url = 'https://bj.58.com/ershoufang/'
    page_text = requests.get(url=url,headers=headers).text
    #print(page_text)
    tree = etree.HTML(page_text)  # build the lxml element tree from the HTML
    title_list = tree.xpath('//div[@class="property-content-title"]/h3/text()')
    #print(title_list)
    title_url = tree.xpath('//section[@class="list"]//div[@class="property"]/a/@href')
    print(title_url)
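    # Assuming the two lists line up one-to-one (a hypothetical pairing, not part of the
    # original write-up), zip can join each title with its listing link:
    for title, href in zip(title_list, title_url):
        print(title.strip(), href)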

When I tried to go one step further and parse the detail pages, 58.com hit me with its anti-scraping verification. Well played, 58!

2 Requirement: parse image data from http://pic.netbian.com/4kmeinv/

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://pic.netbian.com/4kmeinv/  image listing page
import os
import requests
from lxml import etree
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
if not os.path.exists('./美图libs'):
    os.mkdir('./美图libs')  # create the output folder (needed before writing files into it)
for i in range(2,10):
    url = 'https://pic.netbian.com/4kmeinv/index_'+str(i) + '.html'  # pages 2+ use index_N.html
    response = requests.get(url=url,headers=headers)
    # check the page's original encoding as reported by requests
    #print(response.encoding)
    page_text = response.text
    tree = etree.HTML(page_text)
    li_list = tree.xpath('//div[@class="slist"]/ul/li')  # grab the li tags first
    for li in li_list:
        img_url = 'http://pic.netbian.com'+li.xpath('./a/img/@src')[0]
        img_name = li.xpath('./a/img/@alt')[0]
        # re-encode the name: a common fix for garbled Chinese when requests decodes with the wrong charset
        img_name = img_name.encode('iso-8859-1').decode('gbk') + ".jpg"
        #print(img_url,img_name)
        img_data = requests.get(url=img_url).content
        imgpath = './美图libs/' + img_name  # remember the trailing slash in the folder path
        with open(imgpath, 'wb') as fp:
            fp.write(img_data)
            print(img_name + ' downloaded')
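
An alternative to the per-name encode/decode round-trip is to declare the page encoding once on the response; a minimal sketch, assuming the site really serves GBK (which is what the iso-8859-1 to gbk trick above implies):

response = requests.get(url=url, headers=headers)
response.encoding = 'gbk'   # override the wrongly guessed encoding
page_text = response.text   # .text now decodes the Chinese alt attributes correctly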

3 Requirement: parse all city names from https://www.aqistudy.cn/historydata/

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project requirement: parse all city names from https://www.aqistudy.cn/historydata/

import requests
from lxml import etree
url = 'https://www.aqistudy.cn/historydata/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
response = requests.get(url=url,headers=headers)
# check the page's original encoding as reported by requests
# print(response.encoding)
page_text = response.text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@class="bottom"]/ul/li | //div[@class="bottom"]/ul//li')  # "|" unions the two xpath branches into one node list
for li in li_list:
    city_name = li.xpath('./a/text()')[0]
    city_url = 'https://www.aqistudy.cn/historydata/'+li.xpath('./a/@href')[0]
    print(city_name,city_url)
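
To make the "|" union operator concrete without depending on the live page, here is a minimal sketch on hand-written HTML (the class names hot and all are made up for illustration):

from lxml import etree

demo = etree.HTML('''
<div class="bottom">
  <ul class="hot"><li><a href="beijing.html">北京</a></li></ul>
  <ul class="all"><li><a href="tianjin.html">天津</a></li></ul>
</div>''')
for li in demo.xpath('//ul[@class="hot"]/li | //ul[@class="all"]/li'):
    print(li.xpath('./a/text()')[0], li.xpath('./a/@href')[0])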

4 Fetching images from jandan.net

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

import requests
from lxml import etree
from fake_useragent import UserAgent

url = 'http://jandan.net/ooxx'
ua = UserAgent(verify_ssl=False,use_cache_server=False).random  # random User-Agent; these keyword arguments come from older fake_useragent releases
headers = {
    'User-Agent':ua
}
page_text = requests.get(url=url,headers=headers).text

tree = etree.HTML(page_text)

if not os.path.exists('./煎蛋libs'):
    os.mkdir('./煎蛋libs')  # create the output folder

imgCode_list = tree.xpath('//div[@class="text"]/p/img/@src')

for url in imgCode_list:
    filePath = 'https:' + url
    print(filePath)
    img_name = filePath.split('/')[-1]
    img_data = requests.get(url=filePath).content
    imgpath = './煎蛋libs/' + img_name  # remember the trailing slash in the folder path
    with open(imgpath, 'wb') as fp:
        fp.write(img_data)
        print(img_name + ' downloaded')
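
fake_useragent builds its User-Agent pool from external data and can fail at import or construction time (and newer releases may reject the keyword arguments used above), so a fallback to a fixed UA string keeps the script running; a minimal sketch under those assumptions:

try:
    from fake_useragent import UserAgent
    ua = UserAgent().random
except Exception:
    # fall back to a fixed, known-good UA string if fake_useragent is unavailable or fails
    ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
headers = {'User-Agent': ua}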

over is Over——Lee
