Python Crawler - Examples

1. Crawl a personal blog post, stripping ads and navigation
import requests
from bs4 import BeautifulSoup

def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # Raise an exception if the status code is not 200.
        response.encoding = response.apparent_encoding  # Detect the page encoding so response.text decodes correctly.
    except Exception as e:
        print("Fetch failed:", e)
    else:
        print(response.url)
        print("Fetch succeeded!")
        return response.content  # Return raw bytes; BeautifulSoup handles the decoding.

def parser_content(htmlContent):

    # Instantiate a soup object for easier processing.
    soup = BeautifulSoup(htmlContent, 'html.parser')

    # Extract the page head (charset and title) to avoid mojibake in the saved file.
    headObj = soup.head

    # Extract the content we need: the blog post body.
    divObj = soup.find_all('div', class_="blog-content-box")[0]

    # The page's first script tag, kept here in case it is needed (unused below).
    scriptObj = soup.script
    with open('doc/csdn.html', 'w', encoding='utf-8') as f:
        # Write the head (declares the encoding and the title).
        f.write(str(headObj))
        # Write the blog post body.
        f.write(str(divObj))
        print("Download succeeded......")

        # f.write(str(scriptObj))
if __name__ == '__main__':
    url = "https://blog.csdn.net/King15229085063/article/details/87380182"
    content = get_content(url)
    parser_content(content)
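
As a variation, the same extraction can be written with CSS selectors. A minimal sketch, assuming the page keeps its blog-content-box class (extract_post is just an illustrative name, not part of the script above):

from bs4 import BeautifulSoup

def extract_post(html):
    soup = BeautifulSoup(html, 'html.parser')
    # select_one returns the first match, equivalent to find_all(...)[0] above.
    return str(soup.head) + str(soup.select_one('div.blog-content-box'))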
2. Compile an index of personal blog posts
import requests
from bs4 import BeautifulSoup
import re


def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # Raise an exception if the status code is not 200.
        response.encoding = response.apparent_encoding  # Detect the page encoding so response.text decodes correctly.
    except Exception as e:
        print("Fetch failed:", e)
    else:
        print(response.url)
        print("Fetch succeeded!")
        return response.content  # Return raw bytes; BeautifulSoup handles the decoding.


def parser_content(htmlContent):
    # bloginfo is only appended to (never rebound), so the global
    # declaration is not strictly required, but it documents the intent.
    global bloginfo
    # Instantiate a soup object for easier processing.
    soup = BeautifulSoup(htmlContent, 'html.parser')
    # 1). Find each blog entry's container: a div tag whose class is
    #     "article-item-box csdn-tracking-statistics", e.g.
    #     <div class="article-item-box csdn-tracking-statistics" data-articleid="85718923">
    divObjs = soup.find_all('div', class_="article-item-box")
    # 2). Walk the div tags and pull out each blog title.
    #     The title lives in the a tag inside the h4 tag.
    # Skip the first box, which is a promoted (ad) entry, keeping only personal posts.
    for divObj in divObjs[1:]:
        # 2-1. Blog title: drop the leading "original/reposted" label and keep the
        #      title itself; joining the remaining tokens keeps multi-word titles intact.
        title = ' '.join(divObj.h4.a.get_text().split()[1:])
        # 2-2. Blog link: the href value of the a tag.
        blogUrl = divObj.h4.a.get('href')
        # Accumulate every post as (blogtitle, blogurl).
        bloginfo.append((title, blogUrl))


if __name__ == '__main__':
    blogPage = 3
    # Global variable holding every post's info.
    bloginfo = []
    for page in range(1, blogPage + 1):
        url = "https://blog.csdn.net/King15229085063/article/list/%s" % (page)
        content = get_content(url)
        parser_content(content)
        print("Page %d compiled...." % (page))

    with open('doc/myblog.md', 'a', encoding='utf-8') as f:
        for index, info in enumerate(bloginfo[::-1]):
            f.write('- Post %d: [%s](%s)\n' % (index + 1, info[0], info[1]))
    print("Done.....")
3. Crawl Douban Top 250 movie information

url = "http://movie.douban.com/top250/"
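
Douban paginates the Top 250 across pages of 25 movies each, selected by a start offset; a quick sketch of the URLs the crawler below will request:

# Page N starts at offset (N - 1) * 25.
for page in range(1, 3):
    print("https://movie.douban.com/top250?start=%d" % ((page - 1) * 25))
# https://movie.douban.com/top250?start=0
# https://movie.douban.com/top250?start=25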

import re

import requests
from bs4 import BeautifulSoup

def get_content(url):
    try:
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36"
        response = requests.get(url, headers={'User-Agent': user_agent})
        response.raise_for_status()   # Raise an exception if the status code is not 200.
        response.encoding = response.apparent_encoding  # Detect the page encoding so response.text decodes correctly.
    except Exception as e:
        print("Fetch failed:", e)
    else:
        print(response.url)
        print("Fetch succeeded!")
        return response.content  # Return raw bytes; BeautifulSoup handles the decoding.

def parser_content(htmlContent):
    # Instantiate a soup object for easier processing.
    soup = BeautifulSoup(htmlContent, 'html.parser')
    # 1). Movie entries live in li tags inside the ol tag:
    #     <ol class="grid_view">
    olObj = soup.find_all('ol', class_='grid_view')[0]

    # 2). Each movie's details are stored in an li tag.
    details = olObj.find_all('li')

    for detail in details:
        # 3). Movie title.
        movieName = detail.find('span', class_='title').get_text()

        # 4). Movie rating.
        movieScore = detail.find('span', class_='rating_num').get_text()

        # 5). Number of ratings. The pattern keeps the Chinese suffix "人评价"
        #     ("people rated") because that is the literal page text.
        # The match is a NavigableString, so cast it to a plain str.
        movieCommentNum = str(detail.find(string=re.compile(r'\d+人评价')))

        # 6). Short blurb (some movies have none).
        movieCommentObj = detail.find('span', class_='inq')
        if movieCommentObj:
            movieComment = movieCommentObj.get_text()
        else:
            movieComment = "No blurb"

        movieInfo.append((movieName, movieScore, movieCommentNum, movieComment))




import openpyxl


def create_to_excel(wbname, data, sheetname='Sheet1'):
    """
    Save the given rows to a newly created Excel workbook.

    :param wbname: path of the workbook file to create;
    :param data: rows to write into the workbook, one tuple per row;
    :param sheetname: name for the active worksheet;
    :return: None
    """

    print("Creating Excel workbook %s......" % (wbname))

    # The file does not exist yet, so instantiate a fresh Workbook object.
    wb = openpyxl.Workbook()
    # Grab the active worksheet.
    sheet = wb.active
    # Rename the worksheet.
    sheet.title = sheetname
    # Write the rows into the worksheet.
    print("Writing data........")
    for row, item in enumerate(data):  # each item is one row of cell values
        print(item)
        for column, cellValue in enumerate(item):
            # openpyxl cells are 1-indexed, hence the +1 offsets.
            sheet.cell(row=row + 1, column=column + 1, value=cellValue)

    wb.save(wbname)
    print("Workbook %s saved......." % (wbname))


if __name__ == '__main__':
    doubanTopPage = 2
    perPage = 25
    # Holds one (name, score, votes, blurb) tuple per movie.
    movieInfo = []
    for page in range(1, doubanTopPage + 1):
        # start offset = (current page - 1) * movies per page (25).
        url = "https://movie.douban.com/top250?start=%s" % ((page - 1) * perPage)
        content = get_content(url)
        parser_content(content)

    create_to_excel('/tmp/hello.xlsx', movieInfo, sheetname="Douban movie info")
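
A quick way to spot-check the result is to read the workbook back with openpyxl; load_workbook and iter_rows are standard openpyxl calls:

import openpyxl

wb = openpyxl.load_workbook('/tmp/hello.xlsx')
sheet = wb.active
# values_only=True yields plain cell values instead of Cell objects (openpyxl >= 2.6).
for name, score, votes, blurb in sheet.iter_rows(values_only=True):
    print(name, score, votes, blurb)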