Web Scraping: Crawling Baidu Tieba

Straight to the example code:

import requests
# from lxml import html
# etree = html.etree
from lxml import etree
class Tieba(object):
    def __init__(self, name):
        self.url = f'https://tieba.baidu.com/f?kw={name}'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
            # 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1) '     #  for an old browser UA the content is not commented out with <!--  -->
        }

    def get_data(self, url):
        response = requests.get(url, headers=self.headers)
        #  Save the response body locally to inspect the page source
        # with open('tieba.html', 'wb') as f:
        #     f.write(response.content)
        return response.content

    def parse_data(self, data):
        #  Build the element tree
        data = data.decode().replace("<!--", "").replace("-->", "")   #  for a modern browser UA the server comments out part of the markup
        el_html = etree.HTML(data)
        el_list = el_html.xpath('//*[@id="thread_list"]/li/div/div[2]/div[1]/div[1]/a')  #  this yields a list of element objects
        print(len(el_list))
        data_list = []
        for el in el_list:
            tmp = {}
            tmp['title'] = el.xpath('./text()')[0]    #  xpath returns a list, hence the [0] index
            tmp['href'] = 'https://tieba.baidu.com' + el.xpath('./@href')[0]      #  the href is a relative path, so prepend the domain
            data_list.append(tmp)
        print(data_list)
        try:
            # next_url = 'https' + el_html.xpath('//a[@class="next pagination-item "]/@href')
            next_url = 'https:' + el_html.xpath('//a[contains(text(),"下一页")]/@href')[0]  # the href is protocol-relative, so prepend the scheme
        except IndexError:
            next_url = None
        return data_list, next_url

    def save_data(self, data_list):
        for data in data_list:
            print(data)

    def run(self):
        next_url = self.url
        while True:
            #  Send the request and get the response
            data = self.get_data(next_url)
            #  Extract the data rows and the next-page url from the response
            data_list, next_url = self.parse_data(data)
            self.save_data(data_list)
            #  Stop when there is no next page
            if next_url is None:
                break


if __name__ == '__main__':
    tieba = Tieba('上海大学')
    tieba.run()
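
One detail worth mentioning: the "next page" href extracted above is protocol-relative (it starts with //), which is why the code prepends 'https:'. A slightly more robust alternative, sketched here with a made-up href, is urllib.parse.urljoin, which resolves the link against the current page URL:

from urllib.parse import urljoin

# Made-up example of a protocol-relative next-page href
page_url = 'https://tieba.baidu.com/f?kw=test'
next_href = '//tieba.baidu.com/f?kw=test&pn=50'
# urljoin fills in the scheme from the base URL
print(urljoin(page_url, next_href))  # https://tieba.baidu.com/f?kw=test&pn=50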

Output:

Analysis of the tricky part:
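
The tricky part is that, when the User-Agent looks like a modern browser, the server wraps the thread list in HTML comments, so XPath finds nothing until the comment markers are stripped. A minimal self-contained sketch of the same idea, using made-up markup rather than the live page:

from lxml import etree

# Made-up markup imitating the comment-wrapped response
raw = '<div><!--<ul id="thread_list"><li><a href="/p/1">hi</a></li></ul>--></div>'

# Parsed as-is, the commented-out list is invisible to XPath
print(etree.HTML(raw).xpath('//*[@id="thread_list"]//a/text()'))   # []

# Strip the comment markers and the same XPath matches
fixed = raw.replace('<!--', '').replace('-->', '')
print(etree.HTML(fixed).xpath('//*[@id="thread_list"]//a/text()')) # ['hi']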

Next, we take the data crawled from Baidu Tieba and write it to a CSV file.

Example code:

import requests
# from lxml import html
# etree = html.etree
import csv
from lxml import etree


class Tieba(object):

    def __init__(self, name):
        self.url = f'https://tieba.baidu.com/f?kw={name}'
        self.headers = {
            # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
            # 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1) '     #  for an old browser UA the content is not commented out with <!--  -->
        }
        self.f = open('上海理工大学吧.csv', 'w', encoding='utf-8-sig', newline="")
        self.csv_write = csv.writer(self.f)
        #  Write the header row once here, not once per page
        self.csv_write.writerow(['title', 'href', 'author', 'reviewer', 'last_comment_time', 'comment'])

    def get_data(self, url):
        response = requests.get(url, headers=self.headers)
        #  Save the response body locally to inspect the page source
        # with open('tieba.html', 'wb') as f:
        #     f.write(response.content)
        return response.content

    def parse_data(self, data):
        #  Build the element tree
        data = data.decode().replace("<!--", "").replace("-->", "")  # for a modern browser UA the server comments out part of the markup
        el_html = etree.HTML(data)
        el_list = el_html.xpath('//*[@id="thread_list"]/li/div')  # one element per thread row
        print(len(el_list))
        data_list = []
        for el in el_list:
            tmp = {}
            tmp['title'] = el.xpath('./div[2]/div[1]/div[1]/a/text()')[0]  # xpath returns a list, hence the [0] index
            tmp['href'] = 'https://tieba.baidu.com' + el.xpath('./div[2]/div[1]/div[1]/a/@href')[0]  # the href is a relative path, so prepend the domain
            #  Ads and pinned posts may lack some fields, so fall back to an empty string
            try:
                tmp['author'] = el.xpath('./div[2]/div[1]/div[2]/span[1]/span[1]/a/text()')[0]
            except IndexError:
                tmp['author'] = ''
            try:
                tmp['reviewer'] = el.xpath('./div[2]/div[2]/div[2]/span[1]/a/text()')[0]
            except IndexError:
                tmp['reviewer'] = ''
            try:
                tmp['last_comment_time'] = el.xpath('./div[2]/div[2]/div[2]/span[2]/text()')[0]
            except IndexError:
                tmp['last_comment_time'] = ''
            try:
                tmp['comment'] = el.xpath('./div[2]/div[2]/div[1]/div/text()')[0]
            except IndexError:
                tmp['comment'] = ''
            data_list.append(tmp)

        try:
            # next_url = 'https' + el_html.xpath('//a[@class="next pagination-item "]/@href')
            next_url = 'https:' + el_html.xpath('//a[contains(text(),"下一页")]/@href')[0]  # the href is protocol-relative, so prepend the scheme
        except IndexError:
            next_url = None
        return data_list, next_url

    def save_data(self, data_list):
        for data in data_list:
            print(data)
            #  Strip the line breaks and padding Tieba leaves in these fields
            data['last_comment_time'] = data['last_comment_time'].replace('\r', '').replace('\n', '').strip()
            data['comment'] = data['comment'].replace('\r', '').replace('\n', '').strip()
            self.csv_write.writerow(
                [data['title'], data['href'], data['author'], data['reviewer'], data['last_comment_time'],
                 data['comment']])

    def run(self):
        next_url = self.url
        while True:
            #  Send the request and get the response
            data = self.get_data(next_url)
            #  Extract the data rows and the next-page url from the response
            data_list, next_url = self.parse_data(data)
            self.save_data(data_list)
            #  Stop when there is no next page
            if next_url is None:
                break
        self.f.close()


if __name__ == '__main__':
    tieba = Tieba('上海理工大学')
    tieba.run()
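
Since parse_data already produces dicts, csv.DictWriter is a natural alternative to maintaining the column list by hand. A minimal sketch (the filename and sample row are made up):

import csv

fieldnames = ['title', 'href', 'author', 'reviewer', 'last_comment_time', 'comment']
with open('tieba.csv', 'w', encoding='utf-8-sig', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames, restval='')
    writer.writeheader()  # runs once, so paging cannot duplicate the header row
    # restval='' fills in any field a row happens to be missing
    writer.writerow({'title': 'demo', 'href': 'https://tieba.baidu.com/p/1'})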

Output:
