python Weibo crawler: scraping super topic content under a keyword with selenium

The mobile Weibo pages changed slightly recently, which broke the code in my two earlier posts, 《微博任意关键词爬虫——使用selenium模拟浏览器》 and 《来!用python爬一爬"不知知网翟博士"的微博超话》 (a Weibo keyword crawler with selenium, and scraping the "Dr. Zhai who doesn't know CNKI" super topic). This post fixes those errors.

Welcome to follow the official account: 老白和他的爬虫

1. What changed on Weibo's mobile pages

Scraping the mobile site has one big advantage over the desktop site: more data. Desktop search results are generally capped at 50 pages, which is not enough, whereas on the mobile site you can keep "scrolling" down and new posts keep loading.
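
The whole script rests on one trick: scroll, wait, and count how many post cards are in the DOM; when the count stops growing, everything has loaded. Here is that loop in miniature (a simplified standalone sketch: the URL is a placeholder, the card selector is the one the full script uses, and the real functions below add a retry counter and periodic sleeps instead of stopping on the first stall):

import time
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://m.weibo.cn")  # placeholder page; the real script navigates via search
last_count = 0
while True:
    # Scroll down one page height and give new cards time to load
    driver.execute_script("window.scrollBy(0, document.body.scrollHeight)")
    time.sleep(3)
    cards = driver.find_elements_by_css_selector('div.card.m-panel.card9')
    if len(cards) == last_count:
        break  # nothing new appeared; the real code retries 5 times before giving up
    last_count = len(cards)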

In the previous code, the mobile super topic page looked like this:

(screenshot: the old super topic page, showing the topic name, read count and discussion count)

Over the past few days of scraping, however, I found the page had changed to this:

(screenshot: the updated super topic page, where the name, read count and discussion count no longer appear)

You can see the difference at a glance: the super topic's name, read count and discussion count are gone, which is why the old code now throws errors. This is easy to fix: we simply collect the name, read count and discussion count we need ahead of time, on the search-results page, before navigating into each super topic, as sketched just below.
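
In outline the fix looks like this (a sketch only; the selectors are the ones used in the full code in the next section, and the split assumes reads and discussions appear as two space-separated fields on the card):

# Read the stats off the i-th search-result card *before* clicking through,
# since the super topic page itself no longer shows them
card = driver.find_elements_by_css_selector('div.card.m-panel.card26')[i]
name = driver.find_elements_by_css_selector('div.card.m-panel.card26 h3')[i].text
stats = driver.find_elements_by_css_selector('div.card.m-panel.card26 h4:nth-last-child(1)')[i].text
yuedu = stats.split(" ")[0]   # read count
taolun = stats.split(" ")[1]  # discussion count
driver.execute_script('arguments[0].click()', card)  # now open the topic page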

2. Code changes

Compared with the previous version, the main change is how the super topic's name, read count and discussion count are obtained. The updated weiboTest.py follows; if you run into problems, leave a message in the official account backend.

import time
import xlrd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import excelSave as save

# Scroll the page down
def Transfer_Clicks(browser):
    try:
        browser.execute_script("window.scrollBy(0,document.body.scrollHeight)", "")
    except:
        pass
    return "Transfer successfully \n"

# Check whether the page has finished loading
def isPresent():
    temp = 1
    try:
        driver.find_elements_by_css_selector('div.line-around.layout-box.mod-pagination > a:nth-child(2) > div > select > option')
    except:
        temp = 0
    return temp

# Scroll the super topic list all the way to the bottom
def SuperwordRollToTheEnd():
    before = 0 
    after = 0
    n = 0
    timeToSleep = 50
    while True:
        before = after
        Transfer_Clicks(driver)
        time.sleep(3)
        elems = driver.find_elements_by_css_selector('div.m-box')
        print("当前包含超话最大数量:%d,n当前的值为:%d,当n为5无法解析出新的超话" % (len(elems),n))
        after = len(elems)
        if after > before:
            n = 0
        if after == before:        
            n = n + 1
        if n == 5:
            print("当前包含最大超话数为:%d" % after)
            break
        if after > timeToSleep:
            print("抓取到%d多条超话,休眠30秒" % timeToSleep)
            timeToSleep = timeToSleep + 50
            time.sleep(30)
# Insert rows into the workbook
def insert_data(elems,path,name,yuedu,taolun):
    for elem in elems:
        workbook = xlrd.open_workbook(path)  # open the workbook
        sheets = workbook.sheet_names()  # list all sheets in the workbook
        worksheet = workbook.sheet_by_name(sheets[0])  # take the first sheet
        rows_old = worksheet.nrows  # number of rows already written
        rid = rows_old
        # Username
        weibo_username = elem.find_elements_by_css_selector('h3.m-text-cut')[0].text
        weibo_userlevel = "普通用户"
        #微博等级
        try: 
            weibo_userlevel_color_class = elem.find_elements_by_css_selector("i.m-icon")[0].get_attribute("class").replace("m-icon ","")
            if weibo_userlevel_color_class == "m-icon-yellowv":
                weibo_userlevel = "黄v"
            if weibo_userlevel_color_class == "m-icon-bluev":
                weibo_userlevel = "蓝v"
            if weibo_userlevel_color_class == "m-icon-goldv-static":
                weibo_userlevel = "金v"
            if weibo_userlevel_color_class == "m-icon-club":
                weibo_userlevel = "微博达人"     
        except:
            weibo_userlevel = "普通用户"
        # Post text, plus share/comment/like counts
        weibo_content = elem.find_elements_by_css_selector('div.weibo-text')[0].text
        shares = elem.find_elements_by_css_selector('i.m-font.m-font-forward + h4')[0].text
        comments = elem.find_elements_by_css_selector('i.m-font.m-font-comment + h4')[0].text
        likes = elem.find_elements_by_css_selector('i.m-icon.m-icon-like + h4')[0].text
        # Post time
        weibo_time = elem.find_elements_by_css_selector('span.time')[0].text
        print("用户名:"+ weibo_username + "|"
              "微博等级:"+ weibo_userlevel + "|"
              "微博内容:"+ weibo_content + "|"
              "转发:"+ shares + "|"
              "评论数:"+ comments + "|"
              "点赞数:"+ likes + "|"
              "发布时间:"+ weibo_time + "|"
              "话题名称" + name + "|" 
              "话题讨论数" + yuedu + "|"
              "话题阅读数" + taolun)
        value1 = [[rid, weibo_username, weibo_userlevel,weibo_content, shares,comments,likes,weibo_time,keyword,name,yuedu,taolun],]
        print("当前插入第%d条数据" % rid)
        save.write_excel_xls_append_norepeat(book_name_xls, value1)
# Scrape the posts currently loaded on the page
def get_current_weibo_data(elems,book_name_xls,name,yuedu,taolun,maxWeibo):
    # Scroll until no new posts appear, the cap is hit, or a checkpoint is reached
    before = 0
    after = 0
    n = 0
    timeToSleep = 300
    while True:
        before = after
        Transfer_Clicks(driver)
        time.sleep(3)
        elems = driver.find_elements_by_css_selector('div.card.m-panel.card9')
        print("Current number of post cards: %d, n is now %d; when n reaches 5 no new posts can be parsed" % (len(elems),n))
        after = len(elems)
        if after > before:
            n = 0
        if after == before:
            n = n + 1
        if n == 5:
            print("Final number of posts for this topic: %d" % after)
            insert_data(elems,book_name_xls,name,yuedu,taolun)
            break
        if len(elems) > maxWeibo:
            print("Reached the cap of %d posts" % maxWeibo)
            insert_data(elems,book_name_xls,name,yuedu,taolun)
            break
        if after > timeToSleep:
            print("Scraped more than %d posts; saving the new ones and sleeping 30 seconds" % timeToSleep)
            timeToSleep = timeToSleep + 300
            insert_data(elems,book_name_xls,name,yuedu,taolun)
            time.sleep(30)
# Click the "话题" (topics) tab to reach the super topic list
def get_superWords():
    time.sleep(5)
    elem = driver.find_element_by_xpath("//*[@class='scroll-box nav_item']/ul/li/span[text()='话题']")
    elem.click()
    # Scroll to load every super topic
    SuperwordRollToTheEnd()
    elemsOfSuper = driver.find_elements_by_css_selector('div.card.m-panel.card26')   
    return elemsOfSuper

# Collect each super topic's URL, name, discussion count and read count
def get_superwordsUrl():
    elemsOfSuper = get_superWords()
    superWords_url = []
    for i in range(0,len(elemsOfSuper)):
        superwordsInfo = []
        print("当前获取第%d个超话链接,共有%d个超话"% (i+1,len(elemsOfSuper)))
        time.sleep(1)      
        element = driver.find_elements_by_css_selector('div.card.m-panel.card26')[i]
        name = driver.find_elements_by_css_selector('div.card.m-panel.card26 h3')[i].text
        yuedu_taolun = driver.find_elements_by_css_selector('div.card.m-panel.card26 h4:nth-last-child(1)')[i].text
        yuedu = yuedu_taolun.split(" ")[0]   # read count
        taolun = yuedu_taolun.split(" ")[1]  # discussion count
        # Topic name, read count and discussion count, captured before clicking through
        print(name)
        print(taolun)
        print(yuedu)
        # Click into the super topic to capture its URL
        driver.execute_script('arguments[0].click()',element)
        time.sleep(3)
        print(driver.current_url)
        # Store the URL together with the topic info
        superwordsInfo = [driver.current_url,name,taolun,yuedu]
        superWords_url.append(superwordsInfo)
        driver.back()
    return superWords_url
# Run the crawler
def spider(username,password,driver,book_name_xls,sheet_name_xls,keyword,maxWeibo):
    
    # Create the output file if it does not exist
    if os.path.exists(book_name_xls):
        print("文件已存在")
    else:
        print("文件不存在,重新创建")
        value_title = [["rid", "用户名称", "微博等级", "微博内容", "微博转发量","微博评论量","微博点赞","发布时间","搜索关键词","话题名称","话题讨论数","话题阅读数"],]
        save.write_excel_xls(book_name_xls, sheet_name_xls, value_title)
    
    # Load the driver and open the login page in the browser
    driver.set_window_size(452, 790)
    driver.get("https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=https%3A%2F%2Fm.weibo.cn%2F")  
    time.sleep(3)
    # Log in
    elem = driver.find_element_by_xpath("//*[@id='loginName']")
    elem.send_keys(username)
    elem = driver.find_element_by_xpath("//*[@id='loginPassword']")
    elem.send_keys(password)
    elem = driver.find_element_by_xpath("//*[@id='loginAction']")
    elem.send_keys(Keys.ENTER)
    time.sleep(5)
    # Wait until the page has loaded
    while True:
        result = isPresent()
        print('Page check (1 = loaded, 0 = not yet): %d' % result)
        if result == 1:
            elems = driver.find_elements_by_css_selector('div.line-around.layout-box.mod-pagination > a:nth-child(2) > div > select > option')
            #return elems  # if this were wrapped in a function, return the elements here
            break
        else:
            print('Page not loaded yet')
            time.sleep(20)
    time.sleep(5)
    # Search for the keyword
    driver.find_element_by_xpath("//*[@class='m-text-cut']").click()
    time.sleep(5)
    elem = driver.find_element_by_xpath("//*[@type='search']")
    elem.send_keys(keyword)
    elem.send_keys(Keys.ENTER)
   
    superWords_url = get_superwordsUrl()
    print("超话链接获取完毕,休眠5秒")
    time.sleep(5)
    for url in superWords_url:
        driver.get(url[0])  
        time.sleep(3)
        name = url[1]  
        taolun = url[2]
        yuedu = url[3]
        get_current_weibo_data(elems,book_name_xls,name,yuedu,taolun,maxWeibo) # scrape the default "综合" (all) tab
        time.sleep(3)
        shishi_element = driver.find_element_by_xpath("//*[@class='scroll-box nav_item']/ul/li/span[text()='实时']")
        driver.execute_script('arguments[0].click()',shishi_element) 
        get_current_weibo_data(elems,book_name_xls,name,yuedu,taolun,maxWeibo) # scrape the "实时" (real-time) tab
        time.sleep(5)
        remen_element = driver.find_element_by_xpath("//*[@class='scroll-box nav_item']/ul/li/span[text()='热门']")
        driver.execute_script('arguments[0].click()',remen_element)
        get_current_weibo_data(elems,book_name_xls,name,yuedu,taolun,maxWeibo) # scrape the "热门" (hot) tab
    
if __name__ == '__main__':
    username = "" #你的微博登录名
    password = "" #你的密码
    driver = webdriver.Chrome('/Users/Desktop/python/weibo_keyword/chromedriver')#你的chromedriver的地址
    book_name_xls = "/Users/Desktop/weibo.xls" #填写你想存放excel的路径,没有文件会自动创建
    sheet_name_xls = '微博数据' #sheet表名
    maxWeibo = 1000 #设置最多多少条微博,如果未达到最大微博数量可以爬取当前已解析的微博数量
    keywords = ["翟天临学术",] #输入你想要的关键字,可以是多个关键词的列表的形式
    for keyword in keywords:
        spider(username,password,driver,book_name_xls,sheet_name_xls,keyword,maxWeibo)

The data-storage module excelSave.py is unchanged:

import xlrd
import xlwt
from xlutils.copy import copy

def write_excel_xls(path, sheet_name, value):
    index = len(value)  # number of rows to write
    workbook = xlwt.Workbook()  # create a new workbook
    sheet = workbook.add_sheet(sheet_name)  # add a sheet to the workbook
    for i in range(0, index):
        for j in range(0, len(value[i])):
            sheet.write(i, j, value[i][j])  # write each cell (row i, column j)
    workbook.save(path)  # save the workbook
    print("xls workbook written!")

def read_excel_xls(path):
    data = []
    workbook = xlrd.open_workbook(path)  # open the workbook
    sheets = workbook.sheet_names()  # list all sheets
    worksheet = workbook.sheet_by_name(sheets[0])  # take the first sheet
    if worksheet.nrows == 1:
        print("Only the header row so far")
    else:
        for i in range(1, worksheet.nrows):  # read from the second row (skip the header)
            dataTemp = []
            for j in range(0, worksheet.ncols):
                #print(worksheet.cell_value(i, j), "\t", end="")  # read cell by cell
                dataTemp.append(worksheet.cell_value(i, j))
            data.append(dataTemp)
    return data
     
def write_excel_xls_append_norepeat(path, value):
    workbook = xlrd.open_workbook(path)  # open the workbook
    sheets = workbook.sheet_names()  # list all sheets
    worksheet = workbook.sheet_by_name(sheets[0])  # take the first sheet
    rows_old = worksheet.nrows  # number of rows already present
    new_workbook = copy(workbook)  # convert the xlrd object into a writable xlwt one
    new_worksheet = new_workbook.get_sheet(0)  # first sheet of the writable copy
    rid = 0
    for i in range(0, len(value)):
        data = read_excel_xls(path)
        data_temp = []
        for m in range(0,len(data)):
            data_temp.append(data[m][1:len(data[m])])  # existing rows, minus the rid column
        value_temp = []
        for m in range(0,len(value)):
            value_temp.append(value[m][1:len(value[m])])  # new rows, minus the rid column

        if value_temp[i] not in data_temp:
            for j in range(0, len(value[i])):
                new_worksheet.write(rid+rows_old, j, value[i][j])  # append after the existing rows
            rid = rid + 1
            new_workbook.save(path)  # save the workbook
            print("Appended a row to the xls workbook!")
        else:
            print("Duplicate row, skipped")

Reply "20190414" in the official account backend to get the complete code and driver.

And finally, do give the account a follow.
