# 易班已发推文数据爬取 (Yiban published-post data scraper)

"""

本代码用于爬取易班已发表数据
(由于写这篇代码的时候就一页,就没设置翻页)
用到的库:selenium、csv、pandas、bs4、time、re
最终成果:形成易班推文数据csv格式
作者:UPC.故里
注:有成功概率,因为易班有时候有登录验证有时候没有

"""


import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup

"""
The By implementation.
"""
class By:
    """Set of supported Selenium locator strategies.

    NOTE(review): this is a hand-copied duplicate of
    ``selenium.webdriver.common.by.By``; importing the original would be
    preferable, but the constant values below are identical to selenium's.
    """

    ID = "id"
    XPATH = "xpath"
    LINK_TEXT = "link text"
    PARTIAL_LINK_TEXT = "partial link text"
    NAME = "name"
    TAG_NAME = "tag name"
    CLASS_NAME = "class name"
    CSS_SELECTOR = "css selector"

# --- Log in to Yiban and open the "published by me" page --------------------
url = 'https://www.yiban.cn/Org/orglistShow/type/forum/puid/5370538#'  # landing page with login form
driver = webdriver.Firefox()
driver.get(url)
time.sleep(1)

# Credentials: fill in your own (or your organization's) Yiban account.
stu_number = ''
stu_password = ''

# Form fields located via element ids (account: "account-txt",
# password: "password-txt"). Selenium 4 removed find_element_by_*,
# so use find_element(By.ID, ...) with the By class defined above.
driver.find_element(By.ID, 'account-txt').send_keys(stu_number)
time.sleep(1)
driver.find_element(By.ID, 'password-txt').send_keys(stu_password)
time.sleep(1)

sign_in_xpath = '//*[@id="login-btn"]'
# Simulate a mouse click on the login button.
ActionChains(driver).click(driver.find_element(By.XPATH, sign_in_xpath)).perform()
time.sleep(5)  # wait for login; Yiban sometimes shows a CAPTCHA, so success is not guaranteed

# Click the "published by me" tab.
ActionChains(driver).click(
    driver.find_element(By.XPATH, '/html/body/main/div/div[2]/div[2]/ul/li[3]/a')
).perform()
time.sleep(2)

# --- Scrape the post table, page by page ------------------------------------
a_text_list = []      # text of every <a> tag (post titles live here)
span_text_list = []   # text of every <span> tag (dept, board, time, counters)
for page in range(6):  # number of pages to walk through
    # Grab the table container's inner HTML once and parse it offline.
    # (find_element_by_xpath was removed in Selenium 4.)
    table_html = driver.find_element(
        By.XPATH, '/html/body/main/div/div[2]/div[3]'
    ).get_attribute('innerHTML')
    soup = BeautifulSoup(table_html, 'html.parser')
    a_text_list.extend(tag.text for tag in soup.find_all('a'))
    span_text_list.extend(tag.text for tag in soup.find_all('span'))
    time.sleep(2)
    # Advance via a JavaScript click — sidesteps "element not clickable"
    # overlay problems that a normal .click() can hit.
    next_btn = driver.find_element(
        By.XPATH, '/html/body/main/div/div[2]/div[4]/div/div/a[2]'
    )
    driver.execute_script("arguments[0].click();", next_btn)
    time.sleep(2)

# --- Assemble the scraped fields into a DataFrame and save as CSV -----------
# Each post contributes 6 consecutive <span> texts
# (dept, board, time, reads, likes, comments) — hence the stride-6 slices;
# post titles are every 9th <a> text starting at index 1.
columns = {
    '活动院系': span_text_list[::6],
    '推文标题': a_text_list[1::9],
    '推送板块': span_text_list[1::6],
    '推送时间': span_text_list[2::6],
    '阅读量': span_text_list[3::6],
    '点赞量': span_text_list[4::6],
    '评论量': span_text_list[5::6],
}
# Robustness: pandas raises ValueError when columns differ in length
# (e.g. a partially loaded page); truncate everything to the shortest column.
n_rows = min(len(v) for v in columns.values())
data = {name: values[:n_rows] for name, values in columns.items()}
dataframe = pd.DataFrame(data)
# utf-8-sig writes a BOM so Excel renders the Chinese headers correctly.
dataframe.to_csv('Yiban.csv', index=False, sep=',', encoding='utf-8-sig')

driver.quit()  # quit() (not close()) also shuts down the geckodriver process