Python 3.6: Crawling WeChat Official Account Articles via 微小宝

I previously published a Python 3.6 crawler for WeChat official account articles via Sogou, but Sogou's anti-crawling measures for these articles are too restrictive, and I didn't have time to work out a bypass, so I had to switch crawl channels and wrote a crawler that goes through 微小宝 instead. It still builds on the original Python project; for details see https://blog.csdn.net/wudaoshihun/article/details/83552027

The 微小宝 crawler is built on Selenium, so you need the selenium package installed; see https://blog.csdn.net/wudaoshihun/article/details/83592681 for setup details.
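Before running the full demo, it can help to confirm that Chrome, chromedriver, and selenium can talk to each other. Below is a minimal smoke-test sketch (it assumes chromedriver is on your PATH; the target URL is just the 微小宝 ranking page used later):

# -*- coding: utf-8 -*-
# Minimal Selenium smoke test (a sketch): launch headless Chrome and load one page.
# Assumes chromedriver is on PATH; adjust if yours lives elsewhere.
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--no-sandbox')

driver = webdriver.Chrome(chrome_options=options)  # selenium 3.x keyword, as in the demo below
driver.get('https://data.wxb.com/rank?category=-1')
print(driver.title)  # if this prints a title, the environment is ready
driver.quit()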

 

The steps:

1. Log in to 微小宝 (simulated login)

2. Search for the target WeChat official account

3. Fetch its article list

 

Below is a demo. You can make the list of official accounts configurable as needed; the article linked above shows how.

 

# -*- coding: utf-8 -*-
from selenium.common.exceptions import WebDriverException, NoSuchElementException

from utils import config
import datetime
import logging.config
import random
from wechatsogou import *
from utils.tools import *
from utils.alert_is_present import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
import requests
import urllib.request
import os
from pyquery import PyQuery as pq
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from utils.config import *
from selenium.webdriver.support.wait import WebDriverWait
# Logging setup
from os import path

log_file_path = path.join(path.dirname(path.abspath(__file__)), 'resources/china_stat.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger()

# Database instance
mysql = mysql('table_carwler')

# Search API instance (no external cookie)
wechats = WechatSogouApi()

today = datetime.date.today()


"""
启动函数
"""
def wechat_crawler_start(pid=0):
    print("启动抓爬")

    options = define_options(config.source_env)
    # driver地址
    brower = define_driver(options,config.source_env)

    table_crawler_function(brower,pid)

    brower.close()
    print("结束抓爬")

def define_options(source_env):
    options = webdriver.ChromeOptions()
    options.binary_location = binary_location  # path to the Chrome binary
    options.add_argument('--no-sandbox')  # avoids the "DevToolsActivePort file doesn't exist" error

    options.add_argument('window-size=1920x3000')  # browser resolution
    options.add_argument('--disable-gpu')  # the Chrome docs suggest this to work around a bug
    options.add_argument('--hide-scrollbars')  # hide scrollbars for some special pages
    options.add_argument('blink-settings=imagesEnabled=false')  # skip images to speed things up

    if source_env == 'linux':
        options.add_argument('--headless')  # no visible UI; startup fails on Linux without a display otherwise
    return options


def define_driver(options, source_env):
    if source_env == 'linux':
        chromedriver = chrome_driver_binary
        os.environ["webdriver.chrome.driver"] = chromedriver
        brower = webdriver.Chrome(chrome_options=options, executable_path=chromedriver)
    else:
        brower = webdriver.Chrome(chrome_options=options)
        brower.set_window_size(1920, 3000)
    return brower
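
# Aside (not in the original): chrome_options= is the selenium 3.x API; selenium 4
# removed that keyword in favor of options=. This demo targets selenium 3.x.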

"""
自动实现抓爬
"""
def table_crawler_function(brower,pid=0):
    print('父节点'+str(pid))
    #indicatorList =  query_list(pid)
    if pid==0:

        # print('循环中'+str(indicator['Id']))
        login_url = 'https://account.wxb.com/page/login?from=https://data.wxb.com/rank?category=-1'
        user = '//*[@id="root"]/div/div/div[2]/div[1]/span[2]' #账号登录
        username = '//*[@id="email"]'
        userpass = '//*[@id="password"]'
        login = '//*[@id="root"]/div/div/div[2]/form/button'

        brower.get(login_url)

        mouse_move_xpath_click(brower,user)
        time.sleep(1)
        click_xpath_str(brower,user)
        time.sleep(1)
        brower.find_element(By.XPATH, username).send_keys("#####")  # your 微小宝 account
        brower.find_element(By.XPATH, userpass).send_keys("#####")  # your password
        time.sleep(1)
        mouse_move_xpath_click(brower,login)
        time.sleep(1)
        indicatorList = ["1"]
        for indicator in indicatorList:

            search_xpth = '//*[@id="root"]/div/div[2]/header/div[2]/span/input'
            text = '//*[@id="query"]'
            mouse_move_xpath_click(brower,search_xpth)
            time.sleep(1)
            el = brower.find_element(By.XPATH,search_xpth);
            el.send_keys("tiandiwulianwang")
            el.send_keys(Keys.ENTER)
            time.sleep(3)
            whref = ''
            for link in brower.find_elements_by_xpath("//*[@href]"):#获取当前页面的href
                x = link.get_attribute('href')
                print(x)
                if '/details/postRead' in x:
                    whref = x
            print(whref)
            brower.get(whref)
            s = []
            for link1 in brower.find_elements_by_xpath("//*[@href]"):
                y = link1.get_attribute('href')
                if 'https://mp.weixin.qq.com/s?__biz' in y:
                    print(y)
                    s.append(y)

            for link2 in s:
                index_html_path = wechats.down_html(link2,'ruguo')


def mouse_move_text_click(brower, param):
    print("Element {} not in view, scrolling to it".format(param))
    target = brower.find_element_by_link_text(param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()

def mouse_move_xpath_click(brower, param):
    print("Element {} not in view, scrolling to it".format(param))
    wait = WebDriverWait(brower, 10)
    wait.until(lambda brower: brower.find_element_by_xpath(param))
    target = brower.find_element(By.XPATH, param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()

def second_except_id_click(brower, param):
    logger.error("Second click attempt on id=" + param)
    brower.execute_script('window.scrollTo(0, document.body.scrollHeight)')
    brower.find_element_by_id(param).click()

def except_text_click(brower, param):
    logger.error("Element not found, scrolling into view by link text")
    target = brower.find_element_by_link_text(param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()

def except_xpath_click(brower, param):
    logger.error("Element not found, scrolling into view by xpath")
    target = brower.find_element(By.XPATH, param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()

def except_id_click(brower, param):
    logger.error("Element not found, scrolling into view by id")
    target = brower.find_element(By.ID, param)
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()
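
# A sketch (not part of the original flow) of the same scroll-and-click pattern using
# Selenium's expected_conditions, imported above as EC but otherwise unused:
def wait_click_xpath(brower, param):
    wait = WebDriverWait(brower, 10)
    target = wait.until(EC.element_to_be_clickable((By.XPATH, param)))
    brower.execute_script("arguments[0].scrollIntoView(false);", target)
    target.click()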
"""
根据text点击
"""
def click_text(brower, indicator):

    r = brower.find_element_by_link_text(indicator['text_name'])
    ActionChains(brower).move_to_element(r).click(r).perform()
    wait=WebDriverWait(brower,10)#等待元素加载出来


def click_scroll_id(brower, param):
    time.sleep(1)
    try:
        mouse_move_text_click(brower, param)
        brower.find_element_by_id(param).click()
    except (NoSuchElementException, WebDriverException):
        # fall back to scrolling to the bottom and clicking by id
        second_except_id_click(brower, param)

def click_text_str(brower, param):
    wait = WebDriverWait(brower, 10)
    wait.until(lambda brower: brower.find_element_by_link_text(param))  # wait for the element to load
    r = brower.find_element_by_link_text(param)
    ActionChains(brower).move_to_element(r).click(r).perform()

def click_xpath(brower, indicator):
    r = brower.find_element(By.XPATH, indicator['craw_attr'])
    ActionChains(brower).move_to_element(r).click(r).perform()

def click_xpath_str(brower, param):
    wait = WebDriverWait(brower, 10)
    wait.until(lambda brower: brower.find_element_by_xpath(param))  # wait for the element to load
    r = brower.find_element(By.XPATH, param)
    ActionChains(brower).move_to_element(r).click(r).perform()

def click_id(brower, indicator):
    r = brower.find_element(By.ID, indicator['craw_attr'])
    ActionChains(brower).move_to_element(r).click(r).perform()

def click_id_str(brower, param):
    r = brower.find_element(By.ID, param)
    ActionChains(brower).move_to_element(r).click(r).perform()

"""SQL封装"""
def query_list(param):
    cur = mysql.conn.cursor()
    mysql.field('*')
    mysql.where(" p_id="+str(param)+" and is_used=0 and class_type=4")
    mysql.order_sql = " order by Id"

    indicatorList = mysql.find(0)
    return indicatorList
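
# Aside (not in the original): the WHERE clause above is built by string concatenation.
# If the wrapper exposes a standard DB-API cursor, a parameterized query is safer, e.g.
# (hypothetical usage; the table name is assumed from the constructor above):
# cur.execute("SELECT * FROM table_carwler WHERE p_id=%s AND is_used=0 AND class_type=4 ORDER BY Id", (param,))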

if __name__ == '__main__':
    wechat_crawler_start(0)
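
wechats.down_html appears to come from the wechatsogou-based utils in the earlier post and saves each article page locally. If you don't have that helper, a minimal stand-in using requests could look like the sketch below; the folder default and the filename scheme are assumptions:

import hashlib
import os
import requests

def save_article_html(url, folder='ruguo'):
    # Fetch one article page and write it to disk (hypothetical naming scheme).
    os.makedirs(folder, exist_ok=True)
    resp = requests.get(url, timeout=10)
    resp.encoding = 'utf-8'
    name = hashlib.md5(url.encode('utf-8')).hexdigest() + '.html'
    file_path = os.path.join(folder, name)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write(resp.text)
    return file_path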
 

 
