Scraping JD.com products with Selenium and pyquery

Pitfall
When using pyquery, selecting by id or class worked fine, but selecting directly by tag name kept returning empty results and no nodes could be found. The cause: pyquery's default parser treats the document as namespaced XML (the page markup carries an xmlns attribute), so plain tag-name selectors no longer match anything.
Fix: force the HTML parser explicitly
html = browser.page_source
doc = pq(html, parser='html')
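A minimal reproduction of the pitfall (a sketch; the hard-coded snippet below just mimics the xmlns attribute that pushes lxml into namespaced-XML mode):

from pyquery import PyQuery as pq

snippet = '<html xmlns="http://www.w3.org/1999/xhtml"><body><li>goods</li></body></html>'
print(pq(snippet)('li').text())                 # '' - default XML parsing puts every tag in a namespace
print(pq(snippet, parser='html')('li').text())  # 'goods' - the HTML parser ignores the namespace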

from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
import time
import pymongo


# To run headless, pass ChromeOptions via the options= keyword
# (the original chrome_option= keyword is not a valid argument):
# chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
# browser = webdriver.Chrome(options=chrome_options)
browser = webdriver.Chrome()
wait = WebDriverWait(browser, 10)


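# Open the JD home page, submit the search keyword, scroll to trigger lazy
# loading, scrape page 1, and return the total number of result pages.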
def search():
    browser.get('https://www.jd.com')
    try:
        key_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#key')))
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#search > div > div.form > button > i')))
        key_input.clear()
        key_input.send_keys('年货')  # search keyword ("New Year goods")
        submit.click()
        browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        time.sleep(3)
        total = wait.until(EC.presence_of_element_located((
            By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > em:nth-child(1) > b')))
        get_product()
        return int(total.text)
    except TimeoutException:
        return search()


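# Jump to the given page via the page-number box in the bottom pager, wait
# until the pager highlights that page, then scrape it.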
def next_page(page_number):
    try:
        num_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > input')))
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_bottomPage > span.p-skip > a')))
        num_input.clear()
        num_input.send_keys(page_number)
        submit.click()
        browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        time.sleep(3)
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '#J_bottomPage > span.p-num > a.curr'), str(page_number)))
        get_product()
    except TimeoutException:
        return next_page(page_number)


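# Parse the current result page with pyquery and save every product it lists.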
def get_product():
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_goodsList .clearfix .gl-item')))
    html = browser.page_source
    doc = pq(html, parser='html')
    items = doc('#J_goodsList .gl-warp.clearfix .gl-item .gl-i-wrap').items()
    for item in items:
        product = {
            'image': item.find('.p-img a img').attr('data-lazy-img'),
            'price': item.find('.p-price strong i').text(),
            'comment': item.find('.p-commit strong a').text(),
            'name': item.find('.p-name.p-name-type-2 a em').text().replace('\n', ' '),
            'shop': item.find('.p-shop').text()
        }
        print(product)
        save_to_mongo(product)


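# Persist a single product dict to MongoDB.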
def save_to_mongo(result):
    try:
        # insert() was removed in PyMongo 4; insert_one() is the current API
        if db[MONGO_TABLE].insert_one(result):
            print('Saved to MongoDB:', result)
    except Exception:
        print('Failed to save to MongoDB:', result)


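# Scrape page 1 via search(), then walk through the remaining pages.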
def main():
    total = search()
    for i in range(2, total+1):
        next_page(i)
    browser.quit()  # quit() also shuts down chromedriver; close() would only close the window


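# MongoDB connection settings; the collection is named after the search keyword.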
MONGO_URL = 'localhost'
MONGO_DB = 'jd'
MONGO_TABLE = '年货'
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]


if __name__ == '__main__':
    main()

References
Cui Qingcai (崔庆才), "Scraping Taobao products with Selenium" (使用Selenium爬取淘宝商品)
"pyquery in Python cannot select DOM nodes by tag name" (python中pyquery无法获取标签名的dom节点)
