爬天猫,京东品牌

淘宝品牌

京东

# -*- coding:utf-8 -*-
# 爬虫_selenium

import json
import itertools

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from bs4 import BeautifulSoup
import time
import pandas as pd 
import numpy as np 


class Spider:
    """Scrape brand names from JD search-result brand filter panels via Selenium.

    Workflow: manual login, then for each search keyword type it into the
    search box, expand the brand panel and save one JSON line per brand.
    """

    def __init__(self):
        self.setOpts()
        # Selenium 4 removed the ``chrome_options=`` and ``executable_path=``
        # keyword arguments (the rest of this file already uses the Selenium 4
        # ``find_element(By.XPATH, ...)`` API); pass the options object via
        # ``options=`` and the driver path via a Service object instead.
        from selenium.webdriver.chrome.service import Service
        self.bw = webdriver.Chrome(
            options=self.options,
            service=Service('./Chrome/chromedriver'),
        )
        self.fo = open('down.txt', 'a', encoding='utf-8')
        self.item_cnt = itertools.count(1)  # running count of finished keywords
        self.errors = []                    # keywords that raised while crawling

    def __del__(self):
        # Best-effort cleanup: attributes may not exist if __init__ failed
        # partway, and __del__ may run during interpreter shutdown.
        try:
            # quit() terminates the whole driver session and the chromedriver
            # process; close() would only close the current window.
            self.bw.quit()
        except Exception:
            pass
        try:
            self.fo.close()
        except Exception:
            pass
        print('error word', self.errors)

    def setOpts(self):
        """Build the ChromeOptions used to launch the browser."""
        self.options = webdriver.ChromeOptions()
        self.options.add_argument('--log-level=3')  # silence console noise
        self.options.add_argument(
            '--user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/64.0.3282.186 Safari/537.36'
        )

    def save(self, obj):
        """Append *obj* to the output file as one JSON line (UTF-8, not escaped)."""
        self.fo.write(json.dumps(obj, ensure_ascii=False) + '\n')

    def parse(self, html, word):
        """Extract brand names from a search-result page and save each one.

        Args:
            html: page source of a JD search-result page.
            word: the search keyword that produced the page.
        """
        soup = BeautifulSoup(html, 'lxml')
        rank = itertools.count(1)  # 1-based position of the brand in the panel

        # JD renders the brand filter either as a logo grid (sle_node1) or as
        # a plain text list (sle_node2); fall back when the grid is absent.
        nodes = soup.select(self.sle_node1) or soup.select(self.sle_node2)
        for node in nodes:
            self.save({
                'word': word,
                'sort': next(rank),
                'brand': node.get_text().strip(),
                'host': self.host,
            })

    def control(self, word):
        """Search for *word*, expand the brand panel and parse the page."""
        wait = WebDriverWait(self.bw, 5)

        # Fill in the search keyword.
        wait.until(EC.presence_of_element_located((By.ID, self.search_inp_id)))
        box = self.bw.find_element(By.XPATH, self.search_inp)
        box.clear()
        box.send_keys(word)
        time.sleep(1)

        # Click the search button.
        wait.until(EC.presence_of_element_located((By.ID, self.search_but_id)))
        self.bw.find_element(By.XPATH, self.search_but).click()
        time.sleep(2)

        # Click "more" to expand the full brand list.
        wait.until(EC.presence_of_element_located((By.ID, self.expand_but_id)))
        self.bw.find_element(By.XPATH, self.expand_but).click()
        time.sleep(2)

        self.parse(self.bw.page_source, word)

    def setAttrs(self):
        """Set JD-specific URLs, element locators and CSS selectors."""
        self.login_url = 'https://passport.jd.com/uc/login?'
        self.start_url = 'https://search.jd.com/Search?'

        self.search_inp_id = 'key'
        self.search_inp = '//*[@id="key"]'
        self.search_but_id = 'search-2014'
        self.search_but = '//*[@id="search-2014"]/div/button'
        self.expand_but_id = 'J_selector'
        self.expand_but = '//*[@id="J_selector"]/div[1]/div/div[3]/a[2]'

        self.host = 'JD'
        # Brand panel variants: logo grid (node1) vs plain list (node2).
        self.sle_node1 = '#J_selector > div.J_selectorLine.s-brand > div > div.sl-value > div.sl-v-logos > ul li a'
        self.sle_node2 = '#J_selector > div.J_selectorLine.s-brand > div > div.sl-value > div.sl-v-list > ul li a'

    def login(self):
        """Open the login page, wait for a manual login, then load the search page."""
        self.bw.get(self.login_url)
        input('登录后回车...')  # prompt: press Enter after logging in manually
        self.bw.get(self.start_url)
        time.sleep(3)

    def getWords(self):
        """Return the list of search keywords to crawl."""
        # Alternatively load the keywords from a spreadsheet:
        # words = pd.read_excel('搜索依据.xlsx', sheet_name=0)['word'].tolist()
        return ['衣架', '保鲜膜', '雨伞', '记忆枕', '毛巾', '饭盒', '洗手液',
                '油漆', '墙贴', '台灯', '节能灯', '数据线', '充电宝', '摄影']

    def action(self):
        """Crawl every keyword, collecting failures instead of aborting."""
        self.setAttrs()
        self.login()
        for word in self.getWords():
            try:
                self.control(word)
                print('num【%s】' % next(self.item_cnt), word, 'is ok...')
            except Exception as e:
                # Record the failed keyword and continue with the rest.
                self.errors.append(word)
                print(word, 'error', repr(e))



# Script entry point: build the JD spider and run the full crawl.
if __name__ == '__main__':
    spider = Spider()
    spider.action()

天猫

# -*- coding:utf-8 -*-
# 爬虫_selenium

import json
import itertools

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

from bs4 import BeautifulSoup
import time
import pandas as pd 
import numpy as np 


class Spider:
    """Scrape brand names from Tmall search-result brand filter panels via Selenium.

    Workflow: manual login, then for each search keyword type it into the
    search box, expand the brand panel and save one JSON line per brand.
    """

    def __init__(self):
        self.setOpts()
        # Selenium 4 removed the ``chrome_options=`` and ``executable_path=``
        # keyword arguments (the rest of this file already uses the Selenium 4
        # ``find_element(By.XPATH, ...)`` API); pass the options object via
        # ``options=`` and the driver path via a Service object instead.
        from selenium.webdriver.chrome.service import Service
        self.bw = webdriver.Chrome(
            options=self.options,
            service=Service('./Chrome/chromedriver'),
        )
        self.fo = open('down.txt', 'a', encoding='utf-8')
        self.item_cnt = itertools.count(1)  # running count of finished keywords
        self.errors = []                    # keywords that raised while crawling

    def __del__(self):
        # Best-effort cleanup: attributes may not exist if __init__ failed
        # partway, and __del__ may run during interpreter shutdown.
        try:
            # quit() terminates the whole driver session and the chromedriver
            # process; close() would only close the current window.
            self.bw.quit()
        except Exception:
            pass
        try:
            self.fo.close()
        except Exception:
            pass
        print('error word', self.errors)

    def setOpts(self):
        """Build the ChromeOptions used to launch the browser."""
        self.options = webdriver.ChromeOptions()
        self.options.add_argument('--log-level=3')  # silence console noise
        self.options.add_argument(
            '--user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) '
            'AppleWebKit/537.36 (KHTML, like Gecko) '
            'Chrome/64.0.3282.186 Safari/537.36'
        )

    def save(self, obj):
        """Append *obj* to the output file as one JSON line (UTF-8, not escaped)."""
        self.fo.write(json.dumps(obj, ensure_ascii=False) + '\n')

    def parse(self, html, word):
        """Extract brand names from a search-result page and save each one.

        Args:
            html: page source of a Tmall search-result page.
            word: the search keyword that produced the page.
        """
        soup = BeautifulSoup(html, 'lxml')
        rank = itertools.count(1)  # 1-based position of the brand in the panel
        for node in soup.select(self.sle_node):
            self.save({
                'word': word,
                'sort': next(rank),
                # strip() for consistency with the JD scraper's output
                'brand': node.get_text().strip(),
                'host': self.host,
            })

    def control(self, word):
        """Search for *word*, expand the brand panel and parse the page."""
        wait = WebDriverWait(self.bw, 5)

        # Fill in the search keyword.
        box = self.bw.find_element(By.XPATH, self.search_inp)
        box.clear()
        box.send_keys(word)

        # Click the search button.
        wait.until(EC.presence_of_element_located((By.ID, self.search_but_id)))
        self.bw.find_element(By.XPATH, self.search_but).click()
        time.sleep(3)

        # Click "more" to expand the full brand list.
        wait.until(EC.presence_of_element_located((By.ID, self.expand_but_id)))
        self.bw.find_element(By.XPATH, self.expand_but).click()
        time.sleep(2)

        self.parse(self.bw.page_source, word)

    def setAttrs(self):
        """Set Tmall-specific URLs, element locators and CSS selectors."""
        self.login_url = 'https://login.tmall.com/'
        self.start_url = 'https://www.tmall.com/'

        self.search_inp = '//*[@id="mq"]'
        self.search_but_id = 'mallSearch'
        self.search_but = '//*[@id="mallSearch"]/form/fieldset/div/button'
        self.expand_but_id = 'J_NavAttrsForm'
        self.expand_but = '//*[@id="J_NavAttrsForm"]/div/div[1]/div/div[2]/div[2]/a[2]'

        self.host = 'tmall'
        self.sle_node = '#J_NavAttrsForm > div > div.brandAttr.j_nav_brand > div > div.attrValues > ul > li a'

    def login(self):
        """Set attributes, wait for a manual login, then load the start page."""
        self.setAttrs()
        self.bw.get(self.login_url)
        input('登录后回车...')  # prompt: press Enter after logging in manually
        self.bw.get(self.start_url)
        time.sleep(3)

    def getWords(self):
        """Return the list of search keywords read from the spreadsheet."""
        words = pd.read_excel('搜索依据.xlsx', sheet_name=0)['word'].tolist()
        # Inline alternative for quick runs without the spreadsheet:
        # words = ['熨斗', '洗衣机', '电视', '榨汁机', '电饭煲', '固态硬盘', '存储卡', '鼠标']
        return words

    def action(self):
        """Crawl every keyword, collecting failures instead of aborting."""
        self.login()
        for word in self.getWords():
            try:
                self.control(word)
                print('num【%s】' % next(self.item_cnt), word, 'is ok...')
            except Exception as e:
                # Record the failed keyword and continue with the rest.
                self.errors.append(word)
                print(word, 'error', repr(e))



# Script entry point: build the Tmall spider and run the full crawl.
if __name__ == '__main__':
    spider = Spider()
    spider.action()
  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值