Scraping a store's ebaynos (eBay item numbers)

import os
import time
import random
import logging
import threading
import shutil
from http.cookiejar import CookieJar
from queue import Queue

import requests
import pandas as pd
from bs4 import BeautifulSoup

class EbaySpider(object):
    def __init__(self):
        self.SESSION = requests.session()
        self.SESSION.cookies = CookieJar()
        self.HEAD = self.randHeader()

    def randHeader(self):
        # Build a request header with a randomly chosen User-Agent.
        head_connection = ['Keep-Alive', 'close']
        head_accept = ['text/html, application/xhtml+xml, */*']
        head_accept_language = ['zh-CN,fr-FR;q=0.5', 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']
        head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                           'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
                           'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
                           'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
                           'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
                           'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
                           'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']

        header = {
            'Connection': head_connection[0],
            'Accept': head_accept[0],
            'Accept-Language': head_accept_language[1],
            'User-Agent': random.choice(head_user_agent)
        }
        return header

    def getBeautifulSoup(self, query_url):
        # Fetch a page with the shared session and parse it.
        r = self.SESSION.get(url=query_url, headers=self.HEAD)
        soup = BeautifulSoup(r.text, 'html.parser')
        return soup

    def getRates(self):
        query_url = "https://www.ebay.com/sch/i.html?_from=R40&_sacat=0&_ipg=100&rt=nc&_nkw=window regulator&_pgn=1&_skc=0"
        r = self.SESSION.get(url=query_url, headers=self.HEAD)
        soup = BeautifulSoup(r.text, 'html.parser')
        content = soup.find("span", "rcnt")
        itemSize = int(str(content.string).replace(",", ""))
        # print("Initial query: " + str(itemSize) + " items")
        # Grab the first ebayno so we can change the delivery address.
        itm = soup.find("div", "lvpic pic img left")['iid']
        # print("Setting shipping-and-payments country to US")
        getrates_url = "http://www.ebay.com/itm/getrates?item=" + itm + "&country=1&co=0&cb=jQuery1705349737076189762_1501724760425"
        r = self.SESSION.get(url=getrates_url, headers=self.HEAD)  # send the request; the session keeps the returned cookie

    # Scrape all ebaynos from a seller's listings on eBay.
    def search(self, seller, outdirectory):
        # _ipg=100 -> 100 results per page
        file = outdirectory + str(seller) + ".xlsx"
        if os.path.exists(file):  # skip sellers that were already scraped
            return
        print(seller)
        # exit()
        # query_url = "https://www.ebay.com/sch/Mirrors/33649/m.html?&_ssn="+seller+"&_ipg=100"  # restricted to one category
        # https://www.ebay.com/sch/m.html?_ssn=goody4less&rt=nc  # all categories
        query_url = "https://www.ebay.com/sch/m.html?&_ssn=" + seller + "&_ipg=100"
        self.getRates()  # the default delivery address is China, which hides some listings; switching it to US makes the result count complete
        soup = self.getBeautifulSoup(query_url)
        content = soup.find("span", "rcnt")  # total result count shown on the page
        itemSize = int(content.string.replace(",", ""))  # strip the thousands separator, e.g. "1,000"
        pageSize = int(itemSize / 100)
        if itemSize % 100 != 0:
            pageSize += 1

        print("总计" + str(itemSize) + "项,共" + str(pageSize) + "页(每页100条)")
        result = []
        for _pgn in range(1, pageSize + 1):
            print("Page %d..." % _pgn)
            _skc = (_pgn - 1) * 100  # offset of the first item on this page
            # e.g. https://www.ebay.com/sch/m.html?_sop=12&_ssn=goody4less&_pgn=3&_skc=100&rt=nc
            query_url = "https://www.ebay.com/sch/m.html?_ipg=100&rt=nc&_ssn=" + seller + "&_pgn=" + str(_pgn) + "&_skc=" + str(_skc)
            soup = self.getBeautifulSoup(query_url)
            lists = soup.find_all("div", "lvpic pic img left")
            for i in lists:
                ebayno = i["iid"]
                result.append([seller, ebayno])
        df = pd.DataFrame(result, columns=["seller","ebayno"])
        df = df.drop_duplicates()
        df["rank"]= df.index
        df.to_excel(file, index=False)
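
# A quick single-seller smoke test for EbaySpider, as a hedged sketch (the
# output directory here is illustrative, not from the original script):
#
#     spider = EbaySpider()
#     spider.search("goody4less", "./ebaynos/")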


class ThreadCrawl(threading.Thread):  # worker thread: ThreadCrawl subclasses threading.Thread

    def __init__(self, queue, outdirectory):  # the queue is shared by all workers
        FORMAT = time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime()) + "[EbaySpider]-----%(message)s------"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
        threading.Thread.__init__(self)
        self.queue = queue
        self.spider = EbaySpider()  # each worker owns its own EbaySpider instance
        self.outdirectory = outdirectory

    def run(self):
        while True:
            item = self.queue.get()  # block until a seller is available, then pop it from the head of the queue
            self.spider.search(item, self.outdirectory)  # scrape all ebaynos for this seller
            logging.info("now queue size is: %d" % self.queue.qsize())  # how many sellers are still queued
            self.queue.task_done()  # signal completion so queue.join() can unblock

class EbaySpiderJob():

    def __init__(self, size, qs, outdirectory):
        self.size = size  # number of worker threads
        self.qs = qs  # list of seller IDs to scrape
        self.outdirectory = outdirectory

    def work(self):
        toSpiderQueue = Queue()  # queue of sellers waiting to be scraped
        for i in range(self.size):
            t = ThreadCrawl(toSpiderQueue, self.outdirectory)
            t.daemon = True  # daemon workers exit with the main thread (setDaemon is deprecated)
            t.start()
        for q in self.qs:
            toSpiderQueue.put(q)  # enqueue each seller at the tail of the queue
        toSpiderQueue.join()  # block until every queued seller has been processed

def combine_data(combine_directory, outfile):  # merge every per-seller workbook in the directory
    dataframe = []
    for root, dirs, files in os.walk(combine_directory):
        if not files:  # directory is empty, nothing to merge
            return False
        for file in files:
            names = os.path.join(root, file)
            temp = pd.read_excel(names)
            dataframe.append(temp)
    temp = pd.concat(dataframe)
    temp.to_excel(outfile, index=False)
    return True

def file_number(directory):
    return len(os.listdir(directory))  # count the files scraped so far

def del_directory(directory):  # remove the directory tree
    shutil.rmtree(directory)

def create_directory(directory):  # create the output directory if it does not exist
    if not os.path.exists(directory):
        os.makedirs(directory)

def main(outdirectory):
    qs = ["goody4less", "motors_zone"]
    print(qs)
    # Keep re-running until every seller has an output file (search() skips sellers that are already done).
    while len(qs) != file_number(outdirectory):
        ebayJob = EbaySpiderJob(8, qs, outdirectory)
        ebayJob.work()
    # end of the scraping stage
if __name__ == '__main__':

    outdirectory = "D:/Pythonwork/shop_all_ebaynos/"
    outfile = "D:/Pythonwork/shop_all_ebaynos.xlsx"
    # Create the output directory
    create_directory(outdirectory)
    filenumber = file_number(outdirectory)
    print(filenumber)
    # Scrape; one workbook per seller is written to outdirectory
    main(outdirectory)
    # Merge the per-seller workbooks into a single file
    combine_data(outdirectory, outfile)
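
To sanity-check the merged workbook, a minimal sketch (assuming the default
paths above) could count distinct ebaynos per seller with pandas:

import pandas as pd

# Load the merged sheet written by combine_data and summarize it.
df = pd.read_excel("D:/Pythonwork/shop_all_ebaynos.xlsx")
print(df.groupby("seller")["ebayno"].nunique())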






