Market sales lookup for multiple SKUs (non-multithreaded)

1: ebay_ebayno.py


import random
from http.cookiejar import CookieJar
import requests
from bs4 import BeautifulSoup
import csv
import re
import xlrd
from my_feedback_ebayno import Feedback_ebayno

class EbaySpider(object):
    def __init__(self):
        self.SESSION = requests.session()
        self.SESSION.cookies = CookieJar()
        # print(self.SESSION.cookies)
        self.HEAD = self.randHeader()

    def randHeader(self):
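        # Build a header with a randomly chosen User-Agent so repeated requests look less uniform.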
        head_connection = ['Keep-Alive', 'close']
        head_accept = ['text/html, application/xhtml+xml, */*']
        head_accept_language = ['zh-CN,fr-FR;q=0.5', 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']
        head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                           'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
                           'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
                           'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
                           'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
                           'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
                           'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']

        header = {
            'Connection': head_connection[0],
            'Accept': head_accept[0],
            'Accept-Language': head_accept_language[1],
            'User-Agent': head_user_agent[random.randrange(0, len(head_user_agent))]
        }
        return header

    def getBeautifulSoup(self, query_url):
        r = self.SESSION.get(url=query_url, headers=self.HEAD)
        # print(self.SESSION.cookies)
        soup = BeautifulSoup(r.text, 'html.parser')
        return soup

    def getRates(self, query_url):
        r = self.SESSION.get(url=query_url, headers=self.HEAD)
        # print(self.SESSION.cookies)
        soup = BeautifulSoup(r.text, 'html.parser')
        content = soup.find("span", "rcnt")
        itemSize = int(str(content.string).replace(",", ""))
        print("Initial query: " + str(itemSize) + " items")
        # Grab the first ebayno on the page so the ship-to address can be changed
        itm = soup.find("div", "lvpic pic img left")['iid']
        print("Setting shipping-and-payments country to US")
        getrates_url = "http://www.ebay.com/itm/getrates?item=" + itm + "&country=1&co=0&cb=jQuery1705349737076189762_1501724760425"
        r = self.SESSION.get(url=getrates_url, headers=self.HEAD)  # send the request; the session keeps the resulting cookie

    # Scrape the ebaynos and prices returned by an eBay keyword search; returns a list of [ebayno, currency, price] entries
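    # Illustrative return shape (made-up values): [["292156789012", "US $", "25.99"], ["292156789013", "US $", "27.50"], ...]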
    def search(self, searchWords):
        searchWords = str(searchWords).replace(" ", "%20")
        # _ipg=100 -> 100 results per page
        query_url = "https://www.ebay.com/sch/i.html?_from=R40&_sacat=0&_ipg=100&rt=nc&_nkw=" + searchWords + "&_pgn=1&_skc=0"
        self.getRates(query_url=query_url)  # the default ship-to address is China, which returns fewer results; switching it to US returns the full set

        soup = self.getBeautifulSoup(query_url)
        content = soup.find("span", "rcnt")  # total result count shown on the page
        itemSize = int(content.string.replace(",", ""))  # handle counts like "1,000" that contain a comma
        pageSize = int(itemSize / 100)
        if itemSize % 100 != 0:
            pageSize += 1
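        # e.g. 734 items -> int(734 / 100) = 7 full pages, plus 1 for the remaining 34 -> pageSize = 8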

        print("总计" + str(itemSize) + "项,共" + str(pageSize) + "页(每页100条)")
        print("第1页....")
        result = []
        # scrape each listing on the page
        content = soup.find_all(attrs={'class': re.compile('^sresult lvresult clearfix li')})
        for i in content:
            # the listing's eBay item number
            ebayno = i.find_all("div", "lvpic pic img left")[0]['iid']
            # the listing's price
            price_content = i.li.find("span", "bold")
            cur = price_content.b.string  # currency
            s = str(price_content)
            price_ = re.findall(r'\d*,?\d*,?\d+\.\d+', s)
            price = str(price_[0]).replace(",", "")  # price, commas stripped
            result.append([ebayno, cur, price])
        # When a keyword matches thousands of results, keep only the first 600 ebaynos
        if pageSize >= 6:
            fpage = 6
            print("More than 600 results; truncating to the first 600")
        else:
            fpage = pageSize
        for _pgn in range(2, fpage + 1):
            print("第" + str(_pgn) + "页....")
            query_rl = "https://www.ebay.com/sch/i.html?_from=R40&_sacat=0&_ipg=100&rt=nc&_nkw=" + serchWords + "&_pgn=" + str(
                _pgn) + "&_skc=" + str((_pgn - 1) * 100)
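            # _skc is the number of results already skipped, e.g. page 3 -> _skc = 200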
            soup = self.getBeautifulSoup(query_url)
            # scrape each listing on the page
            content = soup.find_all(attrs={'class': re.compile('^sresult lvresult clearfix li')})
            for i in content:
                # the listing's eBay item number
                ebayno = i.find_all("div", "lvpic pic img left")[0]['iid']
                # the listing's price
                price_content = i.li.find("span", "bold")
                cur = price_content.b.string  # currency
                s = str(price_content)
                price_ = re.findall(r'\d*,?\d*,?\d+\.\d+', s)
                price = str(price_[0]).replace(",", "")  # price, commas stripped
                result.append([ebayno, cur, price])
        return result
    # Read each sku's keyword from the spreadsheet; returns a list of [sku, keyword, kind] entries
    def getSkuAndKeyWord(self):
        data = xlrd.open_workbook("window regulator_test.xlsx")  # open the Excel file
        table = data.sheet_by_index(0)  # first sheet
        nrows = table.nrows  # number of rows
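        # Assumed column layout, inferred from the parsing below:
        # col 0 = sku, col 1 = "prefix-partname", col 2 = "model|model|...", col 3 = kind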
        result = []
        for i in range(1, nrows):  # skip the header row
            rows = table.row_values(i)  # the row's cells as a list
            sku = rows[0]
            keyword = str(rows[1]).split("-")[1] + " " + str(rows[2]).replace("|", " ") + " window regulator"
            kind = rows[3]
            result.append([sku, keyword, kind])
        return result
    def writeEbaynoTocsv(self):
        sku_keyword = self.getSkuAndKeyWord()
        resultsku = []
        for i in range(len(sku_keyword)):
            sku = sku_keyword[i][0]
            resultsku.append([sku])
            keyword = sku_keyword[i][1]
            kind = sku_keyword[i][2]
            print(keyword)
            result = self.search(keyword)  # was ebay.search(keyword), which relied on the global created in __main__
            out = open("sku\\" + str(sku) + ".csv", "w", newline="")
            csv_writer = csv.writer(out)
            csv_writer.writerow(["sku", "kind", "ebayno", "ebay_closest_currency", "ebay_closest_price"])
            for j in range(len(result)):  # j instead of i, which shadowed the outer loop variable
                csv_writer.writerow([sku, kind, result[j][0], result[j][1], result[j][2]])
            out.close()
        return resultsku





if __name__ == '__main__':
    ebay = EbaySpider()
    skuarray = ebay.writeEbaynoTocsv()
    print(skuarray)
    for i in range(len(skuarray)):
        sku = skuarray[i][0]
        print(sku)
        feed = Feedback_ebayno(sku)
        feed.feedbackWriteToCsv()
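
Note: both scripts write their CSV files into a sku\ folder and will raise FileNotFoundError if it does not exist. A minimal guard (a sketch; "sku" matches the relative path used in the code above) can run before writeEbaynoTocsv:

import os

os.makedirs("sku", exist_ok=True)  # create the output folder if missing; no-op when it already exists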




2: my_feedback_ebayno.py


import pymysql
import csv
class Feedback_ebayno():
    def __init__(self, sku):

        self.sku = sku

    # Given an ebayno, return the matching feedback rows from the database
    def connectViogidata(self, ebayno):
        db = pymysql.connect(host='', user='', passwd='', db='', port=3306, charset='utf8')
        cursor = db.cursor()
        sql = 'select id, ebayno, product_name, price, currency, when_time, shop_name from feedback where ebayno = %s'
        cursor.execute(sql, (ebayno,))  # pass the parameter as a one-element tuple
        result = cursor.fetchall()  # fetch before closing; fetching after close raises an error
        cursor.close()
        db.close()
        return result


    def feedbackWriteToCsv(self):
        # write the joined results to a csv file
        out = open("sku\\feedback_" + str(self.sku) + ".csv", "w", newline="")
        csv_writer = csv.writer(out)
        csv_writer.writerow(["sku", "kind", "ebayno", "product_name", "price", "currency", "when_time", "shop_name", "ebay_closest_price", "ebay_closest_currency"])

        # read the ebaynos back from the per-sku csv file
        with open("sku\\" + str(self.sku) + ".csv", newline="") as f:
            reader = csv.reader(f)
            k = 0
            for row1 in reader:
                if k == 0:  # skip the header row
                    k = 1
                    continue
                k = k + 1
                if len(row1) == 0:
                    break  # stop at a blank row
                kind = row1[1]
                ebayno = row1[2]
                currency = row1[3]
                price = row1[4]
                feedback = self.connectViogidata(ebayno)  # tuple of rows from the database

                if feedback is not None:
                    for row in feedback:  # one csv line per database row
                        csv_writer.writerow([self.sku, kind, str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), price, currency])
        out.close()


# if __name__ == '__main__':
#
#     feed = Feedback_ebayno("A80050-740-485")
#     feed.feedbackWriteToCsv()
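
Since the spider is single-threaded and fires one search request after another, eBay may throttle or block it. A minimal sketch of spacing the requests out (the helper name throttled_get is hypothetical; the 1-3 second range is an assumption):

import random
import time

def throttled_get(session, url, headers):
    # Sleep a random 1-3 seconds before each request to spread out the load,
    # then issue the GET through the shared session so cookies are kept.
    time.sleep(random.uniform(1.0, 3.0))
    return session.get(url=url, headers=headers)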





