1、环境
pycharm+selenium+pyquery+xlsxwriter+xlrd
2、功能说明
从前文《爬取亚马逊商品一》中发现了不能自己选择查找内容的弊端,所以改为通过 xlrd 从 Excel 中获取关键词列表,爬取亚马逊指定关键词商品的信息,并将获取到的信息通过 xlsxwriter 写入到 Excel 中
3、完整代码
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from pyquery import PyQuery as Pq
import xlsxwriter,xlrd
def parse_detail(page_source, page_index, key):
    """Parse one Amazon search-result page and append each product to Excel.

    Args:
        page_source: HTML source of the rendered search-result page.
        page_index: 1-based page number, used to build the position string.
        key: the search keyword, written as the first Excel column.

    Returns:
        bool: True when this page is the last result page, else False.
    """
    global write_row
    isEnd = False
    doc = Pq(page_source)
    # On the last result page the "Next" control carries .a-disabled.a-last;
    # a non-empty text there means there is no further page to crawl.
    last = doc('.a-disabled.a-last').text()
    if last.strip() != "":
        isEnd = True
    print("last text = ", last, " isEnd = ", isEnd)
    # Container that holds every result card on the page.
    main_list = doc('.s-main-slot.s-result-list.s-search-results.sg-row')
    par_list = main_list.children().items()
    normal_count = 0
    for child in par_list:
        # Cards without a data-asin attribute are not real product links.
        # Fix: attr() returns None when the attribute is absent, so the
        # original `asin.strip()` raised AttributeError — guard for None first.
        asin = child.attr('data-asin')
        if not asin or not asin.strip():
            continue
        normal_count += 1
        # Non-empty text here marks the card as a sponsored (ad) listing.
        Issponsor = child('.s-label-popover-default .a-size-mini.a-color-secondary').text()
        # Product title.
        title = child('.a-size-base-plus.a-color-base.a-text-normal').text()
        # Discounted items expose several prices; keep only the first one.
        price_list = child('.a-price .a-offscreen').text().split(" ")
        price = price_list[0]
        # Position of this product as "<page>-<rank within page>".
        cur_pos = str(page_index) + "-" + str(normal_count)
        print(key, title, ", ", price, ", ", Issponsor, ", ", cur_pos)
        # Append one row to the module-level worksheet.
        write_row += 1
        worksheet.write_row('A' + str(write_row), [key, title, price, Issponsor, cur_pos])
    print(normal_count)
    return isEnd
if __name__ == '__main__':
    # ---- load the search keywords from the first column of the Excel file ----
    key_list = []
    try:
        # The workbook must already exist; any read problem aborts the run.
        data = xlrd.open_workbook("C:/Users/45906/Desktop/关键词.xlsx")
        table = data.sheet_by_index(0)
        key_list = table.col_values(0)
        print(key_list)
        # Skip the header cell if the sheet has one.
        if key_list and key_list[0] == "关键词":
            key_list.remove("关键词")
        print(key_list)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
        print("excel Abnormal operation")
        exit()

    # NOTE(review): this mutates the shared DesiredCapabilities.CHROME dict but
    # is never passed to webdriver.Chrome below, so the "none" page-load
    # strategy never takes effect — confirm whether it should be wired in via
    # chrome_options.page_load_strategy. Kept as-is to preserve behaviour.
    desired_capabilities = DesiredCapabilities.CHROME
    desired_capabilities["pageLoadStrategy"] = "none"

    chrome_options = webdriver.ChromeOptions()
    # Headless mode (currently disabled).
    # chrome_options.add_argument('--headless')
    # Disable GPU acceleration to avoid heavy CPU usage.
    chrome_options.add_argument('--disable-gpu')
    # Relax the browser security policy.
    chrome_options.add_argument("disable-web-security")
    # Do not load images (preference value 2 = block).
    chrome_options.add_experimental_option('prefs', {'profile.managed_default_content_settings.images': 2})
    # Hide the "Chrome is being controlled by automated software" infobar.
    chrome_options.add_argument('disable-infobars')
    # Start in developer mode so the webdriver property reads as a normal value.
    chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
    # Spoof a desktop Chrome user agent.
    chrome_options.add_argument('user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"')

    driver = webdriver.Chrome(options=chrome_options)
    # Shared explicit wait used before parsing each result page.
    wait = WebDriverWait(driver, 20)
    # driver.maximize_window()

    # Next Excel row to write; row 1 holds the header written below.
    write_row = 1
    workbook = xlsxwriter.Workbook('C:/Users/45906/Desktop/test.xlsx')
    worksheet = workbook.add_worksheet()  # create one sheet
    excel_title = ["关键词", "标题", "价格", "模式", "自然位置"]
    worksheet.write_row('A1', excel_title)  # write the header row

    print(time.strftime("start %Y-%m-%d %H:%M:%S", time.localtime()))
    # Only the keyword and the page number vary in the search URL.
    search_page_url = 'https://www.amazon.com/s?k={}&page={}'
    for key in key_list:
        # Crawl at most 10 result pages per keyword. Fix: the original
        # range(1, 10) stopped after 9 pages despite the comment promising 10.
        for i in range(1, 11):
            print("正在爬取", search_page_url.format(key, i))
            driver.get(search_page_url.format(key, i))
            time.sleep(3)
            try:
                # Wait for the result list, then parse; stop paging this
                # keyword once the last page is reached.
                wait.until(ec.presence_of_element_located((By.CSS_SELECTOR, "div.s-result-list")))
                isEnd = parse_detail(driver.page_source, i, key)
                if isEnd:
                    break
            except Exception:
                # Fix: the original called format(i) with a single argument on
                # a two-placeholder template, raising IndexError inside the
                # handler itself; also narrowed from a bare `except:`.
                print("url: " + search_page_url.format(key, i) + "获取失败")
    print(time.strftime("end %Y-%m-%d %H:%M:%S", time.localtime()))
    driver.quit()
    workbook.close()
4、结果
5、缺点
目前读和写分别依赖 xlrd 和 xlsxwriter 两个模块;其实 Python 有可以同时完成读写的模块 openpyxl,后续可以用它来简化代码