# 1. 完整代码 (complete code)
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from urllib.parse import quote
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
import csv
def index_page(pages, keyword):
    """Crawl Tmall search result pages 1..pages for *keyword*.

    Relies on the module-level ``brower`` (WebDriver) and ``wait``
    (WebDriverWait) globals created in the ``__main__`` block.
    For each page it navigates (via the pager's "skip to" box for
    pages after the first), waits until the pager shows the expected
    page number, then delegates scraping to get_products().
    """
    url = 'https://list.tmall.com/search_product.htm?q=' + quote(keyword)
    brower.get(url)
    for page in range(1, pages + 1):
        print('正在爬取第' + str(page) + '页')
        if page > 1:
            # `page_input` (not `input`) so the builtin input() is not shadowed.
            page_input = wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '.ui-page-skipTo')))
            submit = wait.until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, 'button.ui-btn-s')))
            page_input.clear()
            page_input.send_keys(page)
            # NOTE: the original called brower.implicitly_wait(10) here; that
            # sets a session-global timeout and is redundant next to the
            # explicit waits above/below, so it was removed.
            submit.click()
        # Confirm the pager reflects the requested page before scraping it.
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '.ui-page-num'), str(page)))
        get_products(brower)
def get_products(brower):
    """Parse the current result page and persist every product row.

    brower: a selenium WebDriver whose ``page_source`` holds the listing
    HTML. For each ``div .product`` item, extracts image URL, title,
    price, detail link, shop name and sales text, writes the row via
    saveinformation() and echoes it to stdout.
    """
    doc = pq(brower.page_source)
    for item in doc('div .product').items():
        # attr()/text() can return None for missing fields, hence the
        # str() normalization before the row is written to CSV.
        information = [
            str(item.find('div div a img').attr('src')),
            str(item.find('.productTitle a').text()),
            str(item.find('.productPrice em').text()),
            str(item.find('div div a').attr('href')),
            str(item.find('.productShop a').text()),
            str(item.find('.productStatus span em').text()),
        ]
        saveinformation(information)
        print(information)
def saveinformation(infors):
    """Append one product row (a list of strings) to tian_mao.csv.

    Bug fix: the original opened the file with mode ``'w+'`` on every
    call, truncating it so only the LAST scraped product survived.
    Append mode ``'a'`` accumulates one row per call instead.
    """
    with open('tian_mao.csv', 'a', newline='', encoding='utf-8') as f:
        csv.writer(f).writerow(infors)
if __name__ == '__main__':
    # Interactive entry point: ask for the page count and search keyword,
    # then drive a Chrome session through the crawl.
    page = int(input("输入页数:"))
    keyword = input("输入需要获取的商品名称:")
    brower = webdriver.Chrome()
    wait = WebDriverWait(brower, 20)
    try:
        brower.maximize_window()
        index_page(page, keyword)
    finally:
        # Always release the browser process, even if the crawl raises.
        brower.quit()
# 2. 爬取第二页时需要手机扫码登录 (note: navigating to page 2 triggers a QR-code login that must be completed manually on a phone)