# coding:utf-8
import requests
import bs4
import time
import xlwt
import random
def get_urls(url, page):
"""获取查询商品的每家店的地址"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive'}
html = requests.get(url=url + "&beginPage=" + str(page), headers=headers, timeout=10).text
soup = bs4.BeautifulSoup(html, "lxml")
tables = soup.find('div', attrs={'id': 'sw_mod_mainblock'})
table = tables.find('ul').find_all('div', class_='list-
# --- NOTE(review): the two lines below are page-extraction residue (blog title
# and publish date), not code. The function above is cut off mid-statement at the
# `find_all('div', class_='list-` call and needs its original body restored. ---
# 爬虫实战:9,爬取1688商家联系方式  (Scraping in practice #9: scraping 1688 merchant contact info)
# 最新推荐文章于 2025-03-24 21:36:48 发布  (latest recommended article published 2025-03-24 21:36:48)