Crawler in Practice 9: Scraping Merchant Contact Information from 1688

This post walks through a small requests + BeautifulSoup script that searches 1688.com's company directory for a keyword, follows each store's homepage to its contact-info page, and collects the contact details.

# coding:utf-8
import requests
import bs4
import time
import xlwt
import random


def get_urls(url, page):
    """Return the homepage URL of every store on one page of search results."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive'}
    # Pagination is driven by the beginPage query parameter appended to the search URL.
    html = requests.get(url + "&beginPage=" + str(page), headers=headers, timeout=10).text
    soup = bs4.BeautifulSoup(html, "lxml")
    # Each search hit sits in a div.list-item-left inside the main result block.
    items = soup.find('div', attrs={'id': 'sw_mod_mainblock'}).find('ul').find_all('div', class_='list-item-left')
    urls = []
    for item in items:
        urls.append(item.find('a').get('href'))
    return urls
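
Before looping over a hundred result pages it is worth spot-checking that these selectors still match 1688's live markup. A quick manual test, using the same search URL that main() builds further down:

search_url = ("http://s.1688.com/company/company_search.htm?keywords=%BE%AB%C3%DC%BB%FA%D0%B5"
              "&earseDirect=false&button_click=top&n=y&pageSize=30")
for store_url in get_urls(search_url, 0):
    print(store_url)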


def get_contact(url_1):
    """Fetch one store's homepage, follow its contact-info tab, and return (title, fields)."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate'}
    session = requests.Session()  # reuse cookies between the homepage and contact-page requests
    try:
        # The contact page is linked from the shop's top navigation bar,
        # in the <li> tagged data-page-name="contactinfo".
        html = session.get(url_1, headers=headers, timeout=10).text
        contact_url = bs4.BeautifulSoup(html, 'lxml').find('div', class_='top-nav-bar-box').find(
            'li', attrs={'data-page-name': 'contactinfo'}).find('a').get('href')
    except Exception:
        print('no contact-info link found on', url_1)
        return None
    try:
        html = session.get(contact_url, headers=headers, timeout=10).text
        soup = bs4.BeautifulSoup(html, 'lxml')
        table = soup.find('div', class_='fd-line').find_all('dl')
        title = soup.find('div', class_='contact-info').find('h4').get_text()
        info = []
        for item in table[:-1]:  # the last <dl> is not a contact field, so skip it
            info.append(item.get_text().replace('\n', '').replace('\xa0', ''))
        return (title, info)
    except Exception:
        print('failed to parse contact page', contact_url)
        return None

def main():
    # keywords is the GBK percent-encoding of the search term:
    # "%BE%AB%C3%DC%BB%FA%D0%B5" decodes to "精密机械" ("precision machinery").
    url = ("http://s.1688.com/company/company_search.htm?keywords=%BE%AB%C3%DC%BB%FA%D0%B5"
           "&earseDirect=false&button_click=top&n=y&pageSize=30")
    results = []
    for page in range(100):
        for url_1 in get_urls(url, page):  # every store on the page, including the last one
            data = get_contact(url_1)
            if data is not None:
                print(data)
                results.append(data)
            time.sleep(random.uniform(1, 3))  # throttle requests to avoid hammering the site
    return results


if __name__ == "__main__":
    main()
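
The script imports xlwt but never actually persists what it scrapes. Below is a minimal sketch of the missing save step, assuming main() returns the list of (title, info) tuples as written above; the one-store-per-row layout and the save_xls name are illustrations, not something the original specifies.

def save_xls(results, path='1688_contacts.xls'):
    """Write one store per row: shop title in column 0, contact fields after it."""
    workbook = xlwt.Workbook(encoding='utf-8')
    sheet = workbook.add_sheet('contacts')
    for row, (title, info) in enumerate(results):
        sheet.write(row, 0, title)            # shop title
        for col, field in enumerate(info, start=1):
            sheet.write(row, col, field)      # one contact field per column
    workbook.save(path)

Hooking it up is then a one-line change in the main guard: save_xls(main()).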
