PYTHON Crawler--04--Hands-On--Maoyan | Toutiao | Taobao

一、Scraping the Maoyan Top 100

import requests
import json
import re
from requests.exceptions import RequestException
from multiprocessing import Pool



def get_one_page(url):
    # Send a desktop browser User-Agent so Maoyan does not reject the request
    kv = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=kv)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def parse_page(page):
    # Each movie sits inside a <dd> block: rank, poster URL, title,
    # starring actors, release time, and a score split into integer/fraction parts
    pattern = re.compile(r'<dd.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?>.*?name.*?'
                         r'boarditem-click.*?>(.*?)</a>.*?star">(.*?)'
                         r'</p>.*?releasetime">(.*?)</p>.*?score.*?integer">(.*?)</i>.*?fraction">(.*?)</i>', re.S)

    # pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name">'
    #                      + '<a.*?>(.*?)</a>.*?"star">(.*?)</p>.*?releasetime">(.*?)</p>'
    #                      + '.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)

    items = re.findall(pattern, page)
    for item in items:
        yield {
            'index': item[0],
            'pic': item[1],
            'fname': item[2],
            'actor': item[3].strip()[3:],   # drop the leading "主演:" label
            'rtime': item[4][5:],           # drop the leading "上映时间:" label
            'score': item[5] + item[6]      # join the integer and fraction parts, e.g. "9." + "6"
        }


def save_file(item):
    # Append each movie as one JSON object per line (json-lines format)
    with open('e:\\12.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n')


def main(offset):
    url = "http://maoyan.com/board/4?offset="+str(offset)
    page = get_one_page(url)
    # print(page)
    for item in parse_page(page):
        print(item)
        save_file(item)


if __name__ == '__main__':
    pool = Pool()
    pool.map(main, [x*10 for x in range(10)])
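
Each record is appended to e:\12.txt as one JSON object per line, so the results can be read back with a small json-lines loader. A minimal sketch (the path is the same one used in save_file above):

import json

# Load the Maoyan records written by save_file(), one JSON object per line
with open('e:\\12.txt', encoding='utf-8') as f:
    movies = [json.loads(line) for line in f if line.strip()]

print(len(movies))   # up to 100 records
print(movies[0])     # dict with index / pic / fname / actor / rtime / score keys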

 

二、Analyzing Ajax Requests to Scrape Toutiao Street-Style Photo Galleries

# -*- coding:utf-8 -*-
import requests
from urllib.parse import urlencode
from requests import RequestException
import json
from bs4 import BeautifulSoup
import lxml
import re
from config import *
import pymongo
import os
from hashlib import md5
from multiprocessing import Pool

client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]


def get_page_index(kw, offset):
    # Query-string parameters copied from the Ajax request the search page sends
    data = {
        'autoload': 'true',
        'count': '20',
        'cur_tab': '1',
        'format': 'json',
        'from': 'search_tab',
        'keyword': kw,
        'offset': offset
    }

    url = 'https://www.toutiao.com/search_content/?' + urlencode(data)

    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Error fetching the index page')
        return None


def parse_page_index(html):
    # The index response is JSON; every item carrying an article_url points to a gallery page
    data = json.loads(html)
    if 'data' in data.keys():
        for item in data.get('data'):
            if 'article_url' in item.keys():
                yield item.get('article_url')


def get_page_detail(url):
    headers = {
        'user-agent': 'Mozilla/5.0'
    }

    try:
        response = requests.get(url, headers = headers)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Error fetching the detail page')
        return None


def parse_page_detail(html, url):
    if html is not None:
        soup = BeautifulSoup(html, 'lxml')
        title = soup.select('title')[0].get_text()
        # print(title)
        # The image list is embedded in the page as gallery: JSON.parse("..."); pull it out with a regex
        pattern = re.compile(r'gallery: JSON.parse\("(.*?)"\)', re.S)
        gallery = re.search(pattern, html)
        if gallery:
            # Strip the escaping backslashes before parsing the embedded JSON
            result = json.loads(gallery.group(1).replace("\\", ""))
            if 'sub_images' in result.keys():
                sub_images = result.get('sub_images')
                images = [item.get('url') for item in sub_images]
                for image in images:
                    download_pic(image)
                return {
                    'title': title,
                    'url': url,
                    'images': images
                }


def save_to_mongo(data):
    if data is not None:
        # insert_one replaces the deprecated insert() in newer pymongo versions
        if db[MONGO_TABLE].insert_one(data):
            print('Inserted into MongoDB successfully')
            return True
        return False


def download_pic(url):
    headers = {
        'user-agent': 'Mozilla/5.0'
    }
    print('Downloading ' + url)
    try:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            save_pic(response.content)
        return None
    except RequestException:
        print('Error downloading the image')
        return None


def save_pic(pic):
    # Name each image by the MD5 of its content to deduplicate downloads
    pic_dir = os.path.join(os.getcwd(), 'pic')
    os.makedirs(pic_dir, exist_ok=True)
    file_path = '{0}/{1}.{2}'.format(pic_dir, md5(pic).hexdigest(), 'jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(pic)


def main(offset):
    html = get_page_index(KEYWORD, offset)
    for url in parse_page_index(html):
        detail = get_page_detail(url)
        result = parse_page_detail(detail,url)
        save_to_mongo(result)


if __name__ == '__main__':
    pool = Pool()
    pool.map(main,[i*20 for i in range(GROUP_START,GROUP_END)])
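
The script imports MONGO_URL, MONGO_DB, MONGO_TABLE, KEYWORD, GROUP_START and GROUP_END from a config module that is not shown above. A minimal config.py sketch (the concrete values below are assumptions to make the example runnable):

# config.py -- hypothetical values; adjust to your own environment
MONGO_URL = 'localhost'      # MongoDB connection string or host
MONGO_DB = 'toutiao'         # database name
MONGO_TABLE = 'jiepai'       # collection name
KEYWORD = '街拍'             # search keyword passed to the Ajax endpoint
GROUP_START = 0              # first page group (offset = i * 20)
GROUP_END = 20               # last page group (exclusive)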

 

三、Scraping Taobao Product Listings with Selenium

  • Note: Selenium and PhantomJS have parted ways, so Chrome's headless mode is used here instead.
# -*- coding: utf-8 -*-
import re
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyquery import PyQuery as pq
import config
import pymongo


options = Options()
options.add_argument('--headless')  # run Chrome headless (no visible window)
# driver = Firefox(executable_path='geckodriver', firefox_options=options)
browser = webdriver.Chrome(options=options)
wait = WebDriverWait(browser, 10)
client = pymongo.MongoClient(config.MONGO_URL)
db = client[config.MONGO_DB]


def search():
    try:
        browser.get('https://www.taobao.com/')
        element = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#q"))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#J_TSearchForm > div.search-button > button"))
        )
        element.send_keys('美食')
        submit.click()
        page = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.total"))
        )
        return page
    except TimeoutException:
        return search()


def next_page(page_number):
    try:
        input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > input"))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit"))
        )
        input.clear()
        input.send_keys(page_number)
        submit.click()
        wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, "#mainsrp-pager > div > div > div > ul > li.item.active > span"),str(page_number)))
        get_products()
    except TimeoutException:
        next_page(page_number)


def get_products():
    wait.until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "#mainsrp-itemlist .items .item"))
    )
    html = browser.page_source
    doc = pq(html)
    items = doc('#mainsrp-itemlist .items .item').items()
    for item in items:
        product = {
            'pic': 'https:' + item.find('.pic .img').attr('src'),
            'price': item.find('.price').text().replace('\n', ''),
            'count': item.find('.deal-cnt').text()[:-3],   # drop the trailing "人付款" label
            'title': item.find('.title').text().replace('\n', ''),
            'shop': item.find('.shop').text(),
            'city': item.find('.location').text()
        }
        #print(product)
        save_to_mongo(product)


def save_to_mongo(result):
    try:
        # insert_one replaces the deprecated insert() in newer pymongo versions
        if db[config.MONGO_TABLE].insert_one(result):
            print('save to mongo success')
    except Exception:
        print('fail to save to mongo', result)


def main():
    try:
        # The pagination element reads something like "共 100 页,"; extract the page count
        total = search().text
        pattern = re.compile(r'(\d+)')
        total = int(re.search(pattern, total).group(1))
        for i in range(2, total + 1):
            next_page(i)
    except Exception:
        print('error')
    finally:
        browser.close()


if __name__ == '__main__':
    main()
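
This script also reads MONGO_URL, MONGO_DB and MONGO_TABLE from a config module. A minimal sketch with assumed values:

# config.py -- hypothetical values for the Taobao scraper
MONGO_URL = 'localhost'     # MongoDB connection string or host
MONGO_DB = 'taobao'         # database name
MONGO_TABLE = 'product'     # collection name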

 
