Web scraper: crawling Toutiao street-photo galleries (part 2)


import os
import re
import json

import requests
from urllib import request


def b(url):
    """Fetch one article page and download every image in its gallery."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    html_str = response.text

    # The gallery data is embedded in the page as a string passed to JSON.parse()
    pattern = r'gallery: JSON\.parse\((.*)\),'
    match_res = re.search(pattern, html_str)

    # Create the download folder if it does not exist yet
    if not os.path.exists('load'):
        os.mkdir('load')

    if match_res:
        # group(1) is already a str: a quoted, escaped JSON string literal
        json_origin = match_res.group(1)
        # First loads() unwraps the string literal and still returns a str
        res_str = json.loads(json_origin)
        # Second loads() turns that inner JSON text into a dict
        res_dict = json.loads(res_str)

        sub_images_list = res_dict['sub_images']
        for image in sub_images_list:
            image_url = image['url']
            filename = 'load/' + image_url.split('/')[-1] + '.jpg'
            # Download the image into the load/ folder
            request.urlretrieve(image_url, filename)
    else:
        print('gallery data not found on this page')


def a(offset):
    """Request one page of search results and hand each article URL to b()."""
    url = 'https://www.toutiao.com/search_content/?offset={}&format=json&keyword=%E8%A1%97%E6%8B%8D&autoload=true&count=20&cur_tab=1&from=search_tab'
    a_url = url.format(offset)

    response = requests.get(a_url)
    # response.json() returns the already-parsed object (a dict)
    html_json_dict = response.json()
    # The 'data' key holds the list of search-result items
    data_list = html_json_dict['data']

    num = offset / 20
    if num < 4:
        offset += 20
        # For every item that carries an article_url, crawl that article
        for data_item in data_list:
            if 'article_url' in data_item:
                article_url = data_item['article_url']
                print(article_url)
                b(article_url)

        # Recurse with the next offset until four result pages have been fetched
        a(offset)




if __name__ == '__main__':
    a(0)
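A note on the double json.loads() in b(): the regex captures the quoted, escaped JSON string literal that the page hands to JSON.parse(), so the first loads() only unwraps the string and the second one produces the dict. A minimal sketch with hypothetical sample data:

import json

# Hypothetical example of what the regex would capture: a JSON string literal
raw = '"{\\"sub_images\\": [{\\"url\\": \\"http://p.example.com/abc\\"}]}"'
inner = json.loads(raw)    # first pass -> still a str
data = json.loads(inner)   # second pass -> dict
print(data['sub_images'][0]['url'])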
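urlretrieve works here, but it gives no control over timeouts or failed responses. A sketch of an alternative download step, assuming the same image_url and filename values built in b(); save_image is a hypothetical helper, not part of the original script:

import requests

def save_image(image_url, filename):
    # Sketch: fetch the image with requests and skip it on a bad status code
    resp = requests.get(image_url, timeout=10)
    if resp.status_code == 200:
        with open(filename, 'wb') as f:
            f.write(resp.content)
    else:
        print('skip', image_url, resp.status_code)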