spider: crawling every issue of the 花火 (Huahuo) magazine in a loop

Getting familiar with BeautifulSoup's select and lxml's XPath for locating HTML tags.
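
As a quick refresher, here is a minimal sketch, on a made-up HTML snippet that mirrors the selectors used below, showing how the same link can be located either with BeautifulSoup's CSS selectors or with lxml's XPath:

from bs4 import BeautifulSoup
from lxml import etree

html = '<div class="boxcon"><ul><li><p><a href="/huahuo/1.html">No.1</a></p></li></ul></div>'

soup = BeautifulSoup(html, 'lxml')
print([a['href'] for a in soup.select('.boxcon > ul > li > p > a')])  # ['/huahuo/1.html']

tree = etree.HTML(html)
print(tree.xpath('//div[@class="boxcon"]//a/@href'))  # ['/huahuo/1.html']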

import os
import shutil
import time
import urllib.request
from bs4 import BeautifulSoup
from lxml import etree



def handle_request(url):
    """Build a Request carrying a browser User-Agent so the site serves us normally."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    }
    return urllib.request.Request(url=url, headers=headers)
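
The script calls urllib.request.urlopen many times in a row, so a single network hiccup kills the whole run. A minimal sketch of a hardened fetch helper (fetch is a hypothetical addition, not part of the original script), assuming a timeout plus simple exponential-backoff retries are acceptable:

import urllib.error

def fetch(url, retries=3, timeout=10):
    # Hypothetical helper: retry transient network failures with backoff
    for attempt in range(retries):
        try:
            request = handle_request(url)
            return urllib.request.urlopen(request, timeout=timeout).read().decode('utf8')
        except urllib.error.URLError:
            if attempt == retries - 1:
                raise
            time.sleep(2 ** attempt)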


def get_content(text_src):
    """Download the full text of one article, following pagination when present."""
    request = handle_request(text_src)
    text_content = urllib.request.urlopen(request).read().decode('utf8')
    soup = BeautifulSoup(text_content, 'lxml')
    # '.pagelink > ul > li > b' holds the total page count on paginated articles
    other = soup.select('.pagelink > ul > li > b')
    if not other:
        # Single-page article: collect the body paragraphs directly
        content_list = soup.select('.zw > p')
        return ''.join(content.text + '\n' for content in content_list)
    # Multi-page article: page i lives at <base>_<i>.<ext>; page 1 keeps the
    # original URL
    others = int(other[0].text)
    base, ext = text_src.rsplit('.', 1)
    content_all = ''
    for i in range(1, others + 1):
        text_src_content = text_src if i == 1 else '%s_%d.%s' % (base, i, ext)
        request = handle_request(text_src_content)
        response = urllib.request.urlopen(request).read().decode('utf8')
        soup = BeautifulSoup(response, 'lxml')
        content_list = soup.select('.zw > p')
        content_all += ''.join(content.text + '\n\n' for content in content_list)
    return content_all
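
For example, with a hypothetical article URL, the per-page URLs expand as follows (page 1 keeps the original URL):

>>> text_src = 'https://m.feiyanqing.com/huahuo/1234.html'  # hypothetical URL
>>> base, ext = text_src.rsplit('.', 1)
>>> '%s_%d.%s' % (base, 2, ext)
'https://m.feiyanqing.com/huahuo/1234_2.html'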




def parse_catalog_text(catalog_text, title):
    """Parse one issue's table of contents and download every article into the issue folder."""
    tree = etree.HTML(catalog_text)
    catalog_text_src = tree.xpath('//div[@class="boxcon"]/ul//h3/a/@href')
    catalog_text_name = tree.xpath('//div[@class="boxcon"]/ul//h3/a/text()')
    for i in range(len(catalog_text_src)):
        text_src = 'https://m.feiyanqing.com' + catalog_text_src[i]
        text_name = catalog_text_name[i]
        print('Downloading --%s--...' % text_name)
        content = get_content(text_src)
        with open('%s.txt' % text_name, 'w', encoding='utf8') as fp:
            fp.write(text_name + '\n' + content)
        print('Finished --%s--' % text_name)
        time.sleep(2)  # be polite: pause between requests
        shutil.move('%s.txt' % text_name, title)
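
Article titles scraped from the page can contain characters that are illegal in file names (for example / or ?), which would make open() fail. A minimal sketch of a sanitizer (safe_filename is a hypothetical helper, not in the original script) that could be applied to text_name before writing:

import re

def safe_filename(name):
    # Replace characters that are invalid in Windows/Unix file names
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip()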



def parse_catalog_content(catalog_content, dirname):
    """Parse the magazine's top-level catalog and download the requested number of issues."""
    soup = BeautifulSoup(catalog_content, 'lxml')
    catalog = soup.select('.boxcon > ul > li > p > a')
    num = int(input('Enter the number of issues to download: '))
    for log in range(num):
        title = catalog[log].string
        if not os.path.exists(title):
            os.mkdir(title)
        catalog_src = 'https://m.feiyanqing.com' + catalog[log].attrs['href']
        request = handle_request(catalog_src)
        catalog_text = urllib.request.urlopen(request).read().decode('utf8')
        parse_catalog_text(catalog_text, title)
        shutil.move(title, dirname)  # move the finished issue folder under 花火/
        time.sleep(2)
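
Note that shutil.move raises an error if dirname already contains a folder named title, for example when the script is re-run. A minimal sketch of an overwrite-safe move (move_into is a hypothetical helper, not in the original script):

def move_into(src, dst_dir):
    # Hypothetical helper: remove a stale copy first, so re-runs succeed
    target = os.path.join(dst_dir, os.path.basename(src))
    if os.path.isdir(target):
        shutil.rmtree(target)
    elif os.path.exists(target):
        os.remove(target)
    shutil.move(src, dst_dir)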



def main():
    dirname = '花火'
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    url = 'https://m.feiyanqing.com/huahuo/'
    request = handle_request(url)
    catalog_content = urllib.request.urlopen(request).read().decode('utf8')
    parse_catalog_content(catalog_content, dirname)


if __name__ == '__main__':
    main()

