这几天由于特殊原因,闲在家中无事干,恰逢老妹要在家上课,家里没有广西广电机顶盒,所以只能去网上下载下来放到电视上看。前段时间又学了点爬虫正好拿来练练手(已查阅网站无robots协议限制)
网站链接:广西空中课堂
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
import re
import datetime
from selenium import webdriver
import os
import time
class MycoursespiderSpider(scrapy.Spider):
    """Scrape the Guangxi 'air classroom' site (zt.gxtv.cn).

    Workflow per run:
      * create an output folder named after today's date (e.g. ``3月5日``);
      * fetch the course-timetable page (JS-rendered, so Selenium is used)
        and save its timetable images;
      * collect today's grade-4 English lesson and grade-5 non-English
        lessons, resolve each lesson page to its mp4 CDN URL, record the
        URLs in a ``month-day.txt`` index file, and download the videos.
    """

    name = 'mycoursespider'
    start_urls = ['http://zt.gxtv.cn/zt/default.html']
    # Lesson title -> lesson-page URL (later overwritten/extended with
    # title -> resolved mp4 URL).  One spider instance, so sharing this
    # as a class attribute matches the original module-global behavior.
    mydict = {}

    def parse(self, response):
        """Entry point: prepare today's folder, queue the timetable page
        and each of today's lesson pages."""
        now = datetime.datetime.now()
        # Date string as it appears in the site's lesson titles, e.g. "3月5日".
        self.today = str(now.month) + '月' + str(now.day) + '日'
        base = os.path.dirname(os.path.realpath(__file__)) + '/' + self.today
        # makedirs(exist_ok=True) replaces the original exists()/mkdir branches.
        os.makedirs(base, exist_ok=True)
        self.mypath = base + '/'

        domain = 'http://zt.gxtv.cn'
        # Timetable link; extract_first() returns None instead of raising
        # IndexError when the selector matches nothing.
        ctable = response.css('a#ctable::attr(href)').extract_first()
        if ctable:
            yield scrapy.Request(ctable, callback=self.parsecoursetable,
                                 meta={'url': ctable})

        # Grade-5 and grade-4 column contents.
        g5 = ''.join(response.css('ul#g5 a[target=_blank]').extract())
        g4 = ''.join(response.css('ul#g4 a[target=_blank]').extract())
        soup = BeautifulSoup(g5, 'html.parser')
        ensoup = BeautifulSoup(g4, 'html.parser')

        # Today's grade-4 English lesson (the rural grade-5 class studies
        # grade-4 English).
        for a in ensoup.find_all('a'):
            if re.search(self.today + '-英语', a['title']) is not None:
                self.mydict[a['title']] = domain + a['href']

        # Today's grade-5 lessons, excluding English.
        for a in soup.find_all('a'):
            if (re.search(self.today, a['title']) is not None
                    and re.search('-英语', a['title']) is None):
                self.mydict[a['title']] = domain + a['href']

        # Queue every collected lesson page for video-link extraction.
        for page in self.mydict.values():
            yield scrapy.Request(page, callback=self.parseinside)

    def parseinside(self, response):
        """Extract the mp4 URL from one lesson page, rewrite the on-disk
        index file, and queue the video download."""
        now = datetime.datetime.now()
        filename = str(now.month) + '-' + str(now.day) + '.txt'
        # Known CDN hosts serving the lesson videos.
        playhost = 'https://videocdn.liangtv.cn.*mp4|http://video.cdn.liangtv.cn.*mp4'
        title = response.css('h3#title::text').extract_first()
        playlink = re.search(playhost, response.text)
        if playlink is None:
            return  # page carries no recognizable video link
        video = playlink.group(0)
        self.mydict[title] = video

        # Rewrite the whole index each time a link resolves.  Open with
        # utf-8 explicitly: the platform default (gbk on Chinese Windows)
        # cannot encode \xa0; the replace() is kept as extra safety.
        with open(self.mypath + filename, 'w', encoding='utf-8') as f:
            for key, value in self.mydict.items():
                f.write(str(key).replace(u'\xa0', u' ') + ':'
                        + str(value).replace(u'\xa0', u' '))
                f.write('\n')

        # Comment this out to skip downloading; meta carries the title
        # through to the download callback.
        yield scrapy.Request(video, self.parsevideo, meta={'title': title})

    def parsevideo(self, response):
        """Save a downloaded video body as <date folder>/<title>.mp4."""
        title = response.meta['title'] + '.mp4'
        with open(self.mypath + title, 'wb') as f:
            f.write(response.body)

    def parsecoursetable(self, response):
        """Render the timetable page with Selenium (content is built by
        JavaScript) and queue each timetable image for download."""
        url = response.meta['url']
        # NOTE(review): hard-coded local geckodriver path — machine-specific.
        browser = webdriver.Firefox(
            executable_path=r'C:\Users\qq\AppData\Local\Programs\Python\Python38\geckodriver.exe')
        try:
            browser.get(url)
            time.sleep(3)  # crude wait for the JS-rendered content
            soup = BeautifulSoup(browser.page_source, 'html.parser')
            news = soup.find(id='news_con')
        finally:
            # Always release the browser, even if rendering raised.
            browser.quit()
        # findall() returns a list (never None), so the original
        # ``is not None`` guard was always true; an empty list simply
        # yields no requests.
        piclinks = re.findall('http://.*?jpg|https://.*?jpg', str(news))
        for idx, link in enumerate(piclinks):
            yield scrapy.Request(link, callback=self.parsepicture,
                                 meta={'title': str(idx)})

    def parsepicture(self, response):
        """Save one timetable image as <date>_<index>.jpg."""
        title = self.today + '_' + response.meta['title'] + '.jpg'
        with open(self.mypath + title, 'wb') as f:
            f.write(response.body)
放弃splash,使用selenium,简单多了
小记:在 Windows 上写文本文件时,默认的 gbk 编码无法编码网页中的 \xa0(不间断空格),会抛出 UnicodeEncodeError,所以写入前需要 str.replace(u'\xa0', u' ')(或者在 open 时显式指定 encoding='utf-8')