Python Study Notes: Scraping the One Piece Comic

Some scraper code I put together recently in my spare time. It can automatically crawl any comic on Tencent Comics (ac.qq.com). The approach:

1. First get the URL of the chapter to download; getUrls does this, fetching the comic's latest chapter directly.

2. Then open that chapter and collect the URLs of the images to download.

3. Download the images to local disk.

import os
import random
import time
from random import randint

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
import urllib.request as urllib2

ROOT_URL = "http://ac.qq.com"

target_url = [
    ROOT_URL + "/Comic/comicInfo/id/505430",  # One Piece
]

# User-Agent pool; one is picked at random for the HTTP requests
ua_list = [
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
]

user_agent = random.choice(ua_list)
dir_path = "D:/py/海贼王/"  # local save directory

def getImageUrls(comic_url):
    '''
    Use Selenium with PhantomJS to collect the dynamically generated image URLs.
    '''
    urls = []
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)")
    browser = webdriver.PhantomJS(
        executable_path=r"E:\py\littlepy\tencent_cartoon\phantomjs-2.1.1-windows\bin\phantomjs.exe",
        desired_capabilities=dcap)
    browser.get(comic_url)
    imgs = browser.find_elements_by_xpath("//div[@id='mainView']/ul[@id='comicContain']//img")
    for i in range(0, len(imgs) - 1):
        if i == 1:  # skip the ad image
            continue
        urls.append(imgs[i].get_attribute("src"))
        # Scroll one screen at a time so the next images lazy-load
        js = 'window.scrollTo(800, ' + str((i + 1) * 1280) + ')'
        browser.execute_script(js)
        time.sleep(randint(3, 6))  # random pause between scrolls
    browser.quit()
    print("urls=", urls)
    return urls
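Note: PhantomJS is no longer maintained and Selenium 4 dropped support for it, so on a current setup the same step would run against headless Chrome. A minimal sketch under those assumptions (getImageUrlsHeadless is my name, chromedriver must be on PATH, the XPath is copied from getImageUrls above, and the ad-skipping logic is omitted for brevity):

import time
from random import randint

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options

def getImageUrlsHeadless(comic_url):
    # Headless Chrome replacement for the PhantomJS setup above
    opts = Options()
    opts.add_argument("--headless=new")       # plain "--headless" on older Chrome builds
    browser = webdriver.Chrome(options=opts)  # assumes chromedriver is on PATH
    browser.get(comic_url)
    imgs = browser.find_elements(By.XPATH, "//div[@id='mainView']/ul[@id='comicContain']//img")
    urls = []
    for i, img in enumerate(imgs):
        urls.append(img.get_attribute("src"))
        # Same scroll-to-lazy-load trick as the PhantomJS version
        browser.execute_script("window.scrollTo(800, %d)" % ((i + 1) * 1280))
        time.sleep(randint(1, 3))
    browser.quit()
    return urls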

def getUrls(comic_url):
    '''
    Fetch the comic's info page and return (title, url) of its latest chapter.
    '''
    result = dict()
    req = urllib2.Request(comic_url)
    req.add_header('User-Agent', user_agent)
    print("url=", comic_url)
    response = urllib2.urlopen(req)
    soup = BeautifulSoup(response, "lxml")
    # The "chapter-page-new works-chapter-list" block lists the most recent
    # chapters; the last <a> in it is the newest one
    page = soup.find(attrs={"class": "chapter-page-new works-chapter-list"}).find_all("a")
    title = page[-1]['title']
    result[title] = ROOT_URL + page[-1]['href']
    print("title=", title)
    print("result=", result[title])
    return title, result[title]
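getUrls only returns the newest chapter. The same soup already contains a link for every listed chapter, so a small variation dropped into this script (getAllChapterUrls is a name I made up; it assumes the same page structure and reuses the script's urllib2, BeautifulSoup, ROOT_URL, and user_agent) returns them all:

def getAllChapterUrls(comic_url):
    # Variation of getUrls: collect every chapter link in the list block,
    # not just the last one
    req = urllib2.Request(comic_url)
    req.add_header('User-Agent', user_agent)
    soup = BeautifulSoup(urllib2.urlopen(req), "lxml")
    links = soup.find(attrs={"class": "chapter-page-new works-chapter-list"}).find_all("a")
    return {a['title']: ROOT_URL + a['href']
            for a in links if a.has_attr('title') and a.has_attr('href')}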

def downloadComics(dir_path, urls):
    for url in urls:
        # File name is a crude slice of the image URL; no extension is kept
        urllib2.urlretrieve(url, dir_path + url[-8:-2])
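The url[-8:-2] slice gives file names with no extension and can collide when URLs share a tail. A safer hypothetical variant (downloadComicsSafe is not in the original) numbers the pages in order and keeps whatever extension the URL carries:

def downloadComicsSafe(dir_path, urls):
    # Hypothetical variant of downloadComics: number the pages sequentially
    # and keep the extension from the URL instead of slicing arbitrary characters
    for i, url in enumerate(urls):
        ext = os.path.splitext(url.split("?")[0])[1] or ".jpg"  # assume .jpg if none
        urllib2.urlretrieve(url, os.path.join(dir_path, "%03d%s" % (i, ext)))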

if __name__ == "__main__":
    title, result_url = getUrls(target_url[0])
    urls = getImageUrls(result_url)
    path = dir_path + title + "/"
    if not os.path.exists(path):
        os.makedirs(path)
        print(path + ' created')
    downloadComics(path, urls)
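Before running, note that two paths are machine-specific: the PhantomJS executable_path inside getImageUrls and the dir_path save directory. The class name and XPath selectors match ac.qq.com's markup at the time this was written, so expect to update them if the site changes.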
