How to write a multithreaded Python crawler: a beginner's Python novel scraper, using multiple threads to download from Biquge

[Python]
import math
import os
import threading
import time

import requests
from bs4 import BeautifulSoup

def combineText(bookName, count):
    """Merge the per-thread part files into one book file, then delete the parts."""
    combine = 'D:/SanMu/' + bookName + '.txt'
    t = open(combine, mode='w', encoding='utf-8')
    for i in range(count):
        txt = 'D:/SanMu/' + bookName + str(i + 1) + '.txt'
        with open(txt, mode='r', encoding='utf-8') as f:  # open the part file
            t.write(f.read())                             # append its contents
        if os.path.exists(txt):
            os.remove(txt)                                # delete the merged part
    t.close()
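As an aside (not part of the original script), the same merge written with pathlib keeps the hard-coded 'D:/SanMu/' path out of the loop and makes it configurable:

# A sketch of the same merge using pathlib; base_dir is an assumed parameter.
from pathlib import Path

def combine_text(book_name, count, base_dir='D:/SanMu'):
    base = Path(base_dir)
    with open(base / (book_name + '.txt'), mode='w', encoding='utf-8') as out:
        for i in range(1, count + 1):
            part = base / (book_name + str(i) + '.txt')
            out.write(part.read_text(encoding='utf-8'))
            part.unlink()  # remove the part file once it has been merged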

def split_list(ls, each):
    """Split ls into `each` groups of roughly equal size."""
    groups = []
    eachExact = float(each)
    groupCount = int(len(ls) // each)
    groupCountExact = math.ceil(len(ls) / eachExact)
    start = 0
    for i in range(each):
        # If the division leaves a remainder, put all leftover elements into the last group.
        if i == each - 1 and groupCount < groupCountExact:
            groups.append(ls[start:len(ls)])
        else:
            groups.append(ls[start:start + groupCount])
            start = start + groupCount
    return groups
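A quick check of what split_list returns (an illustration, not part of the original post): ten chapters split three ways, with the remainder landing in the last group.

chapters = list(range(10))
print(split_list(chapters, 3))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]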

def download_page(url):
    """Fetch a page, retrying once after a 3-second sleep on errors or a 503 page."""
    try:
        data = requests.get(url).content
        if b'503 Service Temporarily Unavailable' in data:
            print("Got a 503 page, sleeping 3 seconds")
            time.sleep(3)
            data = requests.get(url).content
    except requests.RequestException:
        time.sleep(3)
        data = requests.get(url).content
    return data
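The manual sleep-and-retry above works, but requests can also retry 503s automatically through urllib3's Retry helper. A minimal sketch, assuming the same Biquge endpoints; download_page_with_retry is a hypothetical drop-in name:

# Automatic retries with backoff: 3 attempts, retrying only on HTTP 503.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[503])
session.mount('http://', HTTPAdapter(max_retries=retry))
session.mount('https://', HTTPAdapter(max_retries=retry))

def download_page_with_retry(url):
    return session.get(url, timeout=10).content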

class myThread(threading.Thread):
    """Worker thread: downloads one group of chapters into its own part file."""

    def __init__(self, split_dds, name):
        threading.Thread.__init__(self)
        self.name = name
        self.split_dds = split_dds

    def run(self):
        print("Starting thread: " + self.name)
        download_book(self.split_dds, self.name)
        print("Exiting thread: " + self.name)

def download_book(split_dds, bookName):
    """Download one group of chapters into its own part file."""
    file_handle = open('D:/SanMu/' + bookName + '.txt', mode='w', encoding='utf-8')
    for dd in split_dds:
        # Each <dd> holds the link to one chapter page.
        beautiful_soup = BeautifulSoup(download_page(dd.find('a')['href']), 'html.parser')
        name = beautiful_soup.find_all('h1')[0].text  # chapter title
        file_handle.write(name)
        file_handle.write('\r\n')
        catalogue_html = str(beautiful_soup.find('div', attrs={'id': 'content'}))
        # NOTE: the tag literals below were swallowed when the post was rendered
        # as HTML; they are reconstructed guesses at what the cleanup stripped.
        html_replace = catalogue_html.replace('<div id="content">', '')
        replace = html_replace.replace('/n', '').replace('</div>', '').replace('</p>', '')
        split = replace.split('<p>')
        for p_ in split:
            try:
                file_handle.write(p_)
                file_handle.write('\r\n')
            except Exception:  # skip any paragraph that fails to write
                pass
    file_handle.close()
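Because the tag names in the cleanup above are reconstructed guesses, a safer route is to let BeautifulSoup extract the visible text itself. A sketch of the inner loop rewritten that way, using the same beautiful_soup and file_handle names from the function:

# Tag-agnostic extraction: pull the visible text straight from the content div.
content_div = beautiful_soup.find('div', attrs={'id': 'content'})
if content_div is not None:
    for paragraph in content_div.stripped_strings:
        file_handle.write(paragraph)
        file_handle.write('\r\n')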

def parse_html(html):
    soup = BeautifulSoup(html, 'html.parser')
    movie_list_soup = soup.find('table')  # the search-result table
    movie_list = []       # chapter-index URL of each hit
    movie_name_list = []  # title of each hit
    if movie_list_soup is not None:
        i = 1
        for movie_li in movie_list_soup.find_all('tr'):
            if movie_li.find_all('th'):  # skip the header row
                continue
            a_ = movie_li.find_all('td', attrs={'class': 'odd'})[0].find('a')
            print(i, '.', a_.text)
            movie_list.append(a_['href'])
            movie_name_list.append(a_.text)
            i = i + 1
        count = int(input('Enter the book number: ')) - 1
        page = BeautifulSoup(download_page(movie_list[count]), 'html.parser')
        dds = page.find_all('dd')  # one <dd> per chapter
        print('This download covers ' + str(len(dds)) + ' chapters')
        split_dds = split_list(dds, 3)
        try:
            book_name = movie_name_list[count]
            thread1 = myThread(split_dds[0], book_name + "1")
            thread2 = myThread(split_dds[1], book_name + "2")
            thread3 = myThread(split_dds[2], book_name + "3")
            thread1.start()
            thread2.start()
            thread3.start()
            thread1.join()  # wait until every worker has finished
            thread2.join()
            thread3.join()
            combineText(book_name, 3)  # merge the three part files into one book
        except Exception:
            pass


def main():
    parse_html(download_page(
        'https://www.biquge5200.com/modules/article/search.php?searchkey='
        + input('Search: ')))


main()
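One practical note: every open() above targets D:/SanMu/, which must already exist or the writes will fail. On a fresh machine, create it first (a small addition, not in the original script):

import os
os.makedirs('D:/SanMu', exist_ok=True)  # no-op if the folder is already there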
