pythonrequests教程_讨论 - 廖雪峰的官方网站

这是w3cschool里面的教程,默认的解析器用不了,我改成 BeautifulSoup(markup,"html5lib") 就能用了。 最后一段开始下载的代码有问题

# -*- coding:UTF-8 -*-

from bs4 import BeautifulSoup

import requests, sys

"""

类说明:下载《笔趣看》网小说《一念永恒》

Parameters:

Returns:

Modify:

2017-09-13

"""

class downloader(object):

def __init__(self):

self.server = 'http://www.biqukan.com/'

self.target = 'http://www.biqukan.com/1_1094/'

self.names = [] #存放章节名

self.urls = [] #存放章节链接

self.nums = 0 #章节数

"""

函数说明:获取下载链接

Parameters:

Returns:

Modify:

2017-09-13

"""

def get_download_url(self):

req = requests.get(url = self.target)

html = req.text

div_bf = BeautifulSoup(html)

div = div_bf.find_all('div', class_ = 'listmain')

a_bf = BeautifulSoup(str(div[0]))

a = a_bf.find_all('a')

self.nums = len(a[15:]) #剔除不必要的章节,并统计章节数

for each in a[15:]:

self.names.append(each.string)

self.urls.append(self.server + each.get('href'))

"""

函数说明:获取章节内容

Parameters:

target - 下载连接(string)

Returns:

texts - 章节内容(string)

Modify:

2017-09-13

"""

def get_contents(self, target):

req = requests.get(url = target)

html = req.text

bf = BeautifulSoup(html)

texts = bf.find_all('div', class_ = 'showtxt')

texts = texts[0].text.replace('\xa0'*8,'\n\n')

return texts

"""

函数说明:将爬取的文章内容写入文件

Parameters:

name - 章节名称(string)

path - 当前路径下,小说保存名称(string)

text - 章节内容(string)

Returns:

Modify:

2017-09-13

"""

def writer(self, name, path, text):

write_flag = True

with open(path, 'a', encoding='utf-8') as f:

f.write(name + '\n')

f.writelines(text)

f.write('\n\n')

if __name__ == "__main__":

dl = downloader()

dl.get_download_url()

print('《一年永恒》开始下载:')

for i in range(dl.nums):

dl.writer(dl.names[i], '一念永恒.txt', dl.get_contents(dl.urls[i]))

sys.stdout.write(" 已下载:%.3f%%" % float(i/dl.nums) + '\r')

sys.stdout.flush()

print('《一年永恒》下载完成')

然后在博客园看到另一种类似的源码:

from bs4 import BeautifulSoup

import requests

class spiderstory(object):

def __init__(self):

self.url = 'http://www.365haoshu.com/Book/Chapter/'

self.names = []#存放章节名称

self.hrefs = []#存放章节链接

def get_urlandname(self):

'''获取章节名称和和章节URL'''

response = requests.get(url=self.url + 'List.aspx?NovelId=6686 ')

req_parser = BeautifulSoup(response.text,"html.parser")

div = req_parser.find_all('div',class_='user-catalog-ul-li')

a_bf = BeautifulSoup(str(div))

a = a_bf.find_all('a')

for i in a:

self.names.append(i.find('span',class_='fl').string)

self.hrefs.append(self.url + i['href'])

def get_text(self,url):

'''获取对应章节内容'''

respons2 =requests.get(url=url)

c = BeautifulSoup(str(respons2.text),'html.parser')

b = c.find_all('p', class_='p-content')

text = []

for temp in b:

text.append(temp.string)

return text

def writer(self,name,path,text1):

''' 写入TXT文档'''

with open(path,'a',encoding='utf-8') as f:

f.write(name + '\n')

f.writelines(text1)

f.write('\n\n')

if __name__ == "__main__": # 运行入口

a= spiderstory()

a.get_urlandname()

for i in range(len(a.names)):

name = a.names[i]

text = str(a.get_text(a.hrefs[i]))

a.writer(name,'F:\小说.txt',text)

print(a)

整合之后 在pycharm中运行 提示:

《巫神纪》开始下载:

from bs4 import BeautifulSoup

import requests

class spidertext(object): def __init__(self): self.url = 'https://www.biqukan.com' self.names = [] # 存放章节名称 self.hrefs = [] # 存放章节链接 def get_urlandname(self): '''获取章节名称和和章节URL''' response = requests.get(url=self.url)

req_parser = BeautifulSoup(response.text, "html5lib")

div = req_parser.find_all('div', class_="listmain")

a_bf = BeautifulSoup(str(div), "html5lib")

a = a_bf.find_all('a')

for i in a[13 : -2]: self.names.append(i.string)

self.hrefs.append(self.url + i['href'])

def get_text(self, url): '''获取对应章节内容''' respons2 = requests.get(url=url)

req_parser2 = BeautifulSoup(respons2.text, "html5lib")

div2 = req_parser2.find_all('div', id='content', class_='showtxt')

b_bf = BeautifulSoup(str(div2))

text = []

for temp in b_bf: text.append(temp.string)

return text

def writer(self, name, path, text1): ''' 写入TXT文档''' with open(path, 'a', encoding='utf-8') as f: f.write(name + '\n')

f.writelines(text1)

f.write('\n\n')

if __name__ == "__main__": # 运行入口 st = spidertext()

st.get_urlandname()

print('《巫神纪》开始下载:')

for i in range(len(st.names)): name = st.names[i]

text = str(st.get_text(st.hrefs[i]))

st.writer(name, 'D:\巫神纪.txt', text)

print(st)

**求解 到底是哪出了错? 运行接口这块应该怎么写才对呢? **

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值