使用 Python 爬取小说全部内容

爬取代码为:

import urllib.request

from bs4 import BeautifulSoup

#coding: utf-8

class xiaoShuo():
    """Scrape a novel's index page, follow every chapter link, and print each
    chapter's text.

    The parent label (parLabel / parLabelType / parLabelValue) selects the
    container of chapter ``<a>`` links on the index page; the child label
    (clildLabel / clildLabelType / clildLabelValue) selects the text container
    on each chapter page.  ``enc`` is the site's character encoding (e.g. "gbk").
    """

    def __init__(self, url, parLabelValue, parLabelType, parLabel,
                 clildLabelValue, clildLabelType, clildLabel, enc):
        self.url = url                        # index page of the novel
        self.parLabelValue = parLabelValue    # attribute value of index container
        self.parLabelType = parLabelType      # attribute name (e.g. "class")
        self.enc = enc                        # page encoding, e.g. "gbk"
        self.parLabel = parLabel              # tag name of index container (e.g. "td")
        # NOTE: "clild" is a typo for "child", kept for interface compatibility.
        self.clildLabelValue = clildLabelValue
        self.clildLabelType = clildLabelType
        self.clildLabel = clildLabel

    def getUrlContent(self):
        """Fetch the index page, walk every chapter link, print its content.

        Returns None; output goes to stdout via print().
        """
        import urllib.parse  # local import: only needed to resolve chapter hrefs

        with urllib.request.urlopen(self.url) as response:
            html = response.read().decode(self.enc)
        pageNode = BeautifulSoup(html, 'html.parser')
        iterms = pageNode.find_all(self.parLabel,
                                   {self.parLabelType: self.parLabelValue})
        for iterm in iterms:
            for tagA in iterm.select("a"):
                href = tagA.get("href")
                if not href:
                    continue  # skip anchors with no target
                # BUG FIX: the original always re-fetched self.url here instead
                # of following each chapter link; resolve relative hrefs against
                # the index URL and fetch the actual chapter page.
                chapterUrl = urllib.parse.urljoin(self.url, href)
                content = self.getXiaoShuoContent(chapterUrl,
                                                  self.clildLabel,
                                                  self.clildLabelValue,
                                                  self.clildLabelType,
                                                  self.enc)
                print(content)

    def getXiaoShuoContent(self, url, childLabel, childLabelValue,
                           childLabelType, enc):
        """Fetch one chapter page and return all matching text as one string.

        BUG FIX: the original overwrote `content` on every iteration, and a
        stray trailing comma made the return value a 1-tuple; now the text of
        every matching element is accumulated and returned as a plain str.
        """
        with urllib.request.urlopen(url) as response:
            html = response.read().decode(enc)
        pageNode = BeautifulSoup(html, 'html.parser')
        iterms = pageNode.find_all(childLabel, {childLabelType: childLabelValue})
        return "".join(iterm.get_text() for iterm in iterms)

    def writeTofile(self, fileName, content):
        """Write `content` (an iterable of strings) to `<fileName>.txt`.

        Prints an error message instead of raising if the write fails.
        """
        try:
            # BUG FIX: write UTF-8 explicitly; the platform default codec may
            # not be able to encode the gbk-decoded Chinese text.
            with open("%s.txt" % fileName, "w", encoding="utf-8") as f:
                for piece in content:
                    f.write(piece)
        except OSError:  # narrowed from a bare except: that hid real bugs
            print("写入错误")

# BUG FIX: the original ran the scrape unconditionally at import time and
# bound `html` to getUrlContent()'s return value, which is always None
# (the method prints; it does not return the page).  Guard the entry point
# so importing this module has no network side effects.
if __name__ == "__main__":
    scraper = xiaoShuo("https://www.szzyue.com/dushu/11/11255/",
                       "L", "class", "td",
                       "contents", "id", "dd", "gbk")
    scraper.getUrlContent()  # prints every chapter to stdout

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值