The output.html file never gets created. The code looks fine to me, but there is no result at all: both the console and the HTML file are blank. Please help!

# Scheduler: crawls Baidu Baike pages related to the "Python" entry and
# collects each page's title and summary.

from baike_spider import url_manager, html_downloader, html_parser, \
    html_outputer


class SpiderMain(object):

    # constructor: instantiate the collaborating objects
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        # seed the manager with the root URL
        self.urls.add_new_url(root_url)
        # bug fix: count must be initialized outside the loop; in the
        # original it was reset to 1 on every pass, so "count == 1000"
        # could never fire
        count = 1
        # the asker's note said this loop "never executes": that was
        # caused by the UrlManager bugs fixed below, which left
        # new_urls permanently empty
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                # download the page
                html_cont = self.downloader.download(new_url)
                # parse out new links and the page data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                # stop after crawling 1000 pages
                if count == 1000:
                    break
                count = count + 1
            except Exception as e:
                # print the actual error instead of a bare "craw failed",
                # so problems show up on the console
                print('craw failed:', e)
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
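For reference, and this is my assumption from the `from baike_spider import ...` line rather than anything stated in the post, the modules are expected to live in a package laid out roughly like this:

    baike_spider/
        __init__.py
        spider_main.py       # the scheduler above
        url_manager.py
        html_downloader.py
        html_parser.py
        html_outputer.py

A missing `__init__.py` or running the script from the wrong directory would raise an ImportError rather than produce blank output, so it is easy to rule out; the blank output here comes from the UrlManager bugs below.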

class UrlManager(object):
    'URL manager'

    # constructor: initialize the two sets
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    # add a single new URL to the manager
    def add_new_url(self, url):
        if url is None:
            return
        # bug fix: the original condition was
        #     if url not in self.new_urls and self.old_urls:
        # Since old_urls starts out empty (falsy), the whole condition
        # was always False, so no URL (not even the root) was ever added.
        if url not in self.new_urls and url not in self.old_urls:
            # in neither the pending set nor the crawled set, so this
            # is a brand-new URL: add it to new_urls
            self.new_urls.add(url)

    # add a batch of new URLs to the manager
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)  # delegate to add_new_url()

    # is there a URL waiting to be crawled?
    def has_new_url(self):
        # bug fix: the original returned len(self.new_urls) == 1, which
        # is False both when the set is empty and when it holds more
        # than one URL, so the crawl loop could never keep running
        return len(self.new_urls) != 0

    # fetch one URL to crawl
    def get_new_url(self):
        new_url = self.new_urls.pop()  # removes an arbitrary element (sets are unordered)
        self.old_urls.add(new_url)
        return new_url
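A quick standalone check (my own snippet, not part of the original post) makes the two fixes visible: the duplicate add is ignored, and has_new_url now reports correctly:

    if __name__ == '__main__':
        um = UrlManager()
        um.add_new_url('https://baike.baidu.com/item/Python/407313')
        um.add_new_url('https://baike.baidu.com/item/Python/407313')  # duplicate, ignored
        print(um.has_new_url())  # True (was False with the old_urls / "== 1" bugs)
        print(um.get_new_url())  # pops the URL and moves it to old_urls
        print(um.has_new_url())  # False (nothing left to crawl)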

import urllib.request  # bug fix: "import urllib" alone does not reliably
                       # expose the urllib.request submodule


class HtmlDownloader(object):
    'downloads page content'

    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()

'''
# Alternative that percent-encodes Chinese or other special characters
# in the request path:
url_ = quote(new_url, safe=string.printable)
response = request.urlopen(url_)
if response.getcode() != 200:
    return None  # request failed
html = response.read()
return html.decode("utf8")
'''
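The commented-out fragment above is incomplete as written: it references quote, string, and request without importing them. A self-contained sketch of the same idea (my own version, assuming the page is UTF-8 encoded) might look like:

    import string
    from urllib import parse, request

    def download_encoded(url):
        # percent-encode non-printable/Chinese characters in the URL
        # while leaving ordinary URL punctuation alone
        safe_url = parse.quote(url, safe=string.printable)
        response = request.urlopen(safe_url)
        if response.getcode() != 200:
            return None  # request failed
        return response.read().decode('utf-8')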

from bs4 import BeautifulSoup
import re
import urllib.parse  # bug fix: needed for urllib.parse.urljoin below


class HtmlParser(object):

    # page_url is the base URL that relative links are joined against
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links look like /item/Python/407313
        # note: the original pattern r"/item/*" matches "/item" followed
        # by zero or more slashes; r"/item/" is what was intended
        links = soup.find_all('a', href=re.compile(r"/item/"))
        for link in links:
            new_url = link['href']
            # e.g. page_url = http://baike.baidu.com/item/Python and
            # new_url = /item/史记·2016?fr=navbar, so urljoin yields
            # new_full_url = http://baike.baidu.com/item/史记·2016?fr=navbar
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # title, e.g. <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # summary
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    # page_url: the page's URL; html_cont: the downloaded page content
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        # Python 3 strings are already Unicode, so from_encoding="utf-8"
        # is ignored; drop it. Note the commented-out line also has a
        # typo: the parser name is 'html.parser', not 'html.parse'.
        # soup = BeautifulSoup(html_cont, 'html.parse', from_encoding="utf-8")
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
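To see what urljoin does with the relative hrefs described in the comments above, a two-line check (my own example) is enough:

    from urllib.parse import urljoin

    print(urljoin('https://baike.baidu.com/item/Python', '/item/史记·2016?fr=navbar'))
    # -> https://baike.baidu.com/item/史记·2016?fr=navbar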

class HtmlOutputer(object):

    def __init__(self):
        self.datas = []  # collected records

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # open with an explicit utf-8 encoding so Chinese text is not
        # mangled by the platform's default (often ascii-ish) codec
        fout = open('output.html', 'w', encoding='utf-8')
        fout.write('<html>\n')
        fout.write('<body>\n')
        fout.write('<table>\n')
        for data in self.datas:
            fout.write('<tr>\n')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'])
            fout.write('<td>%s</td>' % data['summary'])
            fout.write('</tr>\n')
        fout.write('</table>\n')
        fout.write('</body>\n')
        fout.write('</html>\n')
        fout.close()
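To test the outputer in isolation (again my own snippet, with placeholder data), one collected record is enough to confirm that output.html appears in the current working directory:

    if __name__ == '__main__':
        outputer = HtmlOutputer()
        outputer.collect_data({
            'url': 'https://baike.baidu.com/item/Python/407313',
            'title': 'Python',
            'summary': '...placeholder summary...',
        })
        outputer.output_html()  # writes ./output.html with one table row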
