Extracting the Main Text of a Web Page

Two approaches are shown below: the ready-made Goose library, and a hand-rolled line-density extractor wrapped in an XML-RPC service.

The quickest route is the Goose library; passing StopWordsChinese makes its stop-word statistics work on Chinese pages as well:

from goose import Goose
from goose.text import StopWordsChinese

url = 'http://blog.raphaelzhang.com/2012/03/html-text-extractor/'
g = Goose({'stopwords_class': StopWordsChinese})
article = g.extract(url=url)
print article.cleaned_text
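
The original goose package is Python 2 only. On Python 3 the community fork goose3 keeps essentially the same interface; a minimal sketch, assuming goose3 is installed (pip install goose3) and that its API mirrors the original:

# Python 3 sketch using the goose3 fork (assumption: API mirrors goose)
from goose3 import Goose
from goose3.text import StopWordsChinese

g = Goose({'stopwords_class': StopWordsChinese})
article = g.extract(url='http://blog.raphaelzhang.com/2012/03/html-text-extractor/')
print(article.cleaned_text)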

The second approach builds the extractor by hand. It walks the HTML with htmllib, measures each output line's text density (visible characters divided by the raw bytes of markup that produced them), keeps only the dense lines, and serves the result over XML-RPC. Note that htmllib, formatter, SimpleXMLRPCServer and urllib2 are Python 2 modules:

#!/usr/bin/env python
# coding: utf-8
from SimpleXMLRPCServer import SimpleXMLRPCServer
import htmllib
import urllib2
import formatter
import StringIO
import chardet
from bs4 import BeautifulSoup


class TrackingParser(htmllib.HTMLParser):
    """Try to keep an accurate pointer to the parsing location."""
    def __init__(self, writer, *args):
        htmllib.HTMLParser.__init__(self, *args)
        self.writer = writer

    def parse_starttag(self, i):
        index = htmllib.HTMLParser.parse_starttag(self, i)
        self.writer.index = index
        return index

    def parse_endtag(self, i):
        self.writer.index = i
        return htmllib.HTMLParser.parse_endtag(self, i)


class Paragraph:
    def __init__(self):
        self.text = ''
        self.bytes = 0
        self.density = 0.0


class LineWriter(formatter.AbstractWriter):
    def __init__(self, *args):
        self.last_index = 0
        self.index = 0
        self.lines = [Paragraph()]
        formatter.AbstractWriter.__init__(self)

    def send_flowing_data(self, data):
        # Work out the length of this text chunk.
        t = len(data)
        # We've parsed more text, so increment the index.
        self.index += t
        # Calculate the number of bytes consumed since last time.
        b = self.index - self.last_index
        self.last_index = self.index
        # Accumulate this information in the current line.
        l = self.lines[-1]
        l.text += data
        l.bytes += b

    def send_paragraph(self, blankline):
        """Create a new paragraph if necessary."""
        if self.lines[-1].text == '':
            return
        self.lines.append(Paragraph())

    def send_literal_data(self, data):
        self.send_flowing_data(data)

    def send_line_break(self):
        self.send_paragraph(0)

    def compute_density(self):
        """Calculate the density for each line, and the average."""
        total = 0.0
        for l in self.lines:
            l.density = len(l.text) / float(l.bytes)
            total += l.density
        # Store for optional use by the neural network.
        self.average = total / float(len(self.lines))

    def output(self):
        """Return a string with the low-density lines filtered out."""
        self.compute_density()
        output = StringIO.StringIO()
        for l in self.lines:
            # The line-density threshold we set; values around 0.3-0.5 work well.
            if l.density > 0.5:
                output.write(l.text)
        return output.getvalue()


def extract_text(html):
    writer = LineWriter()
    fmt = formatter.AbstractFormatter(writer)
    parser = TrackingParser(writer, fmt)
    parser.feed(html)
    parser.close()
    return writer.output()


def main(url):
    print url
    html = urllib2.urlopen(url).read()
    encode_type = chardet.detect(html)['encoding']
    print encode_type
    if encode_type is not None:
        html = html.decode(encode_type, 'ignore').encode('utf-8')
    # Optional pre-cleaning (unused here): drop <script>/<style> text with BeautifulSoup.
    # html = ''.join(BeautifulSoup(html).findAll(
    #     text=lambda text: text.parent.name not in ('script', 'style')))
    s = extract_text(html)
    print s
    return s


server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(main)
server.serve_forever()
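
The heart of the extractor is the density test in output(): navigation, ads and widgets spend many bytes of markup per visible character, while body paragraphs spend few. A toy illustration of the measure, with contrived numbers that are not from any real page:

# Toy illustration of the density measure (contrived snippets, Python 2):
# density = visible characters / raw HTML bytes that produced them.
nav_html = '<li><a href="/news/">News</a></li>'   # 4 visible chars in 34 bytes
body_html = '<p>' + 'x' * 200 + '</p>'            # 200 visible chars in 207 bytes
print 4.0 / len(nav_html)      # ~0.12 -> dropped by a 0.5 threshold
print 200.0 / len(body_html)   # ~0.97 -> kept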
With the server running, any XML-RPC client can call the extractor. A minimal Python 2 client:

from xmlrpclib import ServerProxy

server = ServerProxy("http://localhost:8000")
url1 = "http://www.chinanews.com/gn/2013/09-27/5331153.shtml"
try:
    ret = server.main(url1)   # remote call into the main(url) registered above
    print ret
except Exception as ex:
    print "exception", ex

