# -*- coding: utf-8 -*-
import re
import feedparser
# Parse the words out of an HTML string and convert them to lowercase
def getwords(html):
    # Strip out all HTML tags
    txt = re.compile(r'<[^>]+>').sub('', html)
    # Split on runs of non-alphabetic characters
    words = re.compile(r'[^A-Za-z]+').split(txt)
    # Lowercase each word and drop empty strings
    return [word.lower() for word in words if word != '']
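
# A quick sanity check (hypothetical input), runnable from a REPL:
#   >>> getwords('<p>Hello, World!</p>')
#   ['hello', 'world']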

# Return the title and a dictionary of word counts for a blog feed
def getwordcounts(url):
    d = feedparser.parse(url)
    wc = {}
    # Loop over all the entries in the feed
    for e in d.entries:
        if 'summary' in e:
            summary = e.summary
        else:
            summary = e.description
        # Count the words in the entry title and summary
        words = getwords(e.title + ' ' + summary)
        for word in words:
            wc.setdefault(word, 0)
            wc[word] += 1
    return d.feed.title, wc
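
# getwordcounts returns a (title, counts) pair; for example (hypothetical
# feed), ('Some Blog', {'python': 3, 'data': 1, ...}).
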
if __name__ == '__main__':
    # Number of blogs each word appears in
    blogcount = {}
    # A word count table for each blog
    wordcounts = {}
    # feedlist.txt is expected to contain one feed URL per line
    feedlist = [line.strip() for line in open('C:\\Users\\DELL\\Desktop\\feedlist.txt')]
    for feedurl in feedlist:
        title, wc = getwordcounts(feedurl)
        wordcounts[title] = wc
        for word, count in wc.items():
            blogcount.setdefault(word, 0)
            if count > 1:
                blogcount[word] += 1
    # Build the word list, dropping words that are too common or too rare
    wordlist = []
    for w, bc in blogcount.items():
        # Fraction of blogs this word appears in
        frac = float(bc) / len(feedlist)
        if 0.1 < frac < 0.5:
            wordlist.append(w)
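    # Worked example (hypothetical numbers): with 10 feeds, a word counted in
    # 2 of them has frac = 2 / 10 = 0.2 and is kept, while a word counted in
    # 6 of them has frac = 0.6 and is dropped.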

    # Write a tab-separated matrix: one row per blog, one column per word
    datafile = open('blogdata', 'w')
    datafile.write('Blog')
    for word in wordlist:
        datafile.write('\t%s' % word)
    datafile.write('\n')
    for blogname, wc in wordcounts.items():
        print(blogname)
        datafile.write(blogname)
        for word in wordlist:
            if word in wc:
                datafile.write('\t%d' % wc[word])
            else:
                datafile.write('\t0')
        datafile.write('\n')
    datafile.close()
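
# The resulting 'blogdata' file is a tab-separated matrix; a hypothetical
# layout:
#   Blog        python  data
#   Some Blog   3       1
#   Other Blog  0       2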