from bs4 import BeautifulSoup
import requests
import operator

# Fetch the article page and parse it with lxml
url = "http://www.chinadaily.com.cn/a/202304/14/WS64389022a31057c47ebba052.html"
html = requests.get(url).text
soup = BeautifulSoup(html, "lxml")

# The article body sits in <div id="Content">; collect the text of every <p>
article = soup.find("div", attrs={"id": "Content"})
data = ""
for p_tag in article.find_all("p"):
    data += p_tag.text
    # print(p_tag.text)
# print(data, "\n")

# Save the raw text to a file, then read it back
with open("20230414.txt", "w", encoding="utf-8") as f:
    f.write(data)
with open("20230414.txt", "r", encoding="utf-8") as fp:
    data = fp.read()

# Strip punctuation and split the text into words
data = data.translate({ord(c): None for c in "(),.''"})
data = data.split()
print(data)

# Count how often each word appears
word_freq = dict()
for word in data:
    if word not in word_freq:
        word_freq[word] = 1
    else:
        word_freq[word] += 1
print(word_freq)

# Sort by count, highest first, and print the result
ordered_freq = sorted(word_freq.items(), key=operator.itemgetter(1), reverse=True)
for w, c in ordered_freq:
    print(w, c)
This script scrapes a chinadaily article, extracts its words, and sorts them by frequency.
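As a side note, the manual counting loop and the operator.itemgetter sort could be replaced with collections.Counter from the standard library. A minimal sketch, assuming data is the same cleaned word list produced above:

from collections import Counter

# Counter builds the same word -> count mapping in a single call
word_freq = Counter(data)

# most_common() returns (word, count) pairs already sorted by count, highest first
for w, c in word_freq.most_common():
    print(w, c)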