[It has been too long and I no longer remember exactly what these scripts were for, so please bear with them.]
File 1: scrape the character-name information from the web pages
import requests
from lxml import etree
from bs4 import BeautifulSoup
import re
import json
# Build a ranking-list page URL from the page number, following the pattern of the site's paginated ranking URLs:
def get_url_1(page):
    head = 'http://www.jjwxc.net/bookbase.php?fw0=0&fbsj=0&ycx1=1&xx2=2&mainview0=0&sd0=0&lx0=0&fg0=0&sortType=2&page='
    tail = '&isfinish=0&collectiontypes=ors&searchkeywords='
    url = head + str(page) + tail
    return url
# Build the full URL of an article from the relative link scraped off the ranking page:
def get_url_2(number):
    head = 'http://www.jjwxc.net/'
    url = head + str(number)
    return url
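A quick sanity check of the two URL builders (the novelid below is a made-up placeholder; the real relative links come from get_info_1 further down):
print(get_url_1(1))
# -> http://www.jjwxc.net/bookbase.php?fw0=0&...&page=1&isfinish=0&collectiontypes=ors&searchkeywords=
print(get_url_2('onebook.php?novelid=123456'))
# -> http://www.jjwxc.net/onebook.php?novelid=123456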
# From a ranking-list URL, collect the links to the individual articles:
def get_info_1(url_1):
    # Pretend to be a browser so the IP does not get blocked
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
        'Host': 'www.jjwxc.net',
        'Cookie': '__gads=ID=adb1bd199ca80e42:T=1592032062:S=ALNI_MbfwuQah_VUIJ0eFciwrmcI0YVBcQ; CNZZDATA1275156903=1619785691-1592040877-null%7C1592040877; __cfduid=d5561249470bba6af47bba14f331c644e1592045059; UM_distinctid=172ad47ba1a345-0e33c371452b35-f7d123e-144000-172ad47ba1de; timeOffset_o=-395.800048828125; CNZZDATA30075907=cnzz_eid%3D149420612-1592030978-null%26ntime%3D1594286393; testcookie=yes; Hm_lvt_bc3b748c21fe5cf393d26c12b2c38d99=1592471583,1594093184,1594178729,1594290569; token=Mjk5MDY4Mjd8ZWE4Y2Q1ZTc5OTIzZjNjNDgxMmNmZjU5NDI1MGEyMzl8fHx8MTA4MDB8MXx8fOaZi%2Baxn%2BeUqOaIt3wxfG1vYmlsZQ%3D%3D; JJSESS=%7B%22clicktype%22%3A%22%22%2C%22nicknameAndsign%22%3A%222%257E%2529%2524%25E6%25B4%25BB%25E7%259D%2580%25E4%25B8%25BA%25E4%25BA%2586%25E4%25BB%2580%25E4%25B9%2588%22%7D; JJEVER=%7B%22sms_total%22%3A%220%22%2C%22fenzhan%22%3A%22noyq%22%2C%22ispayuser%22%3A%2229906827-1%22%2C%22foreverreader%22%3A%2229906827%22%2C%22user_signin_days%22%3A%2220200709_29906827_3%22%7D; Hm_lpvt_bc3b748c21fe5cf393d26c12b2c38d99=1594291386',
    }
    # Retry up to 10 times; give up with an empty result if every attempt fails
    rsp = None
    tries = 10
    while tries > 0:
        try:
            rsp = requests.get(url_1, headers=headers)
            break
        except Exception:
            tries -= 1
    if rsp is None:
        return []
    # Use the detected encoding so Chinese text is not garbled
    rsp.encoding = rsp.apparent_encoding
    data = rsp.text
    selector = etree.HTML(data)
    # Hyperlinks of all articles on this page
    url_2 = selector.xpath('//html//body//table//tbody//tr//td[2]//a/@href')
    return url_2
# From an article page, extract the protagonists' names:
def get_info_2(url_2):
    # Pretend to be a browser so the IP does not get blocked
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
        'Host': 'www.jjwxc.net',
        'Cookie': '__gads=ID=adb1bd199ca80e42:T=1592032062:S=ALNI_MbfwuQah_VUIJ0eFciwrmcI0YVBcQ; CNZZDATA1275156903=1619785691-1592040877-null%7C1592040877; __cfduid=d5561249470bba6af47bba14f331c644e1592045059; UM_distinctid=172ad47ba1a345-0e33c371452b35-f7d123e-144000-172ad47ba1de; JJEVER=%7B%22sms_total%22%3A%220%22%2C%22fenzhan%22%3A%22noyq%22%2C%22ispayuser%22%3A%2229906827-1%22%2C%22foreverreader%22%3A%2229906827%22%7D; testcookie=yes; timeOffset_o=-395.800048828125; Hm_lvt_bc3b748c21fe5cf393d26c12b2c38d99=1592045842,1592104513,1592471583,1594093184; JJSESS=%7B%22clicktype%22%3A%22%22%7D; CNZZDATA30075907=cnzz_eid%3D149420612-1592030978-null%26ntime%3D1594098318; Hm_lpvt_bc3b748c21fe5cf393d26c12b2c38d99=1594102834',
    }
    # Retry up to 10 times; give up with an empty result if every attempt fails
    rsp = None
    tries = 10
    while tries > 0:
        try:
            rsp = requests.get(url_2, headers=headers)
            break
        except Exception:
            tries -= 1
    if rsp is None:
        return []
    soup = BeautifulSoup(rsp.content, 'lxml')
    # The protagonists field is the first span with class "bluetext"
    head_text = soup.find_all('span', class_='bluetext')
    if head_text == []:
        return []
    string = str(head_text[0])  # raw HTML of the protagonists field
    string = string[32:]        # drop the fixed-length leading part
    # Drop everything from the " ┃" separator onwards, plus bracketed notes and decorations
    string = re.sub(u" \\┃.*?\\>|\\(.*?\\)|\\【.*?\\】|\\(.*?\\)|·|★|…|\\《.*?\\》", "", string)
    # Normalise the various name separators to a single space, then split
    string = re.sub(",|、|;|x|X|/|,", " ", string)
    their_name = re.split(' ', string)
    print(their_name)
    return their_name  # list of protagonist names
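To make the two re.sub passes easier to follow, here is what they do to a made-up, already-truncated protagonists string (the sample text is invented; the empty strings left over by the split are stripped later in main()):
sample = "张三(少年版)、李四 x 王五"
step1 = re.sub(u" \\┃.*?\\>|\\(.*?\\)|\\【.*?\\】|\\(.*?\\)|·|★|…|\\《.*?\\》", "", sample)
# step1 == "张三、李四 x 王五"   (the bracketed note is removed)
step2 = re.sub(",|、|;|x|X|/|,", " ", step1)
# step2 == "张三 李四   王五"    (all separators become spaces)
print(re.split(' ', step2))
# ['张三', '李四', '', '', '王五']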
# Save a dict (or a list of key/count pairs) to a txt file as JSON:
def save_dict(data, which):
    num_str = which + '.txt'
    js = json.dumps(data)
    file = open(num_str, 'w')
    file.write(js)
    file.close()
# Count how often each character appears anywhere in the names:
def fir_last_name(name_list, min_page, max_page):
    # Tally into a dict of {character: count}
    word_num = {}
    name_str = ''.join(name_list)
    for i in range(len(name_str)):
        word_num[name_str[i]] = name_str.count(name_str[i])
    # Sort by count, descending (this turns the dict into a list of pairs)
    word_num = sorted(word_num.items(), key=lambda x: x[1], reverse=True)
    # Save to a txt file
    its_name_1 = '姓名的常用字' + " " + str(min_page) + "--" + str(max_page)
    save_dict(word_num, its_name_1)
    return
# Count how often each character appears as a surname (the first character of a name):
def last_name(name_list, min_page, max_page):
    # Collect the first character of every name
    last_list = []
    for i in range(len(name_list)):
        last_list.append(str(name_list[i])[0])
    # Tally into a dict of {character: count}
    word_num = {}
    last_str = ''.join(last_list)
    for i in range(len(last_str)):
        word_num[last_str[i]] = last_str.count(last_str[i])
    # Sort by count, descending
    word_num = sorted(word_num.items(), key=lambda x: x[1], reverse=True)
    print(word_num)
    # Save to a txt file
    its_name_1 = '姓氏的常用字' + " " + str(min_page) + "--" + str(max_page)
    save_dict(word_num, its_name_1)
    return
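Both counting loops above call str.count once per character, which rescans the whole string each time. A one-pass equivalent using collections.Counter, as a sketch only (not wired into the functions above; ties may come out in a different order):
from collections import Counter

name_list = ['林一', '沈二', '林三']  # made-up names
word_num = Counter(''.join(name_list)).most_common()
# [('林', 2), ('一', 1), ('沈', 1), ('二', 1), ('三', 1)]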
def main():
    # Collect the protagonists' names:
    name = []
    min_page = 1
    max_page = 2
    for page in range(min_page, max_page):  # each ranking page
        print("第", page, "页")
        link_1 = get_url_1(page)
        link_2_half = get_info_1(link_1)
        for num in range(0, len(link_2_half)):  # each article on the page
            print("文章位:", page, ".", num)
            link_2 = get_url_2(link_2_half[num])
            info = get_info_2(link_2)
            name += info
    name = [x.strip() for x in name if x.strip() != '']
    # Count the characters commonly used anywhere in the names:
    fir_last_name(name, min_page, max_page)
    # Count the characters commonly used as surnames:
    last_name(name, min_page, max_page)
    return
if __name__ == '__main__':
    main()
File 2: generate a word cloud from the statistics saved by File 1
import json
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator
import cv2  # only needed if the mask option below is re-enabled

# Load a dict saved by File 1 (a JSON list of (character, count) pairs):
def read_dict(which):
    file = open(which, 'r')
    js = file.read()
    dic = dict(json.loads(js))
    file.close()
    return dic
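For reference, the .txt files written by save_dict in File 1 contain JSON for the sorted (character, count) pairs, so dict() on the loaded list restores a {character: count} mapping, which is the shape generate_from_frequencies expects. A small round trip with invented numbers:
pairs = json.loads('[["林", 12], ["沈", 7]]')  # what a saved file looks like (made-up counts)
freq = dict(pairs)                              # {'林': 12, '沈': 7}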
# 4. Generate the word cloud and save it
def generate_word_cloud(dictionary):
    # color_mask = cv2.imread('./background.jpg')
    cloud = WordCloud(
        # Font file; without one, Chinese characters come out garbled.
        # (The font file name must not contain Chinese characters.)
        font_path="C:/Windows/Fonts/STXINGKA.TTF",
        # font_path=path.join(d,'simsun.ttc'),
        # Background colour (defaults to black, customise as needed)
        background_color='white',
        # Shape of the word cloud:
        # mask=color_mask,
        # Maximum number of words shown
        max_words=100,
        # Largest font size; defaults to the image height if unset
        max_font_size=120,
        # (Canvas width and height could go here; they are ignored when a mask is set.)
        # Fraction of words laid out horizontally (default 0.9, i.e. 0.1 vertical)
        prefer_horizontal=0.9,
        # Rendering scale (higher = sharper image)
        scale=32
    )
    cloud.generate_from_frequencies(dictionary)
    plt.imshow(cloud)
    plt.axis("off")  # hide the axes
    plt.show()
    cloud.to_file('Wordcloud.png')  # save the image as Wordcloud.png
    return
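If the commented-out mask lines are ever re-enabled, this is roughly how they and the so-far-unused ImageColorGenerator import could fit together. A hedged sketch: './background.jpg' is a placeholder path (white areas of the image are treated as background), the counts are invented, and cv2 loads images as BGR, so the array is converted to RGB first:
mask_img = cv2.cvtColor(cv2.imread('./background.jpg'), cv2.COLOR_BGR2RGB)
cloud = WordCloud(font_path="C:/Windows/Fonts/STXINGKA.TTF",
                  background_color='white', mask=mask_img, max_words=100)
cloud.generate_from_frequencies({'林': 12, '沈': 7})      # invented counts
cloud.recolor(color_func=ImageColorGenerator(mask_img))    # recolour words from the mask image
cloud.to_file('Wordcloud_masked.png')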
def main():
    # The file name must match what File 1 actually wrote,
    # i.e. the same page range (here 1--20):
    dic_1 = read_dict("姓氏的常用字 1--20.txt")
    print(dic_1)
    generate_word_cloud(dic_1)
    return
if __name__ == '__main__':
    main()