Stage 1: Read the government work report file
# Read the whole report, then print its first n characters
with open('src/政府工作报告.txt', 'r', encoding='utf-8') as f:
    txt = f.read()
n = int(input())
print(txt[:n])
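A note on the slice: txt[:n] is safe even when n exceeds the length of the text, since Python slicing clamps to the string's bounds, so no length check is needed. A minimal demonstration on a short made-up string:
sample = '2023年政府工作报告'
print(sample[:3])    # first three characters: '202'
print(sample[:999])  # slicing past the end returns the whole string, no IndexError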
Stage 2: Split the text into a list of clauses
# Fill in your code
with open('src/政府工作报告.txt', 'r', encoding='utf-8') as f:
    txt = f.read()
# Replace the fullwidth comma, period, and semicolon with spaces, then split on whitespace
txt = txt.replace(',', ' ').replace('。', ' ').replace(';', ' ')
txt = txt.split()
n = int(input())
print(txt[:n])
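The same split can be done in one pass with a regular expression; this is a sketch of an alternative, not part of the graded answer, that treats the fullwidth comma, period, semicolon, and whitespace as one delimiter class:
import re

with open('src/政府工作报告.txt', 'r', encoding='utf-8') as f:
    txt = f.read()
# filter(None, ...) drops the empty strings that re.split can produce at the edges
clauses = list(filter(None, re.split(r'[,。;\s]+', txt)))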
Stage 3: Extract the clauses that contain digits
# Fill in your code
with open('src/政府工作报告.txt', 'r', encoding='utf-8') as f:
    txt = f.read()
txt = txt.replace(',', ' ').replace('。', ' ').replace(';', ' ')
txt = txt.split()
n = int(input())
num = [str(i) for i in range(10)]  # the digit characters '0' through '9'
# Keep a clause when its character set intersects the digit set; print the first n matches
print('\n'.join([c for c in txt if set(c) & set(num)][:n]))
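The digit test can also be written with any() and str.isdigit(); note the two are not strictly equivalent, because isdigit() additionally matches fullwidth and other Unicode digits. A sketch on two made-up clauses:
clauses = ['国内生产总值增长5%左右', '坚持稳中求进工作总基调']
with_digits = [c for c in clauses if any(ch.isdigit() for ch in c)]
print(with_digits)  # ['国内生产总值增长5%左右']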
Stage 4: Extract clauses related to a given topic
# Fill in your code
with open('src/政府工作报告.txt', 'r', encoding='utf-8') as f:
    s = f.read()
education = ['高校','培训','基础研究','学生','扩招','培养','教育']  # education-related keywords
environment = ['清洁','森林','排放','土地','植被','能耗','湿地','能源','水体','公园','颗粒物','发电']  # environment-related keywords
economic = ['就业','消费','市场','失业','工业','农业','费用','债券','土地','措施','汽车','赤字','企业','经济','生产总值','商品','制造','装备','财政','投资','金融','税','支付','销量','外汇','通胀','收入','贫困','预算','贷款','保险','储备','住房','跨境','进口','出口','进出口','自贸','关税','发电']  # economy-related keywords
medical = ['卫生','医疗','医学','救助','补助','学科']  # healthcare-related keywords
transport = ['交通','运输','汽车','公路','铁路','机场','货物']  # transport-related keywords
science = ['科技','创新','技术','双创','研究','专精特新','学科']  # science-and-innovation keywords
s = s.replace(',', ' ').replace('。', ' ').replace(';', ' ')
s = s.split()
n = input()
num = [str(i) for i in range(10)]
data = [c for c in s if set(c) & set(num)]  # clauses that contain digits, as in Stage 3
def get_data(data, infer):
    # Return the clauses in data that contain at least one keyword from infer
    res = []
    for d in data:
        for i in infer:
            if i in d:
                res.append(d)
                break  # one matching keyword is enough; avoid duplicate clauses
    return res
if n == '教育':
    data = get_data(data, education)
    print('\n'.join(data))
elif n == '环保':
    data = get_data(data, environment)
    print('\n'.join(data))
elif n == '经济':
    data = get_data(data, economic)
    print('\n'.join(data))
elif n in ['医疗', '卫生']:
    data = get_data(data, medical)
    print('\n'.join(data))
elif n in ['交通', '运输']:
    data = get_data(data, transport)
    print('\n'.join(data))
elif n in ['科技', '创新']:
    data = get_data(data, science)
    print('\n'.join(data))
elif n == '数据':
    print('\n'.join(data))
else:
    print('无对应操作')  # no matching topic
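The if/elif ladder can be collapsed into a dictionary that maps each accepted input to its keyword list. A sketch that reuses the lists, data, and get_data from above and produces the same output:
topics = {
    '教育': education, '环保': environment, '经济': economic,
    '医疗': medical, '卫生': medical,
    '交通': transport, '运输': transport,
    '科技': science, '创新': science,
}
if n == '数据':
    print('\n'.join(data))
elif n in topics:
    print('\n'.join(get_data(data, topics[n])))
else:
    print('无对应操作')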
Stage 5: Extract the high-frequency words in the report
# Fill in your code
import jieba    # jieba segments Chinese text into words
import logging  # used to set jieba's log level
jieba.setLogLevel(logging.INFO)  # silence jieba's startup messages
with open('src/政府工作报告.txt', 'r', encoding='utf-8') as f:
    s = f.read()
with open('src/stopword.txt', 'r', encoding='utf-8') as f:
    stop = f.read().split()  # load the stop-word list
s = jieba.lcut(s)  # segment in accurate mode
s = [c for c in s if c not in stop and len(c) > 1]  # drop stop words and single-character tokens
dic = {}
for c in s:  # count word frequencies
    dic[c] = dic.get(c, 0) + 1
n = int(input())
# Sort by frequency, from high to low
dic = sorted(dic.items(), key=lambda x: x[1], reverse=True)
for k, v in dic[:n]:
    print(f'{k:<4}{v:>4}')
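The manual counting and sorting map directly onto collections.Counter, whose most_common() already returns (word, count) pairs from most to least frequent. A sketch, assuming the segmented and filtered list s and the integer n from above:
from collections import Counter

freq = Counter(s)                 # word -> occurrence count
for k, v in freq.most_common(n):  # sorted from high to low
    print(f'{k:<4}{v:>4}')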