Python Lab | Grade Processing with Excel Files

Objectives:
1. Learn how to install and use the third-party library openpyxl.
2. Gain fluency in using dictionaries to solve practical problems.
Task:
Suppose a school allows every course to be examined multiple times per semester, and students may take an exam at any time. The system automatically appends each score to an Excel file with three columns: name, course, and grade. At the end of the semester, the highest grade of every student in every course must be determined.
Write a program that simulates the grades of a number of students and writes them to an Excel file. Student names and course names may repeat, i.e. several grades for the same course are allowed. Then compute the highest grade of every student in every course and write the results to a new Excel file.
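The key data structure is a nested dictionary that maps each student name to an inner dictionary of {course: highest grade}. A minimal sketch of that idea, using a few made-up records for illustration:

# Made-up sample records: (name, course, grade)
records = [('张三', '数学', 80), ('张三', '数学', 95), ('李四', '语文', 70)]

result = {}  # {name: {course: highest grade}}
for name, course, grade in records:
    courses = result.get(name, {})        # inner dict for this student, or a new one
    if grade > courses.get(course, 0):    # keep only the highest grade seen so far
        courses[course] = grade
        result[name] = courses

print(result)  # {'张三': {'数学': 95}, '李四': {'语文': 70}}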
Steps:
1. At a command prompt, install the openpyxl library with the command pip install openpyxl.
2. Write the code.
The complete code is shown below.

from random import choice, randint
from openpyxl import Workbook, load_workbook
# Generate random data
def generateRandomInformation(filename):
    workbook = Workbook()
    worksheet = workbook.worksheets[0]
    worksheet.append(['姓名','课程','成绩'])
    # Candidate characters for the first, second, and third character of a Chinese name
    first = '赵钱孙李'
    middle = '伟昀琛东'
    last = '坤艳志'
    subjects = ('语文','数学','英语')
    for i in range(200):
        name = choice(first)
        # Roughly half the time a middle character is added; otherwise the name has only two characters
        if randint(1,100)>50:
            name = name + choice(middle)
        name = name + choice(last)
        # Generate the name, course name, and grade in turn
        worksheet.append([name, choice(subjects), randint(0, 100)])
    # Save the data as an Excel 2007 format (.xlsx) file
    workbook.save(filename)

def getResult(oldfile, newfile):
    # Dictionary for holding the result data: {name: {course: highest grade}}
    result = dict()
    # Open the original data file
    workbook = load_workbook(oldfile)
    worksheet = workbook.worksheets[0]
    # Iterate over the original data, skipping the header row
    for row in worksheet.rows:
        if row[0].value == '姓名':
            continue
        # Name, course name, and grade for this attempt
        name, subject, grade = map(lambda cell: cell.value, row)
        # Get the course and grade information recorded for the current name;
        # if result does not yet contain this name, fall back to an empty dict
        t = result.get(name, {})
        # Get the current student's grade for the current course, defaulting to 0 if absent
        f = t.get(subject, 0)
        # Keep only this student's highest grade for this course
        if grade > f:
            t[subject] = grade
            result[name] = t
    workbook1 = Workbook()
    worksheet1 = workbook1.worksheets[0]
    worksheet1.append(['姓名', '课程', '成绩'])
    # Write the result data from the result dictionary into the Excel file
    for name, t in result.items():
        print(name, t)
        for subject, grade in t.items():
            worksheet1.append([name, subject, grade])
    workbook1.save(newfile)
if __name__ == '__main__':
    oldfile = r'd:\test.xlsx'
    newfile = r'd:\result.xlsx'
    generateRandomInformation(oldfile)
    getResult(oldfile, newfile)
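To spot-check the output, the result file can be read back with openpyxl and printed row by row; a quick sketch, using the same result path as in the main program:

from openpyxl import load_workbook

wb = load_workbook(r'd:\result.xlsx')    # the newfile written by getResult
ws = wb.worksheets[0]
for row in ws.rows:
    print([cell.value for cell in row])  # header row first, then one row per student/course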
