python打卡day35

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
import matplotlib.pyplot as plt
from tqdm import tqdm

# Pick the compute device: first CUDA GPU when available, otherwise CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
print(f"使用设备: {device}")

# Load the iris dataset: 150 samples, 4 numeric features, 3 classes.
iris = load_iris()
X, y = iris.data, iris.target

# Hold out 20% of the samples for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Scale each feature to [0, 1]. The scaler is fit on the training split only
# so no test-set statistics leak into training.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert everything to tensors and move it to the chosen device up front
# (full-batch training below, so the whole dataset lives on-device).
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)

# Hyper-parameter grid: each entry is one full training run, varying a single
# knob relative to the baseline configuration.
_BASE_CONFIG = {"hidden_size": 10, "lr": 0.01, "epochs": 20000}
configs = [
    dict(_BASE_CONFIG),                      # baseline
    dict(_BASE_CONFIG, hidden_size=20),      # wider hidden layer
    dict(_BASE_CONFIG, lr=0.05),             # higher learning rate
    dict(_BASE_CONFIG, epochs=10000),        # fewer training epochs
]

class MLP(nn.Module):
    """Two-layer perceptron for the 4-feature / 3-class iris task.

    Hoisted out of the config loop (it was redefined per iteration via a
    closure over ``config``) and parameterized by ``hidden_size`` instead.

    Args:
        hidden_size: number of units in the single hidden layer.
    """

    def __init__(self, hidden_size):
        super().__init__()
        self.fc1 = nn.Linear(4, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, 3)

    def forward(self, x):
        # Raw class logits; nn.CrossEntropyLoss applies log-softmax itself.
        return self.fc2(self.relu(self.fc1(x)))


for config in configs:
    print(f"\n=== 当前配置: 隐藏层={config['hidden_size']}, 学习率={config['lr']}, 轮数={config['epochs']} ===")

    model = MLP(config['hidden_size']).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'])

    # Full-batch training. Sample the loss (with its epoch) every 200 epochs
    # so the training curve can be plotted afterwards.
    epochs_logged = []
    losses = []
    with tqdm(total=config['epochs'], desc="训练进度", unit="epoch") as pbar:
        for epoch in range(config['epochs']):
            outputs = model(X_train)
            loss = criterion(outputs, y_train)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (epoch + 1) % 200 == 0:
                epochs_logged.append(epoch + 1)
                losses.append(loss.item())
                pbar.set_postfix({'Loss': f'{loss.item():.4f}'})

            # Advance the bar in coarse 1000-epoch steps to keep tqdm
            # overhead negligible relative to the tiny forward/backward pass.
            if (epoch + 1) % 1000 == 0:
                pbar.update(1000)

    # FIX: `losses` was collected but never visualized even though matplotlib
    # is imported for that purpose — plot the training curve for this config.
    plt.figure(figsize=(8, 4))
    plt.plot(epochs_logged, losses)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title(f"hidden={config['hidden_size']}, lr={config['lr']}, epochs={config['epochs']}")
    plt.show()

    # Evaluate on the held-out split without tracking gradients.
    model.eval()
    with torch.no_grad():
        outputs = model(X_test)
        _, predicted = torch.max(outputs, 1)
        correct = (predicted == y_test).sum().item()
        accuracy = correct / y_test.size(0)
        print(f'测试集准确率: {accuracy * 100:.2f}%')
使用设备: cpu

=== 当前配置: 隐藏层=10, 学习率=0.01, 轮数=20000 ===
训练进度: 100%|██████████| 20000/20000 [00:31<00:00, 633.99epoch/s, Loss=0.0607]
测试集准确率: 96.67%

=== 当前配置: 隐藏层=20, 学习率=0.01, 轮数=20000 ===
训练进度: 100%|██████████| 20000/20000 [00:35<00:00, 565.66epoch/s, Loss=0.0607]
测试集准确率: 96.67%

=== 当前配置: 隐藏层=10, 学习率=0.05, 轮数=20000 ===
训练进度: 100%|██████████| 20000/20000 [00:27<00:00, 724.62epoch/s, Loss=0.0476]
测试集准确率: 100.00%

=== 当前配置: 隐藏层=10, 学习率=0.01, 轮数=10000 ===
训练进度: 100%|██████████| 10000/10000 [00:16<00:00, 607.61epoch/s, Loss=0.0866]测试集准确率: 96.67%

@浙大疏锦行

Python中实现打卡兑换礼物的功能,通常涉及以下几个步骤:

1. **数据结构设计**:创建一个数据结构来存储用户的打卡记录,比如用字典,key 为用户ID,value 为按日期记录的打卡信息。

```python
users_gifts = {}  # key: 用户ID, value: {日期字符串: 当日打卡次数}
```

2. **添加打卡功能**:编写函数,当用户调用时累加当日打卡次数。可以使用 `datetime` 库记录打卡日期。

```python
import datetime

def check_in(user_id):
    today = datetime.date.today().isoformat()
    day_counts = users_gifts.setdefault(user_id, {})
    day_counts[today] = day_counts.get(today, 0) + 1
```

3. **条件判断与兑换规则**:设定一个规则,如连续7天打卡即可兑换一份礼物。把日期字符串解析成 `date` 对象并排序,检查是否存在连续 7 天的打卡序列。

```python
def can_exchange(user_id):
    days = sorted(
        datetime.date.fromisoformat(d)
        for d in users_gifts.get(user_id, {})
    )
    streak = 1
    for prev, cur in zip(days, days[1:]):
        streak = streak + 1 if (cur - prev).days == 1 else 1
        if streak >= 7:  # 找到连续7天
            return True
    return False
```

4. **兑换操作**:如果满足兑换条件,删除已用于兑换的打卡记录,并通知用户兑换成功。

```python
def redeem_gift(user_id):
    if can_exchange(user_id):
        for day in sorted(users_gifts[user_id])[:7]:  # 删除最早7天的打卡记录
            del users_gifts[user_id][day]
        print(f"恭喜用户{user_id},您的7天连续打卡已成功兑换礼物!")
```
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值