max_join_size报错

当尝试执行大型JOIN查询时,遇到了MySQL的MAX_JOIN_SIZE错误。通过设置SQL_BIG_SELECTS=1可以临时解决,但问题根源在于启动脚本、my.cnf配置文件或用户配置。检查发现用户配置文件中的绑定IP导致了限制,移除特定参数后问题解决。
摘要由CSDN通过智能技术生成
问题:

MariaDB [fltpricedb]> select count(id) from fdflightcabinpricedetail;
ERROR 1104 (42000): The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay
MariaDB [fltpricedb]> SET SQL_BIG_SELECTS=1;
Query OK, 0 rows affected (0.00 sec)

MariaDB [fltpricedb]> select count(id) from fdflightcabinpricedetail;
+-----------+
| count(id) |
+-----------+
|  11491064 |
+-----------+
1 row in set (2.50 sec)


思路:

#/data/mysql/bin/my_print_defaults  --mysqld

--lower_case_table_names=1
--port=33107
--socket=/data/mysql/mysql.sock
--pid-file=/data/mysql/db01.zp.com.pid
--basedir=/data/mysql/
--datadir=/data/mysql/data
--tmpdir=/data/mysql/tmp
--character-set-server=utf8
--default-storage-engine=INNODB
--sql-mode=STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
--table_open_cache=2000
--open_files_limit=65535
--sort_buffer_size=2M
--thread_cache_size=300
--tmp_table_size=256M
--key_buffer_size=2048M
--read_buffer_size=1M
--read_rnd_buffer_size=16M
--query_cache_type=0
--query_cache_size=20M
--query_cache_limit=0M
--skip-name-resolve
--skip-host-cache
--skip-external-locking
--init_connect=SET autocommit  =  0
--init_connect=SET NAMES utf8
--init_connect=SET character_set_client  =  utf8
--init_connect=SET character_set_results  =  utf8
--init_connect=SET character_set_connection  =  utf8
--skip-character-set-client-handshake
--wait_timeout=10
--max_allowed_packet=64M
--max_connections=1200
--max_connect_errors=6000
--event_scheduler=ON
--general_log=0
--general_log_file=/data/mysql/mysql_logs/mysql.log
--log-output=file
--log_warnings=2
--back_log=1500
--server_id=1010
--binlog-format=ROW
--log-output=file
--log-bin=/data/mysql/mysql_logs/binary_log/mysqldb01-bin
--log-bin-index=/data/mysql/mysql_logs/binary_log/mysqldb01-bin.index
--binlog_cache_size=4M
--max_binlog_size=100M
--max_binlog_cache_size=512m
--expire_logs_days=10
--binlog-checksum=CRC32
--master-verify-checksum=1
--slave-sql-verify-checksum=1
  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
```python
import torch
import os
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import matplotlib.pyplot as plt

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(32 * 9 * 9, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        x = self.pool(nn.functional.relu(self.conv1(x)))
        x = self.pool(nn.functional.relu(self.conv2(x)))
        x = x.view(-1, 32 * 9 * 9)
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

folder_path1 = 'random_matrices2'
# 创建空的tensor
x = torch.empty((40, 1, 42, 42))
# 遍历文件夹内的文件,将每个矩阵转化为tensor并存储
for j in range(40):
    for j in range(40):
        file_name = 'matrix_{}.npy'.format(i)
        file_path1 = os.path.join(folder_path1, file_name)
        matrix1 = np.load(file_path1)
        x[j] = torch.from_numpy(matrix1).unsqueeze(0)

folder_path2 = 'random_label2'
y = torch.empty((40, ))
for k in range(40):
    for k in range(40):
        file_name = 'label_{}.npy'.format(i)
        file_path2 = os.path.join(folder_path2, file_name)
        matrix2 = np.load(file_path2)
        y[k] = torch.from_numpy(matrix2)

losses = []
for epoch in range(10):
    running_loss = 0.0
    for i in range(40):
        inputs, labels = x[i], y[i]
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    losses.append(running_loss / 40)
    print('[%d] loss: %.3f' % (epoch + 1, running_loss / 40))
print('Finished Training')

plt.plot(losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
```

报错:ValueError: Expected input batch_size (1) to match target batch_size (0). 不改变y的形状,改变输出,怎么修改?
05-25
你可以在创建空的tensor y 时,指定数据类型为 long 并且指定 y 的维度为 (40, 1),然后在读取标签文件时,先将 numpy 数组转换为 torch tensor,使用 squeeze() 方法将维度为 1 的维度去掉,最后使用 view() 方法将维度转换为 (1, )。修改后的代码如下:

```python
import torch
import os
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
import matplotlib.pyplot as plt

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(32 * 9 * 9, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        x = self.pool(nn.functional.relu(self.conv1(x)))
        x = self.pool(nn.functional.relu(self.conv2(x)))
        x = x.view(-1, 32 * 9 * 9)
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

folder_path1 = 'random_matrices2'
# 创建空的tensor
x = torch.empty((40, 1, 42, 42))
# 遍历文件夹内的文件,将每个矩阵转化为tensor并存储
for j in range(40):
    file_name = 'matrix_{}.npy'.format(j)
    file_path1 = os.path.join(folder_path1, file_name)
    matrix1 = np.load(file_path1)
    x[j] = torch.from_numpy(matrix1).unsqueeze(0)

folder_path2 = 'random_label2'
# 创建空的tensor
y = torch.empty((40, 1), dtype=torch.long)
# 遍历文件夹内的文件,将每个标签转化为tensor并存储
for k in range(40):
    file_name = 'label_{}.npy'.format(k)
    file_path2 = os.path.join(folder_path2, file_name)
    matrix2 = np.load(file_path2)
    y[k] = torch.from_numpy(matrix2).squeeze().view(1)

losses = []
for epoch in range(10):
    running_loss = 0.0
    for i in range(40):
        inputs, labels = x[i], y[i]
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    losses.append(running_loss / 40)
    print('[%d] loss: %.3f' % (epoch + 1, running_loss / 40))
print('Finished Training')

plt.plot(losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
```

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值