有段老代码是把数据一条一条地插入到数据库,感觉有点慢,需要用批量插入优化一下。那么批量插入比单条插入到底能快多少?下面做个简单的实验供大家参考。
数据表
-- Benchmark table for the single-row vs. batched INSERT comparison below.
-- The C++ snippets insert by column POSITION (no column list), so the
-- column order here is part of their contract.
-- NOTE(review): `IsRedefned` looks like a typo for `IsRedefined`; kept
-- as-is since it is the live schema name -- confirm before renaming.
-- NOTE(review): INT(4) is only a display width (deprecated in MySQL 8.0);
-- it does not restrict the stored value range.
-- NOTE(review): MySQL's `utf8` is the 3-byte subset; consider `utf8mb4`
-- if full Unicode (e.g. emoji) must be stored.
CREATE TABLE `test_bak`
(
`ID` INT(4) UNSIGNED NOT NULL,
`IsRedefned` INT(4) UNSIGNED NOT NULL DEFAULT 0,
`Level` INT(4) UNSIGNED NOT NULL DEFAULT 0,
`Type` INT(4) UNSIGNED NOT NULL DEFAULT 1,
`Description` VARCHAR(256) NOT NULL DEFAULT '',
`Solution` VARCHAR(256) NOT NULL DEFAULT '',
PRIMARY KEY (`ID`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
单条插入一千万条:
// Baseline: one INSERT statement (and one server round trip) per row.
// Ten million single-row inserts took ~323 s; the cost is dominated by
// per-statement network round trips, SQL parsing, and logging.
// NOTE(review): SQL is built by string concatenation -- fine for these
// constant values, but use prepared statements for any untrusted input.
for (U32 i = 1; i <= 10000000; i++) {
    ss << "insert into " << m_table.table_bak()
       << " values"
       << " (" << i              // ID (primary key)
       << ", " << 0              // IsRedefned
       << ", " << 1              // Level
       << ", " << 1              // Type
       << ", '" << "1" << "'"    // Description: quote VARCHAR values instead of
       << ", '" << "1" << "'"    // Solution: relying on int->varchar coercion
       << ")";
    // Build the statement string once instead of calling ss.str() twice.
    const std::string sql = ss.str();
    ret = GetDB()->WriteDB(sql.c_str(), sql.length());
    ss.str("");                  // reset the buffer for the next statement
}
一共耗时323秒。
批量插入:每条 INSERT 语句带一百万行,共插入一千万条:
// Batched variant: pack kBatchRows rows into each multi-row INSERT.
// 10M rows in 1M-row batches took ~41 s (~7.8x faster than row-at-a-time).
// NOTE(review): a 1M-row statement is very large -- confirm the server's
// max_allowed_packet can hold it, or shrink the batch size.
const U32 kTotalRows = 10000000;
const U32 kBatchRows = 1000000;
ss << "insert into " << m_table.table_bak() << " values";
for (U32 i = 1; i <= kTotalRows; i++) {
    // Every row except the first of its batch is preceded by a separator.
    // (Replaces the old signed `key` tracker, which mixed int and U32.)
    if ((i - 1) % kBatchRows != 0) {
        ss << " , ";
    }
    ss << " (" << i              // ID (primary key)
       << ", " << 0              // IsRedefned
       << ", " << 1              // Level
       << ", " << 1              // Type
       << ", '" << "1" << "'"    // Description: quote VARCHAR values instead of
       << ", '" << "1" << "'"    // Solution: relying on int->varchar coercion
       << ")";
    if (i % kBatchRows == 0) {
        // Batch is full: flush it and start the next statement.
        const std::string sql = ss.str();   // build the string once
        ret = GetDB()->WriteDB(sql.c_str(), sql.length());
        ss.str("");
        ss << "insert into " << m_table.table_bak() << " values";
    }
}
// Flush a final partial batch (a no-op when kTotalRows divides evenly).
if (kTotalRows % kBatchRows != 0) {
    const std::string sql = ss.str();
    ret = GetDB()->WriteDB(sql.c_str(), sql.length());
}
耗时:41秒
从结果看,批量插入比单条插入快了约 7.8 倍(323 秒 / 41 秒)。这主要是因为批量插入减少了网络交互次数、SQL 语句的解析次数,以及 MySQL 内部各种日志的记录次数。
看来以后写程序还是批量插入比较好!!!