下面三种方式速度都没问题,根据实际情况选择即可
一、代码层面切割好list,然后插入
// Uses org.apache.commons.collections4.ListUtils: split the list into chunks of 1000 rows
List<List<DeptDO>> chunks = ListUtils.partition(deptList, 1000);
// Insert each 1000-row chunk with a single batch INSERT statement
chunks.forEach(systemDeptMapper::insertBatch);
二、使用mybatisplus批量插入方法
/**
 * Batch-inserts the given entities using the default batch size
 * ({@code IService.DEFAULT_BATCH_SIZE}).
 *
 * @param entityList entities to insert; nothing is inserted when empty
 * @param <T>        entity type
 * @return {@code true} if the batch executed, {@code false} if the collection was empty
 */
public static <T> boolean saveBatch(Collection<T> entityList) {
    return saveBatch(entityList, IService.DEFAULT_BATCH_SIZE);
}
/**
 * Batch-inserts the given entities, flushing every {@code batchSize} rows.
 *
 * @param entityList entities to insert; nothing is inserted when empty
 * @param batchSize  number of rows per JDBC batch flush
 * @param <T>        entity type
 * @return {@code true} if the batch executed, {@code false} if the collection was empty
 */
public static <T> boolean saveBatch(Collection<T> entityList, int batchSize) {
    if (CollectionUtils.isEmpty(entityList)) {
        return false;
    }
    // Resolve the entity's mapper class from the MyBatis namespace registered for it
    Class<T> entityClass = getEntityClass(entityList);
    Class<?> mapperClass = ClassUtils.toClassConfident(getTableInfo(entityClass).getCurrentNamespace());
    // Look up the single-row INSERT statement id; executeBatch replays it per entity
    String sqlStatement = SqlHelper.getSqlStatement(mapperClass, SqlMethod.INSERT_ONE);
    return SqlHelper.executeBatch(entityClass, LogFactory.getLog(Db.class), entityList, batchSize, (sqlSession, entity) -> sqlSession.insert(sqlStatement, entity));
}
三、原生jdbc批量插入
// Container-injected connection pool used for the raw-JDBC batch insert below
@Resource(name = "dataSource")
private DataSource dataSource;
@Override
public void insertJdbc(List<User> userList) throws SQLException {
    // create_time is set by the database via now(); the other columns are bound per row
    String sql = "INSERT INTO `user` (`username`, `address`, `remark`, `age`, `create_time`) " +
            "VALUES (?,?,?,?,now()) ";
    // try-with-resources closes statement and connection even on failure, and avoids
    // the NPE the old finally block had when getConnection() itself threw
    try (Connection connection = dataSource.getConnection();
         PreparedStatement statement = connection.prepareStatement(sql)) {
        // Disable auto-commit so the whole batch commits (or rolls back) as one transaction
        connection.setAutoCommit(false);
        try {
            for (User user : userList) {
                statement.setString(1, user.getUsername());
                statement.setString(2, user.getAddress());
                statement.setString(3, user.getRemark());
                statement.setInt(4, user.getAge());
                statement.addBatch();
            }
            statement.executeBatch();
            connection.commit();
        } catch (SQLException e) {
            // Undo any partially executed batch, then propagate instead of
            // swallowing the error with printStackTrace as before
            connection.rollback();
            throw e;
        }
    }
}