Follow-up to the question:
飞雪无情 wrote:
Spring's batch insert is somewhat limited; for a special requirement like yours I don't think it can do it, so you have to fall back on the workaround I described above. I don't think performance will suffer much. If you look at Spring's batch insert, the BatchPreparedStatementSetter interface's method setValues(PreparedStatement ps, int i) takes an index i, so Spring must be iterating internally as well, otherwise how would it know the value of i. So use that workaround when you have special requirements; otherwise I recommend the batch operations Spring provides, which is what we do in our project.
Having read the source of these two Spring methods, I feel the wrapping was not thought through very carefully and the limitations really are too great. For example, in one batch save some rows may be inserts and others updates, and in that case the only option is the workaround you describe; by comparison, an ORM framework's persistence layer is still more convenient to use.
Looking at the source, these two Spring methods really do have a design flaw: they are not flexible enough, and returning only the number of rows the database affected is of little practical use.
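To make the quoted suggestion concrete, this is roughly how that batch API is normally used; a minimal sketch, where the t_user table, its name column and the jdbcTemplate field are made-up assumptions for illustration:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;

public class UserBatchDao {

    private JdbcTemplate jdbcTemplate;

    public void setJdbcTemplate(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    /** Inserts every name in the list with one batched statement. */
    public int[] insertNames(final List names) {
        String sql = "insert into t_user (name) values (?)";
        return jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
            // called once per row; i is the index Spring iterates internally
            public void setValues(PreparedStatement ps, int i) throws SQLException {
                ps.setString(1, (String) names.get(i));
            }
            public int getBatchSize() {
                return names.size();
            }
        });
    }
}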
// What I actually have in mind is a method like this
/**
 * Executes the given SQL in batches and returns the generated keys.
 *
 * @author BAOSJ
 * @date 2010-7-2
 * @param sql parameterized SQL, one "?" per column
 * @param values one Object[] of parameter values per row
 * @param batchSize number of rows to send to the database per batch
 * @return the generated keys of the executed rows, in order
 * @throws Exception
 */
public List saveOrUpdateAll(String sql, List values,
        Integer batchSize) throws Exception {
    ResultSet rs = null;
    PreparedStatement ps = null;
    Connection conn = null;
    // collects the generated keys of the executed rows
    List ids = new ArrayList();
    try {
        conn = getConnection();
        conn.setAutoCommit(false);
        ps = conn.prepareStatement(sql,
                PreparedStatement.RETURN_GENERATED_KEYS);
        for (int i = 0; i < values.size(); i++) {
            Object[] objects = (Object[]) values.get(i);
            // bind the parameters of the current row (JDBC indexes start at 1)
            for (int j = 0; j < objects.length; j++) {
                ps.setObject(j + 1, objects[j]);
            }
            ps.addBatch();
            // flush every batchSize rows and once more for the final partial batch
            if ((i + 1) % batchSize == 0 || i == values.size() - 1) {
                ps.executeBatch();
                rs = ps.getGeneratedKeys();
                while (rs.next()) {
                    ids.add(rs.getObject(1));
                }
                rs.close();
            }
        }
        conn.commit();
    } catch (Exception e) {
        if (conn != null) {
            conn.rollback();
        }
        throw e;
    } finally {
        destroy(rs, ps, conn);
    }
    return ids;
}
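Called from application code it would look roughly like this; a usage sketch, where the t_user table and its columns are made up for illustration and values holds one Object[] per row:

// usage sketch: assumes the saveOrUpdateAll above and a hypothetical t_user(name, age) table
List values = new ArrayList();
values.add(new Object[] { "tom", new Integer(20) });
values.add(new Object[] { "jerry", new Integer(25) });
// rows go to the database in batches of 50; ids holds the generated keys in order
List ids = saveOrUpdateAll("insert into t_user (name, age) values (?, ?)", values, new Integer(50));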
// Spring's update source
public int update(final PreparedStatementCreator psc, final KeyHolder generatedKeyHolder)
        throws DataAccessException {

    Assert.notNull(generatedKeyHolder, "KeyHolder must not be null");
    logger.debug("Executing SQL update and returning generated keys");
    Integer result = (Integer) execute(psc, new PreparedStatementCallback() {
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
            int rows = ps.executeUpdate();
            List generatedKeys = generatedKeyHolder.getKeyList();
            generatedKeys.clear();
            ResultSet keys = ps.getGeneratedKeys();
            if (keys != null) {
                try {
                    RowMapper rowMapper = getColumnMapRowMapper();
                    RowMapperResultSetExtractor rse = new RowMapperResultSetExtractor(rowMapper, 1);
                    generatedKeys.addAll((List) rse.extractData(keys));
                }
                finally {
                    JdbcUtils.closeResultSet(keys);
                }
            }
            if (logger.isDebugEnabled()) {
                logger.debug("SQL update affected " + rows + " rows and returned " + generatedKeys.size() + " keys");
            }
            return new Integer(rows);
        }
    });
    return result.intValue();
}
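This update(PreparedStatementCreator, KeyHolder) overload does hand back the generated key, but only for one statement at a time; a minimal single-row sketch, where the t_user table and the jdbcTemplate field are made-up assumptions (to collect keys for many rows you would have to call it in a loop):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.PreparedStatementCreator;
import org.springframework.jdbc.support.GeneratedKeyHolder;
import org.springframework.jdbc.support.KeyHolder;

public class UserDao {

    private JdbcTemplate jdbcTemplate;

    /** Inserts one row and returns its generated key via the update() overload above. */
    public Number insertAndReturnKey(final String name) {
        KeyHolder keyHolder = new GeneratedKeyHolder();
        jdbcTemplate.update(new PreparedStatementCreator() {
            public PreparedStatement createPreparedStatement(Connection con) throws SQLException {
                PreparedStatement ps = con.prepareStatement(
                        "insert into t_user (name) values (?)",
                        Statement.RETURN_GENERATED_KEYS);
                ps.setString(1, name);
                return ps;
            }
        }, keyHolder);
        return keyHolder.getKey();
    }
}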
// Spring's batchUpdate source
public int[] batchUpdate(String sql, final BatchPreparedStatementSetter pss) throws DataAccessException {
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL batch update [" + sql + "]");
    }
    return (int[]) execute(sql, new PreparedStatementCallback() {
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
            try {
                int batchSize = pss.getBatchSize();
                InterruptibleBatchPreparedStatementSetter ipss =
                        (pss instanceof InterruptibleBatchPreparedStatementSetter ?
                        (InterruptibleBatchPreparedStatementSetter) pss : null);
                if (JdbcUtils.supportsBatchUpdates(ps.getConnection())) {
                    for (int i = 0; i < batchSize; i++) {
                        pss.setValues(ps, i);
                        if (ipss != null && ipss.isBatchExhausted(i)) {
                            break;
                        }
                        ps.addBatch();
                    }
                    return ps.executeBatch();
                }
                else {
                    List rowsAffected = new ArrayList();
                    for (int i = 0; i < batchSize; i++) {
                        pss.setValues(ps, i);
                        if (ipss != null && ipss.isBatchExhausted(i)) {
                            break;
                        }
                        rowsAffected.add(new Integer(ps.executeUpdate()));
                    }
                    int[] rowsAffectedArray = new int[rowsAffected.size()];
                    for (int i = 0; i < rowsAffectedArray.length; i++) {
                        rowsAffectedArray[i] = ((Integer) rowsAffected.get(i)).intValue();
                    }
                    return rowsAffectedArray;
                }
            }
            finally {
                if (pss instanceof ParameterDisposer) {
                    ((ParameterDisposer) pss).cleanupParameters();
                }
            }
        }
    });
}
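The isBatchExhausted check seen in this source lets the batch stop before getBatchSize() rows are reached, which helps when the row count is not known up front; a sketch under that assumption, with a made-up t_user table, a jdbcTemplate field and an Iterator of names:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Iterator;

import org.springframework.jdbc.core.InterruptibleBatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;

public class StreamingBatchDao {

    private JdbcTemplate jdbcTemplate;

    /** Batches rows read from an iterator whose size is not known in advance. */
    public int[] insertAll(final Iterator names) {
        return jdbcTemplate.batchUpdate("insert into t_user (name) values (?)",
                new InterruptibleBatchPreparedStatementSetter() {
                    private boolean exhausted = false;

                    // bind the next row if there is one; otherwise flag the batch as done
                    public void setValues(PreparedStatement ps, int i) throws SQLException {
                        if (names.hasNext()) {
                            ps.setString(1, (String) names.next());
                        } else {
                            exhausted = true;
                        }
                    }

                    // checked after each setValues call; true stops the loop before addBatch
                    public boolean isBatchExhausted(int i) {
                        return exhausted;
                    }

                    // the real size is unknown, so report an upper bound
                    public int getBatchSize() {
                        return Integer.MAX_VALUE;
                    }
                });
    }
}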