需求
- 已获得的ID不可再次获取
- 需持久化
- 范围有限
- 每次取出的数值都比上一次的+1[避免浪费]
- 高性能
借助MongoDB方式
使用MongoDB的原子操作findAndModify将计数器自增并取回自增后的新值,实现简单。
@Service
public class IdService {
@Resource
private MongoTemplate mongoTemplate;
private static final String COLLECTION = "ids";
private static final String ID = "id";
private static final String PRIMARY_KEY = "_id";
private static final String ID_KEY = "last_id";
//初始化id的值
@PostConstruct
public void init() {
BasicDBObject json = new BasicDBObject();
json.put(PRIMARY_KEY, ID_KEY);
json.put(ID, 1);
try {
mongoTemplate.insert(json, COLLECTION);
} catch (DuplicateKeyException ex) {
}
}
public long nextId() {
Query query = new Query(Criteria.where(PRIMARY_KEY).is(ID_KEY));
Update update = new Update().inc(ID, 1);
final FindAndModifyOptions option = new FindAndModifyOptions();
option.returnNew(true);
BasicDBObject json = mongoTemplate.findAndModify(query, update, option, BasicDBObject.class, COLLECTION);
return json.getLong(ID);
}
复制代码
}
借助Redis方式
当使用数据库来生成ID性能不够要求的时候,我们可以尝试使用Redis来生成ID。
可以用Redis的原子操作INCR和INCRBY来实现 不依赖于数据库,灵活方便,且性能优于数据库。
@Service
public class IdService {
@Resource
private JedisPool jedisPool;
public Long nextId(){
return PoolUtils.doWorkInPool(jedisPool, new PoolUtils.PoolWork<Long>(){
@Override
public Long doWork(Jedis poolResource) {
return poolResource.incr("id");
}
});
}
复制代码
}
public final class PoolUtils {
public static <V> V doWorkInPool(final JedisPool pool, final PoolWork<V> work) {
if (pool == null) {
throw new IllegalArgumentException("pool must not be null");
}
if (work == null) {
throw new IllegalArgumentException("work must not be null");
}
Jedis poolResource = null;
final V result;
try {
poolResource = pool.getResource();
result = work.doWork(poolResource);
} finally {
if (poolResource != null) {
poolResource.close();
}
}
return result;
}
public interface PoolWork<V> {
V doWork(Jedis poolResource);
}
private PoolUtils() {
}
复制代码
}
@Bean(destroyMethod = "close")
public JedisPool jedisPool() {
JedisPoolConfig jedisPoolConfig = new JedisPoolConfig();
jedisPoolConfig.setMaxTotal(Integer.valueOf(env.getProperty("redis.pool.maxActive").trim()));
jedisPoolConfig.setMaxIdle(Integer.valueOf(env.getProperty("redis.pool.maxIdle").trim()));
jedisPoolConfig.setMinIdle(Integer.valueOf(env.getProperty("redis.pool.minIdle").trim()));
jedisPoolConfig.setMaxWaitMillis(Long.valueOf(env.getProperty("redis.pool.maxWaitMillis").trim()));
jedisPoolConfig.setTestOnBorrow(Boolean.valueOf(env.getProperty("redis.pool.testOnBorrow").trim()));
jedisPoolConfig.setTestOnReturn(Boolean.valueOf(env.getProperty("redis.pool.testOnReturn").trim()));
jedisPoolConfig.setTestWhileIdle(Boolean.valueOf(env.getProperty("redis.pool.testWhileIdle").trim()));
jedisPoolConfig.setBlockWhenExhausted(Boolean.valueOf(env.getProperty("redis.pool.blockWhenExhausted").trim()));
jedisPoolConfig.setEvictionPolicyClassName(env.getProperty("redis.pool.evictionPolicyClassName").trim());
jedisPoolConfig.setLifo(Boolean.valueOf(env.getProperty("redis.pool.lifo").trim()));
jedisPoolConfig.setNumTestsPerEvictionRun(Integer.parseInt(env.getProperty("redis.pool.numTestsPerEvictionRun").trim()));
jedisPoolConfig.setMinEvictableIdleTimeMillis(Long.parseLong(env.getProperty("redis.pool.minEvictableIdleTimeMillis").trim()));
jedisPoolConfig.setTimeBetweenEvictionRunsMillis(Long.parseLong(env.getProperty("redis.pool.timeBetweenEvictionRunsMillis").trim()));
jedisPoolConfig.setTestWhileIdle(Boolean.parseBoolean(env.getProperty("redis.pool.testWhileIdle").trim()));
if (!StringUtils.isEmpty(env.getProperty("redis.password"))) {
return new JedisPool(jedisPoolConfig, env.getProperty("redis.host").trim(), Integer.parseInt(env.getProperty("redis.port").trim()), Integer.parseInt(env.getProperty("redis.timeout").trim()), env.getProperty("redis.password").trim());
} else {
return new JedisPool(jedisPoolConfig, env.getProperty("redis.host").trim(), Integer.parseInt(env.getProperty("redis.port").trim()), Integer.parseInt(env.getProperty("redis.timeout").trim()));
}
}
复制代码
MySQL批量ID生成
如何实现同一台服务器在高并发场景,让大家顺序拿号,别拿重复,也别漏拿?
其实就是保持这个号段对象隔离性的问题,可以使用原子变量AtomicLong. 内存中缓存了一段ID号段,此时每次有请求来取号时候,判断一下有没有到最后一个号码,没有到,就拿个号,走人
Long id = currentVal.incrementAndGet();
如果到达了最后一个号码,那么阻塞住其他请求线程,最早的那个线程去db取个号段,再更新一下号段的两个值,就可以了。
我们似乎解决了同一台服务器在高并发下的问题,但是如果idService服务多点部署,多个服务在启动过程中,进行ID批量申请时,可能由于并发导致数据不一致。
解决方案:
1、利用数据库悲观锁机制,查询时SQL:select last_id from id for update
2、实施CAS乐观锁,在写回时对last_id的初始条件进行比对,就能避免数据的不一致,写回时SQL:
update id set last_id = last_id + {size} where last_id = {读取时的last_id}
两种锁各有优缺点,不可认为一种好于另一种,像乐观锁适用于写比较少的情况下,即冲突真的很少发生的时候,这样可以省去了锁的开销,加大了系统的整个吞吐量。但如果经常产生冲突,上层应用会不断的进行retry,这样反倒是降低了性能,所以这种情况下用悲观锁就比较合适。
ID生成实现
数据库设计:
-- Counter table: a single row whose `last_id` column stores the upper
-- bound of the most recently handed-out id segment.
CREATE TABLE IF NOT EXISTS `id` (
  `last_id` bigint(20) unsigned NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

--
-- Seed the counter with its starting value.
--
INSERT INTO `id` (`last_id`) VALUES (0);

--
-- Primary key on `last_id`; the CAS write-back
-- (UPDATE ... WHERE last_id = <expected>) relies on matching this value.
--
ALTER TABLE `id`
  ADD PRIMARY KEY (`last_id`);
实现:
@Service
public class IdService {
private static final Logger logger = LoggerFactory.getLogger(IdService.class);
private AtomicLong currentVal = new AtomicLong(0L);
private AtomicLong maxVal = new AtomicLong(0L);
private static final long FETCH_SIZE = 50;// 每次生成50个id
@Resource
private IdMapper idMapper;
@PostConstruct
public void init() {
fecth();
}
/**
* 获取自增ID序列
*
* @return
*/
public Long nextId() {
if (currentVal.get() >= maxVal.get()) {
synchronized (this) {
if (currentVal.get() >= maxVal.get()) {
fecth();
}
}
}
return currentVal.incrementAndGet();
}
private void fecth() {
int retry = 0;
while (retry < 10) {
IdCriteria idCriteria = new IdCriteria();
idCriteria.setLimitEnd(1);
final List<Id> ids = idMapper.selectByExample(idCriteria);
int row = idMapper.inc(FETCH_SIZE, ids.get(0).getLastId());
if (row > 0) {
currentVal.set(ids.get(0).getLastId());
maxVal.set(ids.get(0).getLastId() + FETCH_SIZE);
return;
}
retry++;
}
logger.error(Constants.MARKER_INT, "update id failed after 10 times.");
throw new RuntimeException("update id failed after 10 times.");
}
复制代码
}