一、将订单缓存至Redis
1. 创建数据表user_order
-- User order table: one row per order; order_no is globally unique (snowflake id).
-- String literals use single quotes: double quotes are only literals when the
-- ANSI_QUOTES sql_mode is off, so the original DDL breaks under that mode.
create table user_order(
  id int(11) not null auto_increment,
  order_no varchar(100) not null comment '订单编号',
  user_id int(11) not null comment '用户id',
  is_active smallint(4) default 1 comment '状态(1=有效;0=无效)',
  create_time datetime default current_timestamp comment '下单时间',
  update_time timestamp null default null on update current_timestamp,
  primary key(id),
  unique key idx_order_no(order_no) using btree
) engine=InnoDB auto_increment=5 default charset=utf8 comment='用户下单记录表';
-- Seed data: two active orders and one already-inactive order (same three rows
-- as before, written as a single multi-row insert).
insert into user_order(order_no, user_id, is_active) values
  ('Book20191222001', 10010, 1),
  ('Book20191223001', 10011, 1),
  ('Book20191223003', 10012, 0);
2. 通过逆向工程生成对应的entity, mapper接口和映射文件
3. UserOrderService.java
@EnableScheduling
@Service
public class UserOrderService {
    private static final Logger log = LoggerFactory.getLogger(UserOrderService.class);

    // Snowflake generator (workerId=3, datacenterId=2): globally unique,
    // roughly time-ordered IDs, safe under high concurrency across nodes.
    private static final Snowflake SNOWFLAKE = new Snowflake(3, 2);

    // Redis key prefix for cached order numbers.
    private static final String RedisUserOrderPrefix = "SpringBootRedis:UserOrder:";

    // TTL (seconds) for a cached, not-yet-paid order. Primitive long: the
    // boxed Long added nothing and forced an unboxing on every call.
    private static final long UserOrderTimeOut = 30L;

    @Autowired
    private UserOrderMapper userOrderMapper;

    @Autowired
    private RedisTemplate redisTemplate; // raw type kept to match the project's configured bean

    /**
     * Places an order: generates a snowflake order number, persists the row,
     * and caches "prefix + orderNo" -> entity id with a TTL. Expiry of the
     * cache key is what later marks the order as unpaid/inactive.
     *
     * @param entity order to persist; its orderNo and order time are set here
     * @return the generated order number
     * @throws Exception propagated so the transaction rolls back on any failure
     */
    @Transactional(rollbackFor = Exception.class)
    public String putOrder(UserOrder entity) throws Exception {
        // Generate the globally unique order number.
        String orderNo = SNOWFLAKE.nextIdStr();
        entity.setOrderNo(orderNo);
        entity.setOrderTime(new Date());
        int res = userOrderMapper.insertSelective(entity);
        if (res > 0) {
            // Cache the order number with its TTL.
            // NOTE(review): this runs before the transaction commits — if the
            // commit later fails, a stale key remains in Redis until the TTL
            // lapses. Consider an afterCommit synchronization if that matters.
            // Also assumes insertSelective back-fills entity.getId() via
            // generated keys — TODO confirm in the mapper XML.
            redisTemplate.opsForValue()
                    .set(RedisUserOrderPrefix + orderNo, entity.getId(), UserOrderTimeOut, TimeUnit.SECONDS);
        }
        return orderNo;
    }
}
其中利用雪花算法来生成全局唯一ID(订单编号)
4. UserOrderController.java
@RestController
@RequestMapping("user/order")
public class UserOrderController {
    private static final Logger log = LoggerFactory.getLogger(UserOrderController.class);

    @Autowired
    private UserOrderService userOrderService;

    /**
     * Places an order. Validates the JSON body; on success the response data
     * is the generated order number, otherwise a failure code + message.
     */
    // NOTE(review): APPLICATION_JSON_UTF8_VALUE is deprecated since Spring 5.2;
    // APPLICATION_JSON_VALUE is the replacement — kept as-is pending a version check.
    @RequestMapping(value = "put", method = RequestMethod.POST, consumes = MediaType.APPLICATION_JSON_UTF8_VALUE)
    public BaseResponse put(@RequestBody @Validated UserOrder userOrder, BindingResult result) {
        // Reject invalid payloads before touching the service layer.
        String checkRes = ValidatorUtil.checkResult(result);
        if (StrUtil.isNotBlank(checkRes)) {
            return new BaseResponse(StatusCode.InvalidParams.getCode(), checkRes);
        }
        BaseResponse response = new BaseResponse(StatusCode.Success);
        try {
            log.info("--用户下单:{}", userOrder);
            String res = userOrderService.putOrder(userOrder);
            response.setData(res);
        } catch (Exception e) {
            // Log the exception itself: fillInStackTrace() pointlessly rewrote
            // the trace to this catch site, discarding the original origin.
            log.error("--用户下单-发生异常:", e);
            response = new BaseResponse(StatusCode.Fail.getCode(), e.getMessage());
        }
        return response;
    }
}
二、利用定时任务调度对订单失效
借助定时任务调度将已失效的订单在数据库中标记为无效:定时任务定时拉取DB中未失效的订单(按照下单时间升序),逐一与缓存作对比,通过 hasKey 判断该订单的缓存键是否仍然存在;若键已过期不存在,说明订单超时未支付,应失效。
@Async表示开启多个线程去异步执行
@Scheduled:服务启动后,按照配置的cron表达式周期性地自动调用该方法。
/**
 * Scheduled sweep: fetches active + unpaid orders from the DB and, for each
 * one whose Redis key has expired (TTL elapsed without payment), marks the
 * DB row inactive. Runs every second on the async pool.
 */
@Scheduled(cron = "0/1 * * * * ?")
@Async("threadPoolTaskExecutor")
public void schedulerCheckOrder(){
    try {
        List<UserOrder> orders = userOrderMapper.selectUnPayOrders();
        if (orders != null && !orders.isEmpty()) {
            orders.forEach(entity -> {
                String key = RedisUserOrderPrefix + entity.getOrderNo();
                // hasKey returns a (nullable) Boolean; the original
                // !redisTemplate.hasKey(key) risked an unboxing NPE.
                // Treat null the same as "present" so we never invalidate
                // an order on an indeterminate answer.
                if (Boolean.FALSE.equals(redisTemplate.hasKey(key))) {
                    // Key expired in Redis -> order exceeded its pay window; deactivate it.
                    userOrderMapper.unActiveOrder(entity.getId());
                    log.info("----缓存中当前订单超过了TTL未支付,故而失效该表中的对应记录:orderNo={}", entity.getOrderNo());
                }
            });
        }
    } catch (Exception e) {
        log.error("定时任务调度-拉取出 有效 + 未支付 的订单列表,前往缓存查询订单是否已失效-发生异常:", e);
    }
}
如果有定时任务调度的话,需要加上一个线程池的配置,多个调度会被分配至多个线程去执行。
@Configuration
public class ThreadConfig {

    /**
     * Pool backing the @Async("threadPoolTaskExecutor") scheduled jobs.
     * Core 4 / max 8 threads, bounded queue of 8; when saturated,
     * CallerRunsPolicy makes the submitting thread execute the task
     * (throttles instead of dropping work).
     */
    @Bean("threadPoolTaskExecutor")
    public Executor threadPoolTaskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(4);
        executor.setMaxPoolSize(8);
        executor.setKeepAliveSeconds(10);
        executor.setQueueCapacity(8);
        // Named threads: the defaults are anonymous, which makes thread dumps
        // and log lines from the scheduled jobs impossible to attribute.
        executor.setThreadNamePrefix("order-task-");
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        return executor;
    }
}
缺陷
如果成功下单但是却迟迟未支付的订单量过大时,将很占内存~可能会演变为悲伤的故事。
改进:采用RabbitMQ的死信队列;采用Redisson的延迟队列...