比如说我们的系统中,有一些使用频繁的数据,但是并不会经常的变动更新,此时可以考虑将数据存在缓存中,例如JVM(不推荐,不指定缓存二级策略会默认缓存到此处),Redis(推荐)。
接下来就需要考虑什么时候需要保存这些缓存数据到Redis,什么时候需要清理掉。我们可以利用mybatis框架源码下的cache包提供的Cache接口进行实现,让框架帮我们在适当的时候调用适当的方法。
/*
* Copyright 2009-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ibatis.cache;
import java.util.concurrent.locks.ReadWriteLock;
/**
 * SPI for cache providers.
 * <p>
 * One instance of cache will be created for each namespace.
 * <p>
 * The cache implementation must have a constructor that receives the cache id as an String parameter.
 * <p>
 * MyBatis will pass the namespace as id to the constructor.
 *
 * <pre>
 * public MyCache(final String id) {
 *   if (id == null) {
 *     throw new IllegalArgumentException("Cache instances require an ID");
 *   }
 *   this.id = id;
 *   initialize();
 * }
 * </pre>
 *
 * @author Clinton Begin
 */
public interface Cache {

  /**
   * @return the identifier of this cache — in practice MyBatis passes the
   *         fully qualified mapper interface name, i.e. the namespace
   */
  String getId();

  /**
   * @param key
   *          the cache key, generated from: MappedStatement ms, Object
   *          parameterObject, RowBounds rowBounds, BoundSql boundSql
   * @param value
   *          the result data that was fetched from the database
   */
  void putObject(Object key, Object value);

  /**
   * @param key
   *          The key
   * @return The object stored in the cache.
   */
  Object getObject(Object key);

  /**
   * As of 3.3.0 this method is only called during a rollback
   * for any previous value that was missing in the cache.
   * This lets any blocking cache to release the lock that
   * may have previously put on the key.
   * A blocking cache puts a lock when a value is null
   * and releases it when the value is back again.
   * This way other threads will wait for the value to be
   * available instead of hitting the database.
   *
   * @param key
   *          The key
   * @return Not used
   */
  Object removeObject(Object key);

  /**
   * Clears this cache instance.
   */
  void clear();

  /**
   * Optional. This method is not called by the core.
   *
   * @return The number of elements stored in the cache (not its capacity).
   */
  int getSize();

  /**
   * Optional. As of 3.2.6 this method is no longer called by the core.
   * <p>
   * Any locking needed by the cache must be provided internally by the cache provider.
   *
   * @return A ReadWriteLock
   */
  default ReadWriteLock getReadWriteLock() {
    return null;
  }
}
使用默认的缓存策略,不指定自定义的缓存策略
package com.lopu.yxddyjc.usercenter.mapper;
import com.github.yulichang.base.MPJBaseMapper;
import com.lopu.yxddyjc.commonormmodel.config.MpRedisCache;
import com.lopu.yxddyjc.usercenter.entity.Dept;
import org.apache.ibatis.annotations.CacheNamespace;
/**
 * Dept mapper using the DEFAULT second-level cache strategy: a bare
 * {@code @CacheNamespace} falls back to the in-JVM {@code PerpetualCache}
 * implementation (see the annotation's {@code implementation} default).
 *
 * @author jyd
 * @date 2022/11/1 13:05
 */
@CacheNamespace
public interface DeptMapper extends MPJBaseMapper<Dept> {}
上面通过默认的缓存策略进行 Debug,发现最终会将查询出来的缓存数据保存在 PerpetualCache 这个实现中;从 CacheNamespace 注解的 implementation 属性默认值也可以看出默认使用的缓存策略。MyBatis 主要利用装饰者模式进行层层包装,最终调用到此处。
package org.apache.ibatis.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.ibatis.cache.Cache;
import org.apache.ibatis.cache.decorators.LruCache;
import org.apache.ibatis.cache.impl.PerpetualCache;
/**
 * Declares a second-level cache for a mapper namespace.
 * The chosen {@code implementation} is wrapped by decorator caches
 * (eviction, logging, sync, ...) built by the MyBatis core.
 */
@Documented
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface CacheNamespace {

  /** Backing cache implementation; defaults to the in-JVM HashMap-based PerpetualCache. */
  Class<? extends Cache> implementation() default PerpetualCache.class;

  /** Eviction decorator wrapped around the implementation; defaults to LRU. */
  Class<? extends Cache> eviction() default LruCache.class;

  /** Flush interval in milliseconds; 0 presumably means no scheduled flush — see MyBatis docs. */
  long flushInterval() default 0;

  /** Size hint for the cache (used by the eviction decorator). */
  int size() default 1024;

  /** Whether the cache is read/write (serialized copies) rather than read-only shared instances. */
  boolean readWrite() default true;

  /** Whether lookups for a missing key block concurrent threads until the value is set. */
  boolean blocking() default false;

  /** Extra properties passed through to the cache implementation. */
  Property[] properties() default {};
}
此种方式显然是利用了HashMap将结果集缓存到了JVM中,此时会降低我们的服务的空闲内存,会加快GC的频率,影响到我们服务的性能。
接下来是我们自定义使用Redis进行外部缓存。可以实现多实例之间缓存共享。
package com.lopu.yxddyjc.commonormmodel.config;
import cn.hutool.extra.spring.SpringUtil;
import com.lopu.yxddyjc.commonormmodel.cache.RedisCache;
import com.lopu.yxddyjc.commonormmodel.constant.CacheConstant;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.cache.Cache;
import org.springframework.data.redis.connection.RedisServerCommands;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.util.CollectionUtils;
import java.util.Collection;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* mybatis-redis 二级缓存
*
* @author jyd
*/
@Slf4j
public class MpRedisCache implements Cache {
/** todo 切换分布式读写锁 */
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
private final String id;
private RedisCache redisCache;
public MpRedisCache(final String id) {
if (id == null) {
throw new IllegalArgumentException("Cache instances require an ID");
}
this.id = id;
}
@Override
public String getId() {
return this.id;
}
@Override
public void putObject(Object key, Object value) {
checkRedis();
if (value != null) {
redisCache.setCacheObject(CacheConstant.PREFIELX + key.toString(), value);
}
}
/**
* 通过:MappedStatement ms, Object parameterObject, RowBounds rowBounds, BoundSql boundSql生成key
*
* @param key The key
* @return 数据
*/
@Override
public Object getObject(Object key) {
checkRedis();
try {
if (key != null) {
return redisCache.getCacheObject(CacheConstant.PREFIELX + key);
}
} catch (Exception e) {
e.printStackTrace();
log.error("缓存出错");
}
return null;
}
@Override
public Object removeObject(Object key) {
checkRedis();
if (key != null) {
redisCache.deleteObject(CacheConstant.PREFIELX + (key));
}
return null;
}
@Override
public void clear() {
log.debug("清空缓存");
checkRedis();
Collection<String> keys = redisCache.keys(CacheConstant.PREFIELX + ("*:" + this.id + "*"));
if (!CollectionUtils.isEmpty(keys)) {
redisCache.deleteObject(keys);
}
}
@Override
public int getSize() {
RedisTemplate<String, Object> redisTemplate = SpringUtil.getBean("redisTemplate");
Long size = redisTemplate.execute(RedisServerCommands::dbSize);
return size == null ? 0 : size.intValue();
}
@Override
public ReadWriteLock getReadWriteLock() {
return this.readWriteLock;
}
public void checkRedis() {
if (redisCache == null) {
try {
redisCache = SpringUtil.getBean(RedisCache.class);
} catch (Exception ignored) {
}
}
}
}
使用方式
package com.lopu.yxddyjc.usercenter.mapper;
import com.github.yulichang.base.MPJBaseMapper;
import com.lopu.yxddyjc.commonormmodel.config.MpRedisCache;
import com.lopu.yxddyjc.usercenter.entity.Dept;
import org.apache.ibatis.annotations.CacheNamespace;
/**
 * Dept mapper wired to the custom Redis-backed second-level cache.
 * Note: {@code eviction} is also set to MpRedisCache, so MyBatis will not wrap
 * the cache in its default LruCache decorator.
 *
 * @author jyd
 * @date 2022/11/1 13:05
 */
@CacheNamespace(implementation = MpRedisCache.class, eviction = MpRedisCache.class)
public interface DeptMapper extends MPJBaseMapper<Dept> {}
此时在CacheNamespace注解中指定上我们自定义的缓存实现,然后重启项目进行Debug
此时可以发现,已经可以调用到我们自定义的方法,会去缓存中通过拼接的key去查询我们的数据
此时如果进行新增,修改,删除操作,则会在事务提交的时候通过框架调用到我们的clear()方法,可以将此namespace下所有缓存的数据及时的清除
注意:
1.如果是在单机版本中,可以直接使用上方引入的读写锁ReentrantReadWriteLock,如果是在多实例的环境下则需要替换为分布式读写锁
2.使用的Redis需要配置序列化方式
package com.lopu.yxddyjc.commonormmodel.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import java.io.Serializable;
/**
 * Redis serialization configuration: String serializer for keys/hash keys and
 * a fastjson JSON serializer for values/hash values, so keys stay readable in
 * Redis while values round-trip back to their original types.
 *
 * @author jyd
 */
@Configuration
@ConditionalOnClass({RedisOperations.class})
@SuppressWarnings("all")
public class RedisCacheConfig {

  /** fastjson value serializer (embeds class names so deserialization restores concrete types). */
  @Bean
  public RedisSerializer fastJson2JsonRedisSerializer() {
    return new FastJson2JsonRedisSerializer<>(Object.class);
  }

  /**
   * Builds the shared RedisTemplate used by the second-level cache.
   *
   * @param redisConnectionFactory connection factory supplied by Spring Boot
   * @param fastJson2JsonRedisSerializer the value serializer bean declared above
   * @return a RedisTemplate with String keys and fastjson-serialized values
   */
  @Bean
  public RedisTemplate<String, Serializable> redisTemplate(
      RedisConnectionFactory redisConnectionFactory, RedisSerializer fastJson2JsonRedisSerializer) {
    RedisTemplate<String, Serializable> template = new RedisTemplate<>();
    template.setConnectionFactory(redisConnectionFactory);
    StringRedisSerializer stringRedisSerializer = new StringRedisSerializer();
    // Keys and hash keys stay plain strings so they are readable in redis-cli.
    template.setKeySerializer(stringRedisSerializer);
    template.setHashKeySerializer(stringRedisSerializer);
    // Values and hash values go through fastjson.
    template.setValueSerializer(fastJson2JsonRedisSerializer);
    template.setHashValueSerializer(fastJson2JsonRedisSerializer);
    return template;
  }
}
package com.lopu.yxddyjc.commonormmodel.config;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.parser.ParserConfig;
import com.alibaba.fastjson.serializer.SerializerFeature;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.SerializationException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
/**
 * RedisSerializer implementation based on fastjson. Serializes values to
 * UTF-8 JSON with class-name metadata ({@code @type}) so they deserialize
 * back to their original types.
 *
 * @author jyd
 */
public class FastJson2JsonRedisSerializer<T> implements RedisSerializer<T> {

  public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;

  /** Package whitelisted for fastjson autoType deserialization. */
  private static final String BASE_PACKAGEV2 = "com.taobao.pac.client.sdk.dataobject.";

  static {
    // Works around the "autoType is not support" error on recent fastjson versions.
    ParserConfig.getGlobalInstance().addAccept(BASE_PACKAGEV2);
    // SECURITY NOTE(review): globally enabling autoType lets attacker-controlled
    // class names inside cached JSON be instantiated on deserialization — this is
    // the well-known fastjson RCE vector. Prefer per-package addAccept()
    // whitelisting (as above) and keep autoType disabled if at all possible.
    ParserConfig.getGlobalInstance().setAutoTypeSupport(true);
  }

  /** Target type handed to fastjson on deserialization. */
  private final Class<T> clazz;

  public FastJson2JsonRedisSerializer(Class<T> clazz) {
    super();
    this.clazz = clazz;
  }

  /**
   * Serializes {@code t} to UTF-8 JSON bytes; class names are embedded via
   * {@code SerializerFeature.WriteClassName}.
   *
   * @param t object to serialize. Can be {@literal null}.
   * @return JSON bytes, or an empty array when {@code t} is null
   */
  @Override
  public byte[] serialize(T t) throws SerializationException {
    if (t == null) {
      return new byte[0];
    }
    return JSON.toJSONString(t, SerializerFeature.WriteClassName).getBytes(DEFAULT_CHARSET);
  }

  /**
   * Deserializes UTF-8 JSON bytes back into an instance of {@code clazz}.
   *
   * @param bytes object binary representation. Can be {@literal null}.
   * @return the deserialized object, or null for null/empty input
   */
  @Override
  public T deserialize(byte[] bytes) throws SerializationException {
    if (bytes == null || bytes.length == 0) {
      return null;
    }
    String str = new String(bytes, DEFAULT_CHARSET);
    return JSON.parseObject(str, clazz);
  }
}
我在此处使用的是fastJson序列化方式,也可以使用jackson方式,至于两者区别请自行参阅资料,此处不做解释。