mybatis-3.4.x: Cache Usage Seen from the Source Code [Notes 3]

A look at MyBatis caching from the source code

  1. A quick look at how a SqlSession is created
  //DefaultSqlSessionFactory.java
  private SqlSession openSessionFromDataSource(ExecutorType execType, TransactionIsolationLevel level, boolean autoCommit) {
    Transaction tx = null;
    try {
      final Environment environment = configuration.getEnvironment();
      final TransactionFactory transactionFactory = getTransactionFactoryFromEnvironment(environment);
      // the transaction manager
      tx = transactionFactory.newTransaction(environment.getDataSource(), level, autoCommit);
      // the executor; caching is handled by the Executor, see below
      final Executor executor = configuration.newExecutor(tx, execType);
      return new DefaultSqlSession(configuration, executor, autoCommit);
    } catch (Exception e) {
      closeTransaction(tx); // may have fetched a connection so lets call close()
      throw ExceptionFactory.wrapException("Error opening session.  Cause: " + e, e);
    } finally {
      ErrorContext.instance().reset();
    }
  }

The Executor is wrapped via the decorator pattern to enrich its functionality.

  /* see Configuration.java */
  public Executor newExecutor(Transaction transaction, ExecutorType executorType) {
    executorType = executorType == null ? defaultExecutorType : executorType;
    executorType = executorType == null ? ExecutorType.SIMPLE : executorType;

    Executor executor;
    if (ExecutorType.BATCH == executorType) {
      executor = new BatchExecutor(this, transaction);
    } else if (ExecutorType.REUSE == executorType) {
      executor = new ReuseExecutor(this, transaction);
    } else {
      executor = new SimpleExecutor(this, transaction);
    }
    // cacheEnabled defaults to true; wrap the executor with the caching decorator
    if (cacheEnabled) {
      executor = new CachingExecutor(executor);
    }
    // wrap with the registered interceptors (plugin proxies)
    executor = (Executor) interceptorChain.pluginAll(executor);
    return executor;
  }
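
The last step, interceptorChain.pluginAll(executor), is what turns the executor into a chain of interceptor proxies. As a hedged illustration of what such a plugin looks like (the class below is made up for this note and would be registered under <plugins> in mybatis.xml; it is not part of MyBatis itself):

import java.util.Properties;

import org.apache.ibatis.executor.Executor;
import org.apache.ibatis.mapping.MappedStatement;
import org.apache.ibatis.plugin.Interceptor;
import org.apache.ibatis.plugin.Intercepts;
import org.apache.ibatis.plugin.Invocation;
import org.apache.ibatis.plugin.Plugin;
import org.apache.ibatis.plugin.Signature;
import org.apache.ibatis.session.ResultHandler;
import org.apache.ibatis.session.RowBounds;

// hypothetical example plugin: logs every Executor.query call and then delegates
@Intercepts({@Signature(type = Executor.class, method = "query",
    args = {MappedStatement.class, Object.class, RowBounds.class, ResultHandler.class})})
public class QueryLoggingInterceptor implements Interceptor {

  @Override
  public Object intercept(Invocation invocation) throws Throwable {
    MappedStatement ms = (MappedStatement) invocation.getArgs()[0];
    System.out.println("executing " + ms.getId());
    // hand control back to the (possibly cache-wrapped) executor
    return invocation.proceed();
  }

  @Override
  public Object plugin(Object target) {
    // wrap the target in a JDK dynamic proxy; pluginAll calls this for each interceptor
    return Plugin.wrap(target, this);
  }

  @Override
  public void setProperties(Properties properties) {
    // no properties needed for this sketch
  }
}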

How CachingExecutor handles a query (the second-level cache)

  /* see CachingExecutor.java */
  @Override
  public <E> List<E> query(MappedStatement ms, Object parameterObject, RowBounds rowBounds, ResultHandler resultHandler, CacheKey key, BoundSql boundSql)
      throws SQLException {
    // get the cache bound to this mapper/statement
    Cache cache = ms.getCache();
    if (cache != null) {
      // clear the second-level cache if this statement requires a flush
      flushCacheIfRequired(ms);
      // if the statement uses the cache and there is no resultHandler, try reading the result from the cache first
      if (ms.isUseCache() && resultHandler == null) {
        ensureNoOutParams(ms, boundSql);
        @SuppressWarnings("unchecked")
        List<E> list = (List<E>) tcm.getObject(cache, key);
        if (list == null) {
          // cache miss: let the delegate run the remaining steps
          list = delegate.<E> query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
          tcm.putObject(cache, key, list); // issue #578 and #116
        }
        return list;
      }
    }
    return delegate.<E> query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
  }
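
flushCacheIfRequired(ms) clears the second-level cache whenever the statement is marked as requiring a flush. By default selects do not flush and insert/update/delete statements do; the flushCache attribute overrides this per statement. A hedged sketch (only queryPosts and the post table appear in the tests later; the other statement id and columns are assumptions for illustration):

<!-- selects default to flushCache="false"; setting it to true clears the cache before every run -->
<select id="queryPosts" resultType="map" flushCache="true">
  select * from post where id > 0
</select>

<!-- insert/update/delete default to flushCache="true", which is why updates invalidate cached queries -->
<update id="updatePostTitle">
  update post set title = #{title} where id = #{id}
</update>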
  
  

How the base class BaseExecutor handles a query (the first-level cache)

 /* see BaseExecutor.java */
  @Override
  public <E> List<E> query(MappedStatement ms, Object parameter, RowBounds rowBounds, ResultHandler resultHandler, CacheKey key, BoundSql boundSql) throws SQLException {
    ErrorContext.instance().resource(ms.getResource()).activity("executing a query").object(ms.getId());
    if (closed) {
      throw new ExecutorException("Executor was closed.");
    }
    if (queryStack == 0 && ms.isFlushCacheRequired()) {
      clearLocalCache();
    }
    List<E> list;
    try {
      queryStack++;
      // try to read the query result from the first-level cache
      list = resultHandler == null ? (List<E>) localCache.getObject(key) : null;
      if (list != null) {
        handleLocallyCachedOutputParameters(ms, key, parameter, boundSql);
      } else {
        list = queryFromDatabase(ms, parameter, rowBounds, resultHandler, key, boundSql);
      }
    } finally {
      queryStack--;
    }
    if (queryStack == 0) {
      for (DeferredLoad deferredLoad : deferredLoads) {
        deferredLoad.load();
      }
      // issue #601
      deferredLoads.clear();
      // if LocalCacheScope is STATEMENT, do not keep the local cache
      if (configuration.getLocalCacheScope() == LocalCacheScope.STATEMENT) {
        // issue #482
        clearLocalCache();
      }
    }
    return list;
  }

The cache key: CacheKey

/* the default Cache implementation */
public class PerpetualCache implements Cache {

  private final String id;
  // the map that holds the cached data
  private Map<Object, Object> cache = new HashMap<Object, Object>();
  ...

How HashMap decides whether two keys are equal:


 if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
 ...
 The hash values must be equal, and either the key references are identical or equals() returns true.

The MyBatis CacheKey implementation:

package org.apache.ibatis.cache;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import org.apache.ibatis.reflection.ArrayUtil;

/**
 * @author Clinton Begin
 */
public class CacheKey implements Cloneable, Serializable {

  private static final long serialVersionUID = 1146682552656046210L;

  public static final CacheKey NULL_CACHE_KEY = new NullCacheKey();

  private static final int DEFAULT_MULTIPLYER = 37;
  private static final int DEFAULT_HASHCODE = 17;

  private final int multiplier;
  private int hashcode;
  private long checksum;
  private int count;
  // 8/21/2017 - Sonarlint flags this as needing to be marked transient.  While true if content is not serializable, this is not always true and thus should not be marked transient.
  private List<Object> updateList;

  public CacheKey() {
    this.hashcode = DEFAULT_HASHCODE;
    this.multiplier = DEFAULT_MULTIPLYER;
    this.count = 0;
    this.updateList = new ArrayList<Object>();
  }

  public CacheKey(Object[] objects) {
    this();
    updateAll(objects);
  }

  public int getUpdateCount() {
    return updateList.size();
  }

  public void update(Object object) {
    int baseHashCode = object == null ? 1 : ArrayUtil.hashCode(object); 

    count++;
    checksum += baseHashCode;
    baseHashCode *= count;

    hashcode = multiplier * hashcode + baseHashCode;

    updateList.add(object);
  }

  public void updateAll(Object[] objects) {
    for (Object o : objects) {
      update(o);
    }
  }

 /* override equals */
  @Override
  public boolean equals(Object object) {
    if (this == object) {
      return true;
    }
    if (!(object instanceof CacheKey)) {
      return false;
    }

    final CacheKey cacheKey = (CacheKey) object;

    if (hashcode != cacheKey.hashcode) {
      return false;
    }
    if (checksum != cacheKey.checksum) {
      return false;
    }
    if (count != cacheKey.count) {
      return false;
    }

    for (int i = 0; i < updateList.size(); i++) {
      Object thisObject = updateList.get(i);
      Object thatObject = cacheKey.updateList.get(i);
      if (!ArrayUtil.equals(thisObject, thatObject)) {
        return false;
      }
    }
    return true;
  }

  /* override hashCode */
  @Override
  public int hashCode() {
    return hashcode;
  }

  @Override
  public String toString() {
    StringBuilder returnValue = new StringBuilder().append(hashcode).append(':').append(checksum);
    for (Object object : updateList) {
      returnValue.append(':').append(ArrayUtil.toString(object));
    }
    return returnValue.toString();
  }

  @Override
  public CacheKey clone() throws CloneNotSupportedException {
    CacheKey clonedCacheKey = (CacheKey) super.clone();
    clonedCacheKey.updateList = new ArrayList<Object>(updateList);
    return clonedCacheKey;
  }

}
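
To see why CacheKey overrides both hashCode and equals, here is a small standalone sketch (the statement id, SQL and parameter values are made up for illustration): two keys built from the same sequence of updates are equal, so a result stored in PerpetualCache under one key can be found again with the other.

import org.apache.ibatis.cache.CacheKey;
import org.apache.ibatis.cache.impl.PerpetualCache;

public class CacheKeyDemo {
  public static void main(String[] args) {
    // same "statement id, offset, limit, sql, parameter" sequence -> equal keys
    Object[] parts = {"test.CachedAuthorMapper.search", 0, Integer.MAX_VALUE,
        "select ... where p.article_id = ?", 1L};
    CacheKey first = new CacheKey(parts);
    CacheKey second = new CacheKey(parts.clone());

    System.out.println(first.equals(second));                  // true
    System.out.println(first.hashCode() == second.hashCode()); // true

    // so a value stored under the first key is found again with the second one
    PerpetualCache cache = new PerpetualCache("demo");
    cache.putObject(first, "cached result");
    System.out.println(cache.getObject(second));               // cached result
  }
}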

 /* see BaseExecutor.java */
  @Override
  public CacheKey createCacheKey(MappedStatement ms, Object parameterObject, RowBounds rowBounds, BoundSql boundSql) {
    if (closed) {
      throw new ExecutorException("Executor was closed.");
    }
    CacheKey cacheKey = new CacheKey();
    // the statement id
    cacheKey.update(ms.getId());
    // the row bounds (offset and limit)
    cacheKey.update(rowBounds.getOffset());
    cacheKey.update(rowBounds.getLimit());
    // the SQL text of the query
    cacheKey.update(boundSql.getSql());
    // the query parameters
    List<ParameterMapping> parameterMappings = boundSql.getParameterMappings();
    TypeHandlerRegistry typeHandlerRegistry = ms.getConfiguration().getTypeHandlerRegistry();
    // mimic DefaultParameterHandler logic
    for (ParameterMapping parameterMapping : parameterMappings) {
      if (parameterMapping.getMode() != ParameterMode.OUT) {
        Object value;
        String propertyName = parameterMapping.getProperty();
        if (boundSql.hasAdditionalParameter(propertyName)) {
          value = boundSql.getAdditionalParameter(propertyName);
        } else if (parameterObject == null) {
          value = null;
        } else if (typeHandlerRegistry.hasTypeHandler(parameterObject.getClass())) {
          value = parameterObject;
        } else {
          MetaObject metaObject = configuration.newMetaObject(parameterObject);
          value = metaObject.getValue(propertyName);
        }
        cacheKey.update(value);
      }
    }
    if (configuration.getEnvironment() != null) {
      // issue #176
      // the environment id
      cacheKey.update(configuration.getEnvironment().getId());
    }
    return cacheKey;
  }
  2. The differences between the first-level and second-level caches, from the source above

Scope

The Executor is held by the SqlSession, so localCache is shared within a single session.

public abstract class BaseExecutor implements Executor {

  private static final Log log = LogFactory.getLog(BaseExecutor.class);

  protected Transaction transaction;
  protected Executor wrapper;

  protected ConcurrentLinkedQueue<DeferredLoad> deferredLoads;
  // the first-level cache
  protected PerpetualCache localCache;
  protected PerpetualCache localOutputParameterCache;
  protected Configuration configuration;
  ...

From the CachingExecutor query handling above, we can see that the second-level cache comes from the MappedStatement. That object is tied to a mapper: a cached statement must either declare a cache in its own namespace or reference another namespace's cache.

So in this sense the second-level cache has a narrower scope than the first-level cache: it covers only the statements of one mapper namespace (though it is shared across sessions), while the first-level cache covers everything run through one SqlSession.
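
A hedged sketch of what that scoping means in practice, reusing the BaseTest and CachedAuthorMapper classes from the examples in section 3 below and assuming cacheEnabled=false so only the first-level cache is involved: repeating a query inside one SqlSession is served from localCache, but a second SqlSession has its own BaseExecutor and therefore its own localCache.

import org.apache.ibatis.session.SqlSession;
import org.junit.Test;

// assumes BaseTest and CachedAuthorMapper from the test examples in section 3 below
public class LocalCacheScopeTest extends BaseTest {

    @Test
    public void firstLevelCacheIsPerSession() {
        CachedAuthorMapper mapper = sqlSession.getMapper(CachedAuthorMapper.class);
        mapper.search(1, 1); // hits the database
        mapper.search(1, 1); // served from this session's localCache, no SQL issued

        // a second session has its own Executor and therefore its own localCache
        try (SqlSession other = sqlSessionFactory.openSession()) {
            other.getMapper(CachedAuthorMapper.class).search(1, 1); // queries the database again
        }
    }
}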

How each cache is enabled

First-level cache

public class Configuration {

  ...
  // first-level cache: the default scope is SESSION
  protected LocalCacheScope localCacheScope = LocalCacheScope.SESSION;
  ...

If localCacheScope is set to LocalCacheScope.STATEMENT, the first-level cache is effectively disabled (it is cleared after every statement); the handling can be seen in the BaseExecutor query source above.


Second-level cache

public class Configuration {

  ...
  // the second-level cache is enabled by default
  protected boolean cacheEnabled = true;
  ...

As shown above in the decorator wrapping of the Executor, the second-level cache wrapper (CachingExecutor) is only used when cacheEnabled is true.


  3. Simple usage examples

First-level cache

/* shared test base class */
public class BaseTest {

    protected SqlSessionFactory sqlSessionFactory;
    protected SqlSession sqlSession;

    @Before
    public void init(){
        InputStream inputStream;
        try {
            System.getProperties().put("sun.misc.ProxyGenerator.saveGeneratedFiles","true");
            inputStream = Resources.getResourceAsStream("mybatis.xml");
            sqlSessionFactory = new SqlSessionFactoryBuilder().build(inputStream);
            sqlSession = sqlSessionFactory.openSession();
        } catch (IOException e) {
            //nothing to do
        }
    }

    @After
    public void close(){
        sqlSession.close();
    }
}

Testing the first-level cache

Disable the second-level cache:
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE configuration
        PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
        "http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
...
    <settings>
        <setting name="cacheEnabled" value="false"/>
    </settings>
   ...
</configuration>
public class CacheTest extends BaseTest {
    /*
     * Tests the first-level cache
     */
    @Test
    public void testCache1(){
        CachedAuthorMapper cachedAuthorMapper = sqlSession.getMapper(CachedAuthorMapper.class);
        cachedAuthorMapper.search(1,1);
        cachedAuthorMapper.search(1,1);
    }
}
Execution result:
DEBUG [main] - ==>  Preparing: select p.id as post_id,a.id,a.author_id,a.title,r.username,p.`comment` from article a,author r,post p WHERE 1 = 1 and a.author_id = r.id and p.article_id = a.id and p.article_id = ? 
DEBUG [main] - ==> Parameters: 1(Long)
DEBUG [main] - <==      Total: 2

Two queries were issued, but only one database operation was executed.

Testing with the first-level cache disabled

<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE configuration
        PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
        "http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
    <settings>
        <setting name="localCacheScope" value="STATEMENT"/>
        <setting name="cacheEnabled" value="false"/>
    </settings>
</configuration>
Execution result:
DEBUG [main] - ==>  Preparing: select p.id as post_id,a.id,a.author_id,a.title,r.username,p.`comment` from article a,author r,post p WHERE 1 = 1 and a.author_id = r.id and p.article_id = a.id and p.article_id = ? 
DEBUG [main] - ==> Parameters: 1(Long)
DEBUG [main] - <==      Total: 2
DEBUG [main] - ==>  Preparing: select p.id as post_id,a.id,a.author_id,a.title,r.username,p.`comment` from article a,author r,post p WHERE 1 = 1 and a.author_id = r.id and p.article_id = a.id and p.article_id = ? 
DEBUG [main] - ==> Parameters: 1(Long)
DEBUG [main] - <==      Total: 2

The database was queried twice.

Testing the second-level cache

Disable the first-level cache:

<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE configuration
        PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
        "http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
    <settings>
        <setting name="localCacheScope" value="STATEMENT"/>
    </settings>
</configuration>

Configure the mapper to enable its cache:
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE mapper
    PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
    "http://mybatis.org/dtd/mybatis-3-mapper.dtd">

<mapper namespace="test.CachedAuthorMapper">
 ...
  <cache/>
  ...
 </mapper>
Execution result:
DEBUG [main] - ==>  Preparing: select * from post where id >0 
DEBUG [main] - ==> Parameters: 
DEBUG [main] - <==      Total: 4

DEBUG [main] - Cache Hit Ratio [ddshuai.CachedAuthorMapper]: 0.5

Two queries, only one database operation, and a 50% cache hit ratio.

Using the second-level cache is slightly different:

public class CacheTest extends BaseTest {
    
    /*
     * Tests the second-level cache
     */
    @Test
    public void testCache2(){
        CachedAuthorMapper cachedAuthorMapper = sqlSession.getMapper(CachedAuthorMapper.class);
        cachedAuthorMapper.queryPosts();
        // required: without this commit the second-level cache does not take effect
        sqlSession.commit();
        cachedAuthorMapper.queryPosts();

    }
}

Why does the second-level cache only take effect after a commit? My understanding is that this avoids caching dirty data: query results are first staged in a TransactionalCache and only pushed into the shared cache when the transaction commits (a small test sketch after the source below illustrates this from the caller's side).

package org.apache.ibatis.cache.decorators;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;

import org.apache.ibatis.cache.Cache;
import org.apache.ibatis.logging.Log;
import org.apache.ibatis.logging.LogFactory;

public class TransactionalCache implements Cache {

  private static final Log log = LogFactory.getLog(TransactionalCache.class);

  // the real (delegate) cache
  private final Cache delegate;
  // whether the cache should be cleared when the transaction commits
  private boolean clearOnCommit;
  // entries staged to be added to the cache on commit
  private final Map<Object, Object> entriesToAddOnCommit;
  // keys that were not found in the cache
  private final Set<Object> entriesMissedInCache;

  public TransactionalCache(Cache delegate) {
    this.delegate = delegate;
    this.clearOnCommit = false;
    this.entriesToAddOnCommit = new HashMap<Object, Object>();
    this.entriesMissedInCache = new HashSet<Object>();
  }

  @Override
  public String getId() {
    return delegate.getId();
  }

  @Override
  public int getSize() {
    return delegate.getSize();
  }

  @Override
  public Object getObject(Object key) {
    // issue #116
    Object object = delegate.getObject(key);
    if (object == null) {
      entriesMissedInCache.add(key);
    }
    // issue #146
    if (clearOnCommit) {
      return null;
    } else {
      return object;
    }
  }

  @Override
  public ReadWriteLock getReadWriteLock() {
    return null;
  }

  /**
   * Stages the entry in entriesToAddOnCommit instead of writing it to the real cache
   * @param key Can be any object but usually it is a {@link CacheKey}
   * @param object
   */
  @Override
  public void putObject(Object key, Object object) {
    entriesToAddOnCommit.put(key, object);
  }

  @Override
  public Object removeObject(Object key) {
    return null;
  }

  @Override
  public void clear() {
    clearOnCommit = true;
    entriesToAddOnCommit.clear();
  }

  /**
   * On commit, flush the previously staged entries into the real cache
   */
  public void commit() {
    if (clearOnCommit) {
      delegate.clear();
    }
    flushPendingEntries();
    reset();
  }

  public void rollback() {
    unlockMissedEntries();
    reset();
  }

  private void reset() {
    clearOnCommit = false;
    entriesToAddOnCommit.clear();
    entriesMissedInCache.clear();
  }

  /**
   * Push the staged entries into the real cache
   */
  private void flushPendingEntries() {
    for (Map.Entry<Object, Object> entry : entriesToAddOnCommit.entrySet()) {
      delegate.putObject(entry.getKey(), entry.getValue());
    }
    for (Object entry : entriesMissedInCache) {
      if (!entriesToAddOnCommit.containsKey(entry)) {
        delegate.putObject(entry, null);
      }
    }
  }

  private void unlockMissedEntries() {
    for (Object entry : entriesMissedInCache) {
      try {
        delegate.removeObject(entry);
      } catch (Exception e) {
        log.warn("Unexpected exception while notifiying a rollback to the cache adapter."
            + "Consider upgrading your cache adapter to the latest version.  Cause: " + e);
      }
    }
  }

}
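
Seen from the caller's side, this is why the commit matters: entries queried in one session are only pushed into the mapper's shared cache when that session commits, and only then can another session's CachingExecutor hit them. A hedged sketch, again reusing BaseTest and CachedAuthorMapper from above (with cacheEnabled left at its default of true and <cache/> configured on the mapper):

import org.apache.ibatis.session.SqlSession;
import org.junit.Test;

// assumes BaseTest and CachedAuthorMapper from the examples above
public class SecondLevelCacheVisibilityTest extends BaseTest {

    @Test
    public void secondLevelCacheIsSharedAfterCommit() {
        // first session: the result is staged in its TransactionalCache
        try (SqlSession first = sqlSessionFactory.openSession()) {
            first.getMapper(CachedAuthorMapper.class).queryPosts(); // hits the database
            first.commit(); // flushPendingEntries() pushes the staged entries into the shared cache
        }

        // second session: CachingExecutor finds the entry in the shared cache, no SQL is issued
        try (SqlSession second = sqlSessionFactory.openSession()) {
            second.getMapper(CachedAuthorMapper.class).queryPosts(); // logs a cache hit instead
        }
    }
}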

A mapper can configure its cache in two ways: cache and cache-ref.

cache was used above and is the usual approach; so where does cache-ref come in?

Often our operations are not confined to a single mapper, and more than one place can change the data behind a cache. Some join or intermediate tables, for example, may appear in several mapper files. If each of those mappers kept its own namespace-scoped cache, stale data would be inevitable. (Note: the first-level cache does not have this problem within a session, because any update issued through any mapper clears it.) Related mappers can therefore reference one shared cache via cache-ref, as sketched below, so that an update through any of them flushes the cache they all share, keeping it consistent.
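
A hedged sketch of what that looks like (PostMapper and its statement are made up for this note): one mapper owns the cache, and a related mapper points at it with cache-ref, so an update through either namespace clears the cache they share.

<!-- CachedAuthorMapper.xml: owns the cache, as configured above -->
<mapper namespace="test.CachedAuthorMapper">
  <cache/>
  ...
</mapper>

<!-- PostMapper.xml: hypothetical mapper that touches the same tables
     and reuses CachedAuthorMapper's cache instead of declaring its own -->
<mapper namespace="test.PostMapper">
  <cache-ref namespace="test.CachedAuthorMapper"/>

  <update id="updatePost">
    update post set title = #{title} where id = #{id}
  </update>
</mapper>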
