Advantages:
A two-level cache reads from the local cache first, which is very fast and greatly reduces data exchange with the remote cache, saving network overhead.
Problems:
In a distributed environment the local cache raises consistency issues: after a local cache entry changes, the other nodes must be notified to refresh their local caches, so this approach may not fit scenarios with strict consistency requirements.
Local cache: Caffeine, Guava Cache
Remote cache: Redis, Memcached
Enable caching: @EnableCaching
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-cache</artifactId>
</dependency>
Spring Boot Cache declarative caching annotations:
@Cacheable: before the method executes, look the value up in the cache; only on a miss is the method executed, and its result is then stored in the cache.
Common attributes: cacheNames/value, key, keyGenerator, condition, unless, sync.
@CachePut: after the method executes, store its result in the cache.
@CacheEvict: after the method executes, evict the cache entry.
@Caching: combines the three annotations above.
SpEL expressions supported:
#result: the return value of the method after it has executed.
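As a minimal, illustrative sketch of how these annotations are typically wired together (UserService, User, and the cache name "user" are hypothetical examples, not from the original project; each class would live in its own file):

@SpringBootApplication
@EnableCaching // enable Spring's annotation-driven caching
public class DemoApplication {
    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }
}

@Service
public class UserService {

    // Check the cache first; the method body only runs on a miss,
    // and its return value is then stored under the key "user::<id>".
    @Cacheable(cacheNames = "user", key = "#id")
    public User getById(Long id) {
        return loadFromDb(id);
    }

    // Always run the method, then overwrite the cached entry with #result.
    @CachePut(cacheNames = "user", key = "#result.id")
    public User update(User user) {
        return saveToDb(user);
    }

    // Run the method, then remove the entry from the cache.
    @CacheEvict(cacheNames = "user", key = "#id")
    public void delete(Long id) {
        deleteFromDb(id);
    }

    // stand-ins for real persistence calls
    private User loadFromDb(Long id) { return new User(id, "demo"); }
    private User saveToDb(User user) { return user; }
    private void deleteFromDb(Long id) { }
}

// hypothetical domain type for the example
record User(Long id, String name) {}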
Dependency:
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
</dependency>
Cache<String, Object> cache = Caffeine.newBuilder().build();
Based on maximum size:
Cache<String, Object> cache = Caffeine.newBuilder()
.maximumSize(10000)
.build();
Based on weight:
Cache<String, Object> cache = Caffeine.newBuilder()
        .maximumWeight(10000)
        .weigher((Weigher<String, Object>) (s, o) -> {
            // compute this entry's weight based on the object; 0 here is only a placeholder
            return 0;
        })
        .build();
Based on weak references, evicted once no strong references remain:
Cache<String, Object> cache = Caffeine.newBuilder()
.weakKeys()
.weakValues()
.build();
Based on soft references, evicted when there are no strong references and memory runs low:
Cache<String, Object> cache = Caffeine.newBuilder()
.softValues()
.build();
Based on expiration time:
Cache<String, Object> cache = Caffeine.newBuilder()
.expireAfterWrite(5, TimeUnit.SECONDS)
.build();
Cache<String, Object> cache = Caffeine.newBuilder()
.expireAfterWrite(5, TimeUnit.SECONDS)
.build();
// put
cache.put("key1", "123");
// get
Object key1Obj = cache.getIfPresent("key1");
// remove
cache.invalidate("key1");
// remove all
cache.invalidateAll();
Asynchronous cache (AsyncCache): results are wrapped in CompletableFuture and executed asynchronously on a thread pool.
AsyncCache<String, Object> asyncCache = Caffeine.newBuilder()
        .expireAfterWrite(5, TimeUnit.SECONDS)
        .buildAsync();
// put
asyncCache.put("key1", CompletableFuture.supplyAsync(() -> "123"));
// get
CompletableFuture<Object> key1Future = asyncCache.getIfPresent("key1");
try {
    Object key1Obj = key1Future.get();
} catch (InterruptedException | ExecutionException e) {
    // handle or log the exception
}
// remove
asyncCache.synchronous().invalidate("key1");
// remove all
asyncCache.synchronous().invalidateAll();
Loading cache (LoadingCache): used the same way as an ordinary cache; on a miss the data is loaded into the cache automatically via a loader callback you provide, e.g. a database query.
LoadingCache<String, Object> cache = Caffeine.newBuilder()
        .expireAfterWrite(5, TimeUnit.SECONDS)
        .build(key -> {
            // load the business data, e.g. query the database
            return "Data From DB";
        });
Asynchronous loading cache (AsyncLoadingCache): used the same way as the asynchronous cache; on a miss the data is loaded automatically, but unlike LoadingCache the loading itself is asynchronous.
// load asynchronously using the AsyncCache's default executor
AsyncLoadingCache<String, Object> asyncCache0 = Caffeine.newBuilder()
        .expireAfterWrite(5, TimeUnit.SECONDS)
        .buildAsync(key -> {
            // load the business data
            return "Data From DB";
        });
// specify the executor used for loading (otherExecutor is an Executor supplied by the caller)
AsyncLoadingCache<String, Object> asyncCache1 = Caffeine.newBuilder()
        .expireAfterWrite(5, TimeUnit.SECONDS)
        .buildAsync((key, executor) -> CompletableFuture.supplyAsync(() -> {
            // load the business data asynchronously
            return "Data From DB";
        }, otherExecutor));
Note: AsyncLoadingCache does not support the weak-reference and soft-reference eviction policies.
Caffeine supports periodic refresh via refreshAfterWrite. Unlike expiration, a refresh is only triggered by the first access after the interval elapses, and the old value keeps being served while the new one is loaded.
LoadingCache<String, Object> cache = Caffeine.newBuilder()
        .expireAfterWrite(5, TimeUnit.SECONDS)
        .refreshAfterWrite(3, TimeUnit.SECONDS)
        .build(key -> {
            // load the business data
            return "Data From DB";
        });
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <!-- redis -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-data-redis</artifactId>
    </dependency>
    <!-- cache -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-cache</artifactId>
    </dependency>
    <!-- caffeine -->
    <dependency>
        <groupId>com.github.ben-manes.caffeine</groupId>
        <artifactId>caffeine</artifactId>
    </dependency>
    <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
    </dependency>
    <dependency>
        <groupId>cn.hutool</groupId>
        <artifactId>hutool-core</artifactId>
        <version>5.8.21</version>
    </dependency>
    <dependency>
        <groupId>io.springfox</groupId>
        <artifactId>springfox-swagger2</artifactId>
        <version>3.0.0</version>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
</dependencies>
@Slf4j
@Getter
public class DLCache extends AbstractValueAdaptingCache {

    private final String name;
    private final long expiration;
    private final DLCacheProperties cacheProperties;
    private final Cache<String, Object> caffeineCache;
    private final RedisTemplate<String, Object> redisTemplate;

    public DLCache(String name, long expiration, DLCacheProperties cacheProperties,
                   Cache<String, Object> caffeineCache, RedisTemplate<String, Object> redisTemplate) {
        super(cacheProperties.isAllowNullValues());
        this.name = name;
        this.expiration = expiration;
        this.cacheProperties = cacheProperties;
        this.caffeineCache = caffeineCache;
        this.redisTemplate = redisTemplate;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public Object getNativeCache() {
        return this;
    }

    /**
     * Lookup logic executed when @Cacheable is used.
     * <p>
     * Read from the local Caffeine cache first; if present, return it.
     * Otherwise read from Redis; if present, populate the local cache and return it.
     * If both miss, the original business method runs and put is invoked.
     *
     * @param key cache key
     * @return the store value, or null if the key is absent
     */
    @Override
    protected Object lookup(Object key) {
        String redisKey = getRedisKey(key);
        Object val;
        // keys are stored as strings (see putLocal), so normalize the lookup key as well
        val = caffeineCache.getIfPresent(key.toString());
        // val is a value wrapped by toStoreValue; null means the key is absent,
        // because stored null values are wrapped as DLCacheNullVal.INSTANCE
        if (ObjectUtil.isNotNull(val)) {
            log.debug("DLCache local get cache, key:{}, value:{}", key, val);
            return val;
        }
        val = redisTemplate.opsForValue().get(redisKey);
        if (ObjectUtil.isNotNull(val)) {
            log.debug("DLCache remote get cache, key:{}, value:{}", key, val);
            caffeineCache.put(key.toString(), val);
            return val;
        }
        return val;
    }

    /**
     * Not used by the annotations (they go through the parent's get method).
     * The parent class requires implementations of this method to handle synchronization themselves.
     *
     * @param key         cache key
     * @param valueLoader callback that loads the value on a miss
     * @return the cached or loaded value
     */
    @SuppressWarnings("unchecked")
    @Override
    public <T> T get(Object key, Callable<T> valueLoader) {
        T val;
        val = (T) lookup(key);
        if (ObjectUtil.isNotNull(val)) {
            return val;
        }
        // double-checked locking
        synchronized (key.toString().intern()) {
            val = (T) lookup(key);
            if (ObjectUtil.isNotNull(val)) {
                return val;
            }
            try {
                // the intercepted business method
                val = valueLoader.call();
                // store in the cache
                put(key, val);
            } catch (Exception e) {
                throw new DLCacheException("DLCache valueLoader fail", e);
            }
            return val;
        }
    }

    /**
     * Called on updates, or when both caches miss as above.
     * <p>
     * Writes to Redis first, notifies the other nodes to refresh, then writes to the local cache.
     * Null values are wrapped by toStoreValue before being stored, which helps prevent cache penetration.
     *
     * @param key   cache key
     * @param value value to cache
     */
    @Override
    public void put(Object key, Object value) {
        putRemote(key, value);
        sendSyncMsg(key);
        putLocal(key, value);
    }

    /**
     * Called on delete operations: removes the entry from both caches and notifies the other nodes.
     *
     * @param key cache key
     */
    @Override
    public void evict(Object key) {
        // clear redis first, then caffeine
        clearRemote(key);
        sendSyncMsg(key);
        clearLocal(key);
    }

    /**
     * Same as evict, but removes all entries.
     * Note that the Redis KEYS command is usually disabled in production, so it should be replaced with SCAN.
     */
    @Override
    public void clear() {
        // clear redis first, then caffeine
        clearRemote(null);
        sendSyncMsg(null);
        clearLocal(null);
    }

    private void sendSyncMsg(Object key) {
        String syncTopic = cacheProperties.getRemote().getSyncTopic();
        DLCacheRefreshMsg refreshMsg = new DLCacheRefreshMsg(name, key);
        // record in SELF_MSG_MAP so this node does not reprocess its own message
        DLCacheRefreshListener.SELF_MSG_MAP.add(refreshMsg);
        redisTemplate.convertAndSend(syncTopic, refreshMsg);
    }

    private void putLocal(Object key, Object value) {
        // toStoreValue wraps null values
        caffeineCache.put(key.toString(), toStoreValue(value));
    }

    private void putRemote(Object key, Object value) {
        if (expiration > 0) {
            // toStoreValue wraps null values
            redisTemplate.opsForValue().set(getRedisKey(key), toStoreValue(value), expiration, TimeUnit.MILLISECONDS);
            return;
        }
        redisTemplate.opsForValue().set(getRedisKey(key), toStoreValue(value));
    }

    public void clearRemote(Object key) {
        if (ObjectUtil.isNull(key)) {
            Set<String> keys = redisTemplate.keys(getRedisKey("*"));
            if (ObjectUtil.isNotEmpty(keys)) {
                assert keys != null;
                keys.forEach(redisTemplate::delete);
            }
            return;
        }
        redisTemplate.delete(getRedisKey(key));
    }

    public void clearLocal(Object key) {
        if (ObjectUtil.isNull(key)) {
            caffeineCache.invalidateAll();
            return;
        }
        // keys are stored as strings, so invalidate with the string form
        caffeineCache.invalidate(key.toString());
    }

    /**
     * Check whether caching null is allowed.
     *
     * @param value the value to cache
     * @return true if not null; false if null but nulls are allowed; otherwise throws
     */
    private boolean checkValNotNull(Object value) {
        if (ObjectUtil.isNotNull(value)) {
            return true;
        }
        if (isAllowNullValues() && ObjectUtil.isNull(value)) {
            return false;
        }
        // the value must not be null, but null was passed
        throw new DLCacheException("Check null val is not allowed");
    }

    @Override
    protected Object fromStoreValue(Object storeValue) {
        if (isAllowNullValues() && DLCacheNullVal.INSTANCE.equals(storeValue)) {
            return null;
        }
        return storeValue;
    }

    @Override
    protected Object toStoreValue(Object userValue) {
        if (!checkValNotNull(userValue)) {
            return DLCacheNullVal.INSTANCE;
        }
        return userValue;
    }

    /**
     * Build the full Redis key.
     */
    private String getRedisKey(Object key) {
        // double colon, consistent with the Spring Cache default
        return this.name.concat("::").concat(key.toString());
    }

    /**
     * Stored instead of null so that a missing key can be distinguished from a cached null value.
     */
    @Data
    public static class DLCacheNullVal {
        public static final DLCacheNullVal INSTANCE = new DLCacheNullVal();
        private String desc = "nullVal";
    }
}
@Slf4j
@RequiredArgsConstructor
public class DLCacheManager implements CacheManager {

    private final ConcurrentHashMap<String, DLCache> cacheMap = new ConcurrentHashMap<>();

    private final DLCacheProperties cacheProperties;
    private final RedisTemplate<String, Object> redisTemplate;

    @Override
    public DLCache getCache(String name) {
        return cacheMap.computeIfAbsent(name, (o) -> {
            DLCache dlCache = buildCache(o);
            log.debug("Create DLCache instance, name:{}", o);
            return dlCache;
        });
    }

    private DLCache buildCache(String name) {
        Caffeine<Object, Object> caffeine = Caffeine.newBuilder();
        // set the expiration (expireAfterWrite)
        long expiration = 0;
        // expiration configured for this specific cache name
        // (guard against a missing entry: unboxing a null Long would throw a NullPointerException)
        Map<String, Long> cacheExpirationMap = cacheProperties.getCacheExpirationMap();
        if (ObjectUtil.isNotEmpty(cacheExpirationMap)
                && ObjectUtil.isNotNull(cacheExpirationMap.get(name))
                && cacheExpirationMap.get(name) > 0) {
            expiration = cacheExpirationMap.get(name);
        } else if (cacheProperties.getDefaultExpiration() > 0) {
            expiration = cacheProperties.getDefaultExpiration();
        }
        if (expiration > 0) {
            caffeine.expireAfterWrite(expiration, TimeUnit.MILLISECONDS);
        }
        // apply the local cache settings
        DLCacheProperties.LocalConfig localConfig = cacheProperties.getLocal();
        if (ObjectUtil.isNotNull(localConfig.getInitialCapacity()) && localConfig.getInitialCapacity() > 0) {
            caffeine.initialCapacity(localConfig.getInitialCapacity());
        }
        if (ObjectUtil.isNotNull(localConfig.getMaximumSize()) && localConfig.getMaximumSize() > 0) {
            caffeine.maximumSize(localConfig.getMaximumSize());
        }
        return new DLCache(name, expiration, cacheProperties, caffeine.build(), redisTemplate);
    }

    @Override
    public Collection<String> getCacheNames() {
        return Collections.unmodifiableSet(cacheMap.keySet());
    }
}
@Data
@Component
@ConfigurationProperties(prefix = "test.cache.dl")
public class DLCacheProperties {

    /**
     * Whether to store null values
     */
    private boolean allowNullValues = true;

    /**
     * Default expiration in milliseconds; 0 means never expire. Defaults to 30 minutes.
     */
    private long defaultExpiration = 30 * 60 * 1000;

    /**
     * Per-cacheName expiration in milliseconds; 0 means never expire.
     */
    private Map<String, Long> cacheExpirationMap;

    /**
     * Local (Caffeine) cache settings
     */
    private LocalConfig local = new LocalConfig();

    /**
     * Remote (Redis) cache settings
     */
    private RemoteConfig remote = new RemoteConfig();

    @Data
    public static class LocalConfig {

        /**
         * Initial capacity; 0 means use the default
         */
        private int initialCapacity;

        /**
         * Maximum number of entries; 0 means use the default. Defaults to 10,000.
         */
        private long maximumSize = 10000L;
    }

    @Data
    public static class RemoteConfig {

        /**
         * Redis pub/sub topic used for cache refresh notifications
         */
        private String syncTopic = "cache:dl:refresh:topic";
    }
}
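For reference, these properties could be bound from application.yml roughly as follows (the cache name "user" and all values are only illustrative examples, not from the original project):

test:
  cache:
    dl:
      allow-null-values: true
      default-expiration: 1800000      # 30 minutes, in milliseconds
      cache-expiration-map:
        user: 60000                    # per-cache-name expiration (example cache name)
      local:
        initial-capacity: 128
        maximum-size: 10000
      remote:
        sync-topic: "cache:dl:refresh:topic"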
@Slf4j
@RequiredArgsConstructor
@Component
public class DLCacheRefreshListener implements MessageListener, InitializingBean {

    public static final ConcurrentHashSet<DLCacheRefreshMsg> SELF_MSG_MAP = new ConcurrentHashSet<>();

    private final DLCacheManager dlCacheManager;
    private final DLCacheProperties cacheProperties;
    private final RedisMessageListenerContainer listenerContainer;
    private final RedisTemplate<String, Object> redisTemplate;

    @Override
    public void onMessage(Message message, byte[] pattern) {
        // deserialize the refresh message
        DLCacheRefreshMsg refreshMsg = (DLCacheRefreshMsg) redisTemplate.getValueSerializer().deserialize(message.getBody());
        if (ObjectUtil.isNull(refreshMsg)) {
            return;
        }
        // skip messages published by this node itself
        if (SELF_MSG_MAP.contains(refreshMsg)) {
            SELF_MSG_MAP.remove(refreshMsg);
            return;
        }
        log.debug("DLCache refresh local, cache name:{}, key:{}", refreshMsg.getCacheName(), refreshMsg.getKey());
        // clear the local cache entry
        dlCacheManager.getCache(refreshMsg.getCacheName()).clearLocal(refreshMsg.getKey());
    }

    @Override
    public void afterPropertiesSet() {
        // register with the RedisMessageListenerContainer
        listenerContainer.addMessageListener(this, new ChannelTopic(cacheProperties.getRemote().getSyncTopic()));
    }
}
@Data
@AllArgsConstructor
@NoArgsConstructor
@Builder
@Accessors(chain = true)
public class DLCacheRefreshMsg {
private String cacheName;
private Object key;
}
@Configuration
public class CacheConfig {

    @Bean(name = "dlCacheManager")
    public DLCacheManager dlCacheManager(DLCacheProperties cacheProperties, RedisTemplate<String, Object> redisTemplate) {
        return new DLCacheManager(cacheProperties, redisTemplate);
    }

    @Bean
    public Cache<String, Object> caffeineCache() {
        return Caffeine.newBuilder()
                .expireAfterWrite(5, TimeUnit.SECONDS)
                .build();
    }

    @Bean
    public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setKeySerializer(new StringRedisSerializer());
        redisTemplate.setHashKeySerializer(new StringRedisSerializer());
        redisTemplate.setHashValueSerializer(new GenericJackson2JsonRedisSerializer());
        redisTemplate.setValueSerializer(new GenericJackson2JsonRedisSerializer());
        redisTemplate.setConnectionFactory(redisConnectionFactory);
        return redisTemplate;
    }

    /**
     * RedisMessageListenerContainer used for the pub/sub refresh messages
     *
     * @param connectionFactory connection factory
     * @return the listener container
     */
    @Bean
    public RedisMessageListenerContainer redisMessageListenerContainer(RedisConnectionFactory connectionFactory) {
        RedisMessageListenerContainer container = new RedisMessageListenerContainer();
        container.setConnectionFactory(connectionFactory);
        return container;
    }
}
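To route the Spring Cache annotations through this two-level cache, point them at the dlCacheManager bean. A minimal usage sketch (OrderService, Order, and the cache name "order" are hypothetical, not from the original project):

@Service
public class OrderService {

    // reads go local Caffeine -> Redis -> method body; writes and evictions are broadcast to the other nodes
    @Cacheable(cacheNames = "order", key = "#id", cacheManager = "dlCacheManager")
    public Order getById(Long id) {
        // stand-in for a real database query
        return new Order(id, "pending");
    }

    @CacheEvict(cacheNames = "order", key = "#id", cacheManager = "dlCacheManager")
    public void remove(Long id) {
        // stand-in for a real database delete
    }
}

// hypothetical domain type for the example
record Order(Long id, String status) {}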
GitHub: https://github.com/YuriLuo/J2Cache
The official documentation is recommended reading; it is very detailed.
L1: in-process cache (Caffeine / Ehcache)
L2: centralized cache (Redis / Memcached)
<!-- starter dependency -->
<dependency>
<groupId>net.oschina.j2cache</groupId>
<artifactId>j2cache-spring-boot2-starter</artifactId>
<version>2.8.0-release</version>
</dependency>
<!-- j2cache core package -->
<dependency>
<groupId>net.oschina.j2cache</groupId>
<artifactId>j2cache-core</artifactId>
<version>2.8.5-release</version>
</dependency>
server:
port: 9000
spring:
redis:
host: IP
password:
port: 6379
database: 0
j2cache:
config-location: J2Cache.properties
#########################################
# Cache Broadcast Method
# values:
#   jgroups  -> use jgroups's multicast
#   redis    -> use redis publish/subscribe mechanism (using jedis)
#   lettuce  -> use redis publish/subscribe mechanism (using lettuce, Recommend)
#   rabbitmq -> use RabbitMQ publisher/consumer mechanism
#   rocketmq -> use RocketMQ publisher/consumer mechanism
#   none     -> don't notify the other nodes in cluster
#   xx.xxxx.xxxx.Xxxxx your own cache broadcast policy classname that implement net.oschina.j2cache.cluster.ClusterPolicy
#########################################
j2cache.broadcast = redis

#########################################
# Level 1&2 provider
# values:
#   none           -> disable this level cache
#   ehcache        -> use ehcache2 as level 1 cache
#   ehcache3       -> use ehcache3 as level 1 cache
#   caffeine       -> use caffeine as level 1 cache (only in memory)
#   redis          -> use redis as level 2 cache (using jedis)
#   lettuce        -> use redis as level 2 cache (using lettuce)
#   readonly-redis -> use redis as level 2 cache, but never write data to it. if use this provider, you must uncomment `j2cache.L2.config_section` to make the redis configurations available.
#   memcached      -> use memcached as level 2 cache (xmemcached)
#   [classname]    -> use custom provider
#########################################
j2cache.L1.provider_class = caffeine
j2cache.L2.provider_class = redis

# When L2 provider isn't `redis`, using `L2.config_section = redis` to read redis configurations
# j2cache.L2.config_section = redis

# Enable/Disable ttl in redis cache data (if disabled, the object in redis will never expire, default:true)
# NOTICE: redis hash mode (redis.storage = hash) do not support this feature)
j2cache.sync_ttl_to_redis = true

# Whether to cache null objects by default (default false)
j2cache.default_cache_null_object = true

#########################################
# Cache Serialization Provider
# values:
#   fst      -> using fast-serialization (recommend)
#   kryo     -> using kryo serialization
#   json     -> using fst's json serialization (testing)
#   fastjson -> using fastjson serialization (embed non-static class not support)
#   java     -> java standard
#   fse      -> using fse serialization
#   [classname implements Serializer]
#########################################
j2cache.serialization = json
#json.map.person = net.oschina.j2cache.demo.Person

#########################################
# Caffeine configuration
#   caffeine.region.[name] = size, xxxx[s|m|h|d]
#########################################
caffeine.properties = /caffeine.properties

#########################################
# Redis connection configuration
#########################################

#########################################
# Redis Cluster Mode
#
#   single   -> single redis server
#   sentinel -> master-slaves servers
#   cluster  -> cluster servers (the database setting is ignored; database = 0 is used)
#   sharded  -> sharded servers (password and database must be specified in hosts, and the pool settings are ignored; e.g. redis://user:password@127.0.0.1:6379/0)
#
#########################################
redis.mode = single

# redis storage mode (generic|hash)
redis.storage = generic

## redis pub/sub channel name
redis.channel = j2cache
## redis pub/sub server (using redis.hosts when empty)
redis.channel.host =

# cluster name just for sharded
redis.cluster_name = j2cache

## redis cache namespace optional, default[empty]
redis.namespace =

## redis command scan parameter count, default[1000]
#redis.scanCount = 1000

## connection
# Separate multiple redis nodes with commas, such as 192.168.0.10:6379,192.168.0.11:6379,192.168.0.12:6379
redis.hosts = 127.0.0.1:6379
redis.timeout = 2000
redis.password =
redis.database = 0
redis.ssl = false

## redis pool properties
redis.maxTotal = 100
redis.maxIdle = 10
redis.maxWaitMillis = 5000
redis.minEvictableIdleTimeMillis = 60000
redis.minIdle = 1
redis.numTestsPerEvictionRun = 10
redis.lifo = false
redis.softMinEvictableIdleTimeMillis = 10
redis.testOnBorrow = true
redis.testOnReturn = false
redis.testWhileIdle = true
redis.timeBetweenEvictionRunsMillis = 300000
redis.blockWhenExhausted = false
redis.jmxEnabled = false

#########################################
# Lettuce scheme
#
#   redis          -> single redis server
#   rediss         -> single redis server with ssl
#   redis-sentinel -> redis sentinel
#   redis-cluster  -> cluster servers
#
#########################################

#########################################
# Lettuce Mode
#
#   single   -> single redis server
#   sentinel -> master-slaves servers
#   cluster  -> cluster servers (the database setting is ignored; database = 0 is used)
#   sharded  -> sharded servers (password and database must be specified in hosts, and the pool settings are ignored; e.g. redis://user:password@127.0.0.1:6379/0)
#
#########################################

## redis command scan parameter count, default[1000]
#lettuce.scanCount = 1000
lettuce.mode = single
lettuce.namespace =
lettuce.storage = hash
lettuce.channel = j2cache
lettuce.scheme = redis
lettuce.hosts = 127.0.0.1:6379
lettuce.password =
lettuce.database = 0
lettuce.sentinelMasterId =
lettuce.sentinelPassword =
lettuce.maxTotal = 100
lettuce.maxIdle = 10
lettuce.minIdle = 10
# timeout in milliseconds
lettuce.timeout = 10000
# redis cluster topology refresh interval in milliseconds
lettuce.clusterTopologyRefresh = 3000
Create the /resources/caffeine.properties file:
#########################################
# Caffeine configuration
# [name] = size, xxxx[s|m|h|d]
#########################################
default = 1000, 30m
public static void main(String[] args) {
    CacheChannel cache = J2Cache.getChannel();
    // cache operations
    cache.set("default", "1", "Hello J2Cache");
    System.out.println(cache.get("default", "1"));
    cache.evict("default", "1");
    System.out.println(cache.get("default", "1"));
    cache.close();
}