redis搭建集群模式、Cluster模式(6节点,3主3从集群模式,添加删除节点)_redis cluster节点带数据增减-CSDN博客
Linux部署Redis哨兵集群 一主两从三哨兵(这里使用Redis6,其它版本类似)_linux redis集群模式部署-CSDN博客
配置yaml
redis:
  redis-configs:
    redis-order:
      type: sentinel #standalone cluster
      hostAndPort: 192.168.132.1:16379,192.168.132.1:16380,192.168.132.1:16381
      masterName: mymaster
      password: dyj1
      username:
      database: 15
      timeout: 10000
      pool:
        max-idle: 8
        min-idle: 0
        max-active: 8
        max-wait: 10000
#    redis-pay:
#      type: standalone
#      hostAndPort: localhost:6380
#      database: 14
#      timeout: 10000
#      pool:
#        max-idle: 8     # max idle connections in the pool, default 8
#        min-idle: 0     # min idle connections in the pool, default 0
#        max-active: 8   # max total connections (negative = unlimited), default 8
#        max-wait: 10000 # max blocking wait in ms when pool is exhausted (negative = unlimited), default -1
#    redis-order-cluster:
#      type: cluster
#      hostAndPort: xxx:6379,xxx:6379,xxx:6379
#      database: 15
#      timeout: 10000
#      cluster-max-redirects: 3   # binds to Config.clusterMaxRedirects (relaxed binding)
#      pool:
#        max-idle: 8
#        min-idle: 0
#        max-active: 8
#        max-wait: 10000
#    redis-pay-cluster:
#      type: cluster
#      hostAndPort: xxx:6379,xxx:6379,xxx:6379
#      database: 14
#      timeout: 10000
#      pool:
#        max-idle: 8
#        min-idle: 0
#        max-active: 8
#        max-wait: 10000
cache:
  caffeine:
    cache10M: "initialCapacity=20,maximumSize=100,expireAfterWrite=10m,recordStats"
    cache30s: "initialCapacity=20,maximumSize=100,expireAfterWrite=30s"
  redis:
    cache10M: "10"
    cache30s: "30"
redis连接池
package org.example.redis.config;
import freemarker.template.utility.StringUtil;
import io.lettuce.core.ClientOptions;
import io.lettuce.core.cluster.ClusterClientOptions;
import io.lettuce.core.cluster.ClusterTopologyRefreshOptions;
import lombok.Data;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.*;
import org.springframework.data.redis.connection.lettuce.LettuceClientConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnection;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.connection.lettuce.LettucePoolingClientConfiguration;
import org.springframework.data.util.Pair;
import javax.annotation.PostConstruct;
import java.time.Duration;
import java.util.*;
@Configuration
@Data
@EnableConfigurationProperties({RedisPoolConfig.class})
public class RedisConnectionFactoryConfig {

    /**
     * Connection factories keyed by the logical instance name from the
     * "redis.redis-configs" YAML map (e.g. "redis-order"). Populated once in
     * {@link #init()} and read by the template configuration classes.
     */
    public static Map<String, LettuceConnectionFactory> redisConnectionFactors = new HashMap<>();

    @Autowired
    private RedisPoolConfig redisPoolConfig;

    /**
     * Builds one {@link LettuceConnectionFactory} per configured Redis instance.
     * Supported types: "standalone", "cluster", "sentinel"; any other type is
     * reported and skipped. Instances whose host list cannot be parsed are skipped.
     */
    @PostConstruct
    public void init() {
        redisPoolConfig.getRedisConfigs().forEach((name, config) -> {
            LettuceConnectionFactory redisConnectionFactory = null;
            LettuceClientConfiguration clientConfig = getClientConfiguration(config);
            switch (config.getType()) {
                case "standalone": {
                    RedisStandaloneConfiguration standaloneConfiguration = createRedisStandaloneConfiguration(config);
                    if (standaloneConfiguration != null) {
                        redisConnectionFactory = new LettuceConnectionFactory(standaloneConfiguration, clientConfig);
                    }
                    break;
                }
                case "cluster": {
                    RedisClusterConfiguration clusterConfiguration = createRedisClusterConfiguration(config);
                    if (clusterConfiguration != null) {
                        redisConnectionFactory = new LettuceConnectionFactory(clusterConfiguration, clientConfig);
                    }
                    break;
                }
                case "sentinel": {
                    RedisSentinelConfiguration sentinelConfiguration = createRedisSentinelConfiguration(config);
                    if (sentinelConfiguration != null) {
                        redisConnectionFactory = new LettuceConnectionFactory(sentinelConfiguration, clientConfig);
                    }
                    break;
                }
                default:
                    // BUGFIX: type is a String — the original "%d" format specifier threw
                    // IllegalFormatConversionException instead of printing the diagnostic.
                    System.out.printf("Unknown type: %s\n", config.getType());
                    break;
            }
            if (null != redisConnectionFactory) {
                // Validate a pooled connection on borrow; a broken connection is replaced
                // with a fresh one instead of being handed to the caller.
                redisConnectionFactory.setValidateConnection(true);
                redisConnectionFactory.afterPropertiesSet(); // start() for spring-data-redis-3.X; afterPropertiesSet() for spring-data-redis-2.X
                redisConnectionFactors.putIfAbsent(name, redisConnectionFactory);
            }
        });
    }

    /**
     * Builds the pooled Lettuce client configuration for one instance. All pool and
     * timeout values are optional strings; blank values fall back to the
     * commons-pool2 / Lettuce defaults. Cluster instances additionally get
     * adaptive + periodic topology refresh.
     */
    private LettuceClientConfiguration getClientConfiguration(RedisPoolConfig.Config config) {
        GenericObjectPoolConfig<LettuceConnection> poolConfig = new GenericObjectPoolConfig<>();
        if (StringUtils.isNotBlank(config.getPool().getMaxActive())) {
            poolConfig.setMaxTotal(Integer.parseInt(config.getPool().getMaxActive()));
        }
        if (StringUtils.isNotBlank(config.getPool().getMaxWait())) {
            poolConfig.setMaxWait(Duration.ofMillis(Integer.parseInt(config.getPool().getMaxWait())));
        }
        if (StringUtils.isNotBlank(config.getPool().getMaxIdle())) {
            poolConfig.setMaxIdle(Integer.parseInt(config.getPool().getMaxIdle()));
        }
        if (StringUtils.isNotBlank(config.getPool().getMinIdle())) {
            poolConfig.setMinIdle(Integer.parseInt(config.getPool().getMinIdle()));
        }
        int timeout = -1;
        if (StringUtils.isNotBlank(config.getTimeout())) {
            timeout = Integer.parseInt(config.getTimeout());
        }
        if (StringUtils.equals(config.getType(), "cluster")) {
            // Adaptive + periodic cluster topology refresh so the client notices
            // failover / resharding without a restart.
            ClusterTopologyRefreshOptions clusterTopologyRefreshOptions = ClusterTopologyRefreshOptions.builder()
                    // BUGFIX: enableAllAdaptiveRefreshTriggers() already enables every
                    // adaptive trigger; the original additionally called the no-arg
                    // enableAdaptiveRefreshTrigger(), which current Lettuce versions
                    // reject with IllegalArgumentException (empty trigger varargs).
                    .enableAllAdaptiveRefreshTriggers()
                    // Periodic refresh every 5s (Lettuce default period is 60s).
                    .enablePeriodicRefresh(Duration.ofSeconds(5))
                    .build();
            ClusterClientOptions clusterClientOptions = ClusterClientOptions.builder()
                    .topologyRefreshOptions(clusterTopologyRefreshOptions)
                    // Commands are routed by hash slot; when a key has moved, the client
                    // follows MOVED/ASK redirections up to maxRedirects times before failing.
                    //.maxRedirects()
                    .build();
            return LettucePoolingClientConfiguration.builder()
                    .poolConfig(poolConfig)
                    //.readFrom(ReadFrom.SLAVE_PREFERRED) // read/write splitting: write to master, read from replicas
                    .clientOptions(clusterClientOptions)
                    .build();
        }
        LettucePoolingClientConfiguration.LettucePoolingClientConfigurationBuilder builder =
                LettucePoolingClientConfiguration.builder().poolConfig(poolConfig);
        // ROBUSTNESS: only apply the shutdown timeout when one was configured; the
        // original passed Duration.ofMillis(-1) when "timeout" was blank.
        if (timeout >= 0) {
            builder.shutdownTimeout(Duration.ofMillis(timeout));
        }
        return builder.build();
    }

    /**
     * Sentinel mode: master name plus a comma-separated list of sentinel
     * host:port endpoints. Returns null when no endpoint parses.
     */
    private RedisSentinelConfiguration createRedisSentinelConfiguration(RedisPoolConfig.Config config) {
        RedisSentinelConfiguration redisSentinelConfiguration = new RedisSentinelConfiguration();
        redisSentinelConfiguration.setMaster(config.getMasterName());
        List<Pair<String, Integer>> hostAndPorts = parseClusterHostAndPort(config.getHostAndPort());
        if (hostAndPorts.isEmpty()) {
            return null;
        }
        for (Pair<String, Integer> hostAndPort : hostAndPorts) {
            RedisNode.RedisNodeBuilder builder = RedisNode.newRedisNode()
                    // .promotedAs(RedisNode.NodeType.SLAVE)
                    .listeningAt(hostAndPort.getFirst(), hostAndPort.getSecond());
            redisSentinelConfiguration.addSentinel(builder.build());
        }
        setUsername(config, redisSentinelConfiguration);
        setPassword(config, redisSentinelConfiguration);
        setDatabase(config, redisSentinelConfiguration);
        return redisSentinelConfiguration;
    }

    /**
     * Cluster mode: comma-separated seed nodes. Returns null when no endpoint parses.
     * Note: cluster mode has no database index; only username/password/maxRedirects apply.
     */
    private RedisClusterConfiguration createRedisClusterConfiguration(RedisPoolConfig.Config config) {
        List<Pair<String, Integer>> hostAndPorts = parseClusterHostAndPort(config.getHostAndPort());
        if (hostAndPorts.isEmpty()) {
            return null;
        }
        RedisClusterConfiguration redisClusterConfiguration = new RedisClusterConfiguration();
        for (Pair<String, Integer> hostAndPort : hostAndPorts) {
            RedisNode node = new RedisNode(hostAndPort.getFirst(), hostAndPort.getSecond());
            redisClusterConfiguration.addClusterNode(node);
        }
        setUsername(config, redisClusterConfiguration);
        setPassword(config, redisClusterConfiguration);
        setClusterConf(config, redisClusterConfiguration);
        return redisClusterConfiguration;
    }

    /** Standalone mode: a single host:port endpoint. Returns null on parse failure. */
    private RedisStandaloneConfiguration createRedisStandaloneConfiguration(RedisPoolConfig.Config config) {
        Pair<String, Integer> hostAndPort = parseHostAndPort(config.getHostAndPort());
        if (null == hostAndPort) {
            return null;
        }
        RedisStandaloneConfiguration redisStandaloneConfiguration = new RedisStandaloneConfiguration();
        redisStandaloneConfiguration.setHostName(hostAndPort.getFirst());
        redisStandaloneConfiguration.setPort(hostAndPort.getSecond());
        setUsername(config, redisStandaloneConfiguration);
        setPassword(config, redisStandaloneConfiguration);
        setDatabase(config, redisStandaloneConfiguration);
        return redisStandaloneConfiguration;
    }

    /** Applies the optional ACL username (Redis 6+); blank/null means "not set". */
    private void setUsername(RedisPoolConfig.Config config, RedisConfiguration.WithPassword connectionFactory) {
        if (null != config.getUsername() && !config.getUsername().isEmpty()) {
            connectionFactory.setUsername(config.getUsername());
        }
    }

    /** Applies the optional password; blank/null means "no auth". */
    private void setPassword(RedisPoolConfig.Config config, RedisConfiguration.WithPassword connectionFactory) {
        if (null != config.getPassword() && !config.getPassword().isEmpty()) {
            connectionFactory.setPassword(config.getPassword());
        }
    }

    /** Applies the optional database index (standalone/sentinel only). */
    private void setDatabase(RedisPoolConfig.Config config, RedisConfiguration.WithDatabaseIndex connectionFactory) {
        if (null != config.getDatabase() && !config.getDatabase().isEmpty()) {
            int database = Integer.parseInt(config.getDatabase());
            connectionFactory.setDatabase(database);
        }
    }

    /** Applies the optional MOVED/ASK redirect limit for cluster mode. */
    private void setClusterConf(RedisPoolConfig.Config config, RedisClusterConfiguration redisClusterConfiguration) {
        if (null != config.getClusterMaxRedirects() && !config.getClusterMaxRedirects().isEmpty()) {
            int maxRedirects = Integer.parseInt(config.getClusterMaxRedirects());
            redisClusterConfiguration.setMaxRedirects(maxRedirects);
        }
    }

    /** Splits "h1:p1,h2:p2,..." into pairs, silently dropping malformed entries. */
    private List<Pair<String, Integer>> parseClusterHostAndPort(String hostAndPortStr) {
        String[] hosts = hostAndPortStr.split(",");
        List<Pair<String, Integer>> hostAndPorts = new ArrayList<>();
        for (String hostAndPort : hosts) {
            Pair<String, Integer> pair = parseHostAndPort(hostAndPort);
            if (null != pair) {
                hostAndPorts.add(pair);
            }
        }
        return hostAndPorts;
    }

    /** Parses one "host:port"; returns null (with a diagnostic) on malformed input. */
    private Pair<String, Integer> parseHostAndPort(String hostAndPortStr) {
        String[] hostAndPort = hostAndPortStr.split(":");
        if (hostAndPort.length != 2) {
            System.out.printf("Invalid host and port: %s\n", hostAndPortStr);
            return null;
        }
        String host = hostAndPort[0].trim();
        String port = hostAndPort[1].trim();
        try {
            return Pair.of(host, Integer.parseInt(port));
        } catch (NumberFormatException e) {
            // ROBUSTNESS: a non-numeric port used to crash @PostConstruct startup;
            // skip the malformed entry instead, consistent with the length check above.
            System.out.printf("Invalid host and port: %s\n", hostAndPortStr);
            return null;
        }
    }
}
缓存管理类
package org.example.redis.config;
import com.github.benmanes.caffeine.cache.*;
import lombok.extern.slf4j.Slf4j;
import org.example.redis.listener.CaffeineCacheRemovalListener;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.caffeine.CaffeineCache;
import org.springframework.cache.interceptor.KeyGenerator;
import org.springframework.cache.interceptor.SimpleKeyGenerator;
import org.springframework.cache.support.SimpleCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.cache.RedisCacheWriter;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import javax.annotation.Resource;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Slf4j
@Configuration
@EnableCaching
@EnableConfigurationProperties({CustomerCacheProperties.class})
public class CacheManagerConfiguration {
public interface redisCacheKey {
public final static String cache10M = "cache10M";
public final static String cache30s = "cache30s";
}
public interface caffeineCacheKey {
public final static String cache10M = "cache10M";
public final static String cache30s = "cache30s";
}
@Resource
private CustomerCacheProperties customerCacheProperties;
public interface CacheManagerNames {
String REDIS_CACHE_MANAGER = "redisCacheManager";
String LOCAL_CACHE_MANAGER = "localCacheManager";
}
@Bean(name = CacheManagerNames.REDIS_CACHE_MANAGER)
@Primary
public RedisCacheManager redisCacheManager(RedisConnectionFactory factory) {
Map<String, RedisCacheConfiguration> expires = new HashMap<>();
customerCacheProperties.getRedis().forEach((name, time) -> {
expires.put(name, RedisCacheConfiguration.defaultCacheConfig().entryTtl(Duration.ofSeconds(Integer.parseInt(time))));
});
// Map<String, RedisCacheConfiguration> expires = ImmutableMap.<String, RedisCacheConfiguration>builder()
// .put(cache15, RedisCacheConfiguration.defaultCacheConfig().entryTtl(Duration.ofSeconds(15)))
// .put(cache30, RedisCacheConfiguration.defaultCacheConfig().entryTtl(Duration.ofSeconds(30)))
// .build();
RedisCacheManager redisCacheManager = RedisCacheManager.RedisCacheManagerBuilder
.fromConnectionFactory(factory)
.cacheDefaults(cacheConfiguration())
.withInitialCacheConfigurations(expires)
//事务感知功能有助于确保在事务提交之后缓存和数据库中的数据保持一致,这对于保证数据完整性和避免脏读非常重要。
// 然而,这也可能会增加一些性能开销,因此在不需要强一致性的场景下,可以考虑禁用这个特性以提高性能
.transactionAware()
.build();
//以锁写入的方式创建RedisCacheWriter对象
// RedisCacheWriter writer = RedisCacheWriter.lockingRedisCacheWriter(factory);
return redisCacheManager;
}
@Bean
public RedisCacheConfiguration cacheConfiguration() {
return RedisCacheConfiguration.defaultCacheConfig()
.disableCachingNullValues()
.serializeKeysWith(RedisSerializationContext.SerializationPair.fromSerializer(new StringRedisSerializer()))
.serializeValuesWith(RedisSerializationContext.SerializationPair.fromSerializer(new GenericJackson2JsonRedisSerializer()));
}
@Bean(name = CacheManagerNames.LOCAL_CACHE_MANAGER)
public CacheManager caffeineCacheManager() {
List<CaffeineCache> caffeineCaches = new ArrayList<>();
customerCacheProperties.getCaffeine().forEach((name, spec) -> {
CaffeineSpec caffeineSpec = CaffeineSpec.parse(spec);
Caffeine<Object, Object> caffeine = Caffeine.from(caffeineSpec);
caffeine.removalListener(new CaffeineCacheRemovalListener());
// caffeineCache.executor(cacheExecutor);
// 设置定时任务执行过期清除操作
//.scheduler(Scheduler.systemScheduler())
//cache对缓存写的通知回调
caffeine.writer(new CacheWriter<Object, Object>() {
@Override
public void write(Object key, Object value) {
log.info("CacheManager write key={}", key);
}
@Override
public void delete(Object key, Object value, RemovalCause cause) {
log.info("CacheManager delete key={}, cause={}", key, cause);
}
});
// //使用CacheLoader创建一个LoadingCache
// caffeine.build(new CacheLoader<String, String>() {
// //同步加载数据
// @Override
// public String load(String key) throws Exception {
// log.info("CacheManager load key={}", key);
// return "value_" + key;
// }
//
// //异步加载数据
// @Override
// public String reload(String key, String oldValue) throws Exception {
// log.info("CacheManager reload key={}", key);
// return "value_" + key;
// }
// });
CaffeineCache caffeineCache = new CaffeineCache(name, caffeine.build());
caffeineCaches.add(caffeineCache);
});
SimpleCacheManager simpleCacheManager = new SimpleCacheManager();
simpleCacheManager.setCaches(caffeineCaches);
return simpleCacheManager;
}
// @Override
// @Cacheable(key = "#userId", cacheNames = CacheManagerConfiguration.CacheNames.CACHE_15MINS,
// cacheManager = CacheManagerConfiguration.CacheManagerNames.EHCACHE_CACHE_MANAGER)
// public User findUserAccordingToId(Long userId) {
// return userRepository.findById(userId).orElse(User.builder().build());
// }
//
// @Override
// @Cacheable(key = "#username", cacheNames = CacheManagerConfiguration.CacheNames.CACHE_15MINS,
// cacheManager = CacheManagerConfiguration.CacheManagerNames.REDIS_CACHE_MANAGER)
// public User findUserAccordingToUserName(String username) {
// return userRepository.findUserByUsername(username);
// }
// @Cacheable( sync = true)
}
创建不同的template
package org.example.redis.config;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.Data;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Primary;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import static org.example.redis.config.RedisConnectionFactoryConfig.redisConnectionFactors;
@Configuration
@Data
@DependsOn("redisConnectionFactoryConfig")
public class RestTemplateConfig {

    // Template bound to the "redis-order" factory registered by
    // RedisConnectionFactoryConfig.init(); @Primary makes it the default
    // StringRedisTemplate for injection.
    // NOTE(review): assumes "redis-order" is present in redisConnectionFactors;
    // a missing key yields a template with a null factory — verify configuration.
    @Bean(name = "orderStringRedisTemplate")
    @Primary
    public StringRedisTemplate orderStringRedisTemplate() {
        return buildStringRedisTemplate(redisConnectionFactors.get("redis-order"));
    }
    //
    // public RedisTemplate<String, Object> buildObjRedisTemplate(RedisConnectionFactory factory) {
    //
    //     RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
    //     redisTemplate.setConnectionFactory(factory);
    //
    //     Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = jackson2JsonRedisSerializer();
    //     redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);
    //     redisTemplate.setKeySerializer(new StringRedisSerializer());
    //
    //     redisTemplate.setHashKeySerializer(new StringRedisSerializer());
    //     redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);
    //     redisTemplate.afterPropertiesSet();
    //     return redisTemplate;
    // }

    // Builds a StringRedisTemplate whose plain and hash values are serialized as
    // JSON (with embedded type info) while keys remain plain strings.
    public StringRedisTemplate buildStringRedisTemplate(RedisConnectionFactory factory) {
        StringRedisTemplate redisTemplate = new StringRedisTemplate(factory);
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = jackson2JsonRedisSerializer();
        redisTemplate.setValueSerializer(jackson2JsonRedisSerializer);
        redisTemplate.setKeySerializer(new StringRedisSerializer());
        redisTemplate.setHashKeySerializer(new StringRedisSerializer());
        redisTemplate.setHashValueSerializer(jackson2JsonRedisSerializer);
        redisTemplate.afterPropertiesSet();
        return redisTemplate;
    }

    // JSON serializer with Jackson default typing so concrete types survive the
    // serialize/deserialize round trip.
    private Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer() {
        Jackson2JsonRedisSerializer<Object> jackson2JsonRedisSerializer = new Jackson2JsonRedisSerializer<>(Object.class);
        ObjectMapper om = new ObjectMapper();
        // Detect all members (fields and getters/setters) at any visibility,
        // including private, for serialization.
        om.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
        // Embed type metadata for non-final types; final classes (String, Integer, ...)
        // are serialized without it.
        // SECURITY NOTE(review): enableDefaultTyping is deprecated and unsafe when the
        // serialized data can come from an untrusted source (polymorphic
        // deserialization gadget attacks). On Jackson 2.10+ prefer
        // activateDefaultTyping(...) with a PolymorphicTypeValidator.
        om.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL);
        jackson2JsonRedisSerializer.setObjectMapper(om);
        return jackson2JsonRedisSerializer;
    }
}
使用实体类接收,使用Map接收。key需要根据自己的业务区分即可
package org.example.redis.config;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import java.util.Map;
// Binds the "redis.redis-configs" YAML map: one Config entry per named Redis
// instance (standalone, cluster, or sentinel). All values are Strings so that
// "blank means unset" checks in RedisConnectionFactoryConfig can apply defaults.
@Data
@NoArgsConstructor
@AllArgsConstructor
@Component
@ConfigurationProperties(prefix = "redis")
public class RedisPoolConfig {
    // Keyed by logical instance name (e.g. "redis-order").
    public Map<String, Config> redisConfigs;

    // Settings for one Redis instance.
    @NoArgsConstructor
    @AllArgsConstructor
    @Data
    public static class Config {
        private String name;
        // "standalone", "cluster", or "sentinel" — selects the factory built for this entry.
        private String type;
        // Comma-separated "host:port" list; a single endpoint for standalone mode.
        private String hostAndPort;
        private String username;
        private String password;
        // Database index; ignored in cluster mode.
        private String database;
        // private String sentinelMasterHostAndPort; // for Sentinel
        private String masterName; // for Sentinel
        // for Cluster; binds from YAML key "cluster-max-redirects" (relaxed binding)
        private String clusterMaxRedirects;
        // Shutdown timeout in milliseconds.
        private String timeout;
        private PoolConfig pool;

        // commons-pool2 connection pool settings; blank values keep pool defaults.
        @Data
        @NoArgsConstructor
        @AllArgsConstructor
        public static class PoolConfig {
            private String maxIdle;
            private String minIdle;
            private String maxActive;
            private String maxWait;
        }
    }
}
缓存redis 以及caffeine
package org.example.redis.config;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import java.util.Map;
// Binds the "cache" YAML section: per-cache-name settings for the two cache
// managers configured in CacheManagerConfiguration.
@Data
@NoArgsConstructor
@AllArgsConstructor
@Component
@ConfigurationProperties(prefix = "cache")
public class CustomerCacheProperties {
    // cache name -> CaffeineSpec string (e.g. "maximumSize=100,expireAfterWrite=10m")
    public Map<String, String> caffeine;
    // cache name -> TTL in seconds (parsed with Integer.parseInt)
    public Map<String, String> redis;
}
<!-- Lettuce client (pulled in by the Spring Boot Redis starter) -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-pool2</artifactId>
</dependency>
<dependency>
    <groupId>com.github.ben-manes.caffeine</groupId>
    <artifactId>caffeine</artifactId>
    <version>2.9.3</version>
</dependency>