Singleton Pattern: Source Code Examples from Real-World Frameworks
Pattern Definition
Ensure a class has only one instance, and provide a global point of access to it.
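Before looking at framework code, here is a minimal reference sketch of the pattern itself: lazy initialization with double-checked locking in Java. It is a generic illustration with a made-up class name, not taken from any of the frameworks below.

```java
public final class AppRegistry {
    // volatile is required so that a half-constructed instance is never observed
    private static volatile AppRegistry instance;

    private AppRegistry() { }

    public static AppRegistry getInstance() {
        if (instance == null) {                  // first check, no locking
            synchronized (AppRegistry.class) {
                if (instance == null) {          // second check, under the lock
                    instance = new AppRegistry();
                }
            }
        }
        return instance;
    }
}
```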
Framework Source Code Examples
1. Spring Framework
DefaultSingletonBeanRegistry
// Spring Framework - the core singleton registry
public class DefaultSingletonBeanRegistry extends SimpleAliasRegistry implements SingletonBeanRegistry {
// Cache of fully initialized singletons - ConcurrentHashMap for thread safety
private final Map<String, Object> singletonObjects = new ConcurrentHashMap<>(256);
// Cache of early singleton references (exposed before full initialization)
private final Map<String, Object> earlySingletonObjects = new HashMap<>(16);
// Cache of singleton factories
private final Map<String, ObjectFactory<?>> singletonFactories = new HashMap<>(16);
// Registered singleton names, in registration order
private final Set<String> registeredSingletons = new LinkedHashSet<>(256);
// Names of singleton beans that are currently being created
private final Set<String> singletonsCurrentlyInCreation = Collections.newSetFromMap(new ConcurrentHashMap<>(16));
@Override
@Nullable
public Object getSingleton(String beanName) {
return getSingleton(beanName, true);
}
@Override
@Nullable
public Object getSingleton(String beanName, boolean allowEarlyReference) {
// Fast path: look up the singleton without locking
Object singletonObject = this.singletonObjects.get(beanName);
// If it is not cached yet and the bean is currently being created
if (singletonObject == null && isSingletonCurrentlyInCreation(beanName)) {
// Lock and check the early-reference caches
synchronized (this.singletonObjects) {
singletonObject = this.earlySingletonObjects.get(beanName);
if (singletonObject == null && allowEarlyReference) {
ObjectFactory<?> singletonFactory = this.singletonFactories.get(beanName);
if (singletonFactory != null) {
singletonObject = singletonFactory.getObject();
this.earlySingletonObjects.put(beanName, singletonObject);
this.singletonFactories.remove(beanName);
}
}
}
}
return singletonObject;
}
public void registerSingleton(String beanName, Object singletonObject) throws IllegalStateException {
Assert.notNull(beanName, "Bean name must not be null");
Assert.notNull(singletonObject, "Singleton object must not be null");
synchronized (this.singletonObjects) {
Object oldObject = this.singletonObjects.get(beanName);
if (oldObject != null) {
throw new IllegalStateException("Could not register object [" + singletonObject +
"] under bean name '" + beanName + "': there is already object [" + oldObject + "] bound");
}
addSingleton(beanName, singletonObject);
}
}
protected void addSingleton(String beanName, Object singletonObject) {
synchronized (this.singletonObjects) {
this.singletonObjects.put(beanName, singletonObject);
this.singletonFactories.remove(beanName);
this.earlySingletonObjects.remove(beanName);
this.registeredSingletons.add(beanName);
}
}
}
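A brief, hypothetical usage sketch of the registry above, showing how the two code paths fit together. The bean name and object are made up; in real Spring code the registry is driven by the BeanFactory rather than called directly.

```java
public class RegistryDemo {
    public static void main(String[] args) {
        DefaultSingletonBeanRegistry registry = new DefaultSingletonBeanRegistry();

        // Register an externally created object as a singleton
        registry.registerSingleton("dataSource", new Object());

        // Subsequent lookups always return the same cached instance
        Object first = registry.getSingleton("dataSource");
        Object second = registry.getSingleton("dataSource");
        System.out.println(first == second); // true
    }
}
```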
ApplicationContext Singleton Handling
// Singleton management in the Spring application context
public abstract class AbstractApplicationContext extends DefaultResourceLoader implements ConfigurableApplicationContext {
private final Object startupShutdownMonitor = new Object();
@Override
public void refresh() throws BeansException, IllegalStateException {
synchronized (this.startupShutdownMonitor) {
// Refresh the application context - the startup/shutdown monitor ensures only one refresh runs at a time
prepareRefresh();
ConfigurableListableBeanFactory beanFactory = obtainFreshBeanFactory();
prepareBeanFactory(beanFactory);
try {
postProcessBeanFactory(beanFactory);
invokeBeanFactoryPostProcessors(beanFactory);
registerBeanPostProcessors(beanFactory);
initMessageSource();
initApplicationEventMulticaster();
onRefresh();
registerListeners();
finishBeanFactoryInitialization(beanFactory);
finishRefresh();
} catch (BeansException ex) {
destroyBeans();
cancelRefresh(ex);
throw ex;
}
}
}
}
2. Kubernetes
RESTClient Singleton Implementation
// A RESTClient singleton built on top of Kubernetes client-go
type RESTClient struct {
baseURL *url.URL
content ClientContentConfig
throttled float64
throttle flowcontrol.RateLimiter
Client *http.Client
}
// Global client instance - sync.Once guarantees thread-safe, one-time creation
var (
defaultClient *rest.RESTClient
clientOnce sync.Once
)
// GetDefaultClient returns the singleton client
func GetDefaultClient() *rest.RESTClient {
clientOnce.Do(func() {
config := &rest.Config{
Host: "https://kubernetes.default.svc",
}
restClient, err := rest.RESTClientFor(config)
if err != nil {
panic(fmt.Sprintf("Failed to create default client: %v", err))
}
defaultClient = restClient
})
return defaultClient
}
// Per-configuration singleton clients
var (
clients = make(map[string]*rest.RESTClient)
clientsLock sync.RWMutex
)
func GetClientForConfig(config *rest.Config) (*rest.RESTClient, error) {
key := fmt.Sprintf("%s:%s", config.Host, config.Username)
clientsLock.RLock()
if client, exists := clients[key]; exists {
clientsLock.RUnlock()
return client, nil
}
clientsLock.RUnlock()
clientsLock.Lock()
defer clientsLock.Unlock()
// Double-check after acquiring the write lock
if client, exists := clients[key]; exists {
return client, nil
}
client, err := rest.RESTClientFor(config)
if err != nil {
return nil, err
}
clients[key] = client
return client, nil
}
Controller Manager Singleton
// Singleton in the style of the Kubernetes controller manager
type ControllerManager struct {
config *config.Config
client clientset.Interface
informers informers.SharedInformerFactory
stopCh chan struct{}
}
var (
manager *ControllerManager
managerOnce sync.Once
)
func GetControllerManager(cfg *config.Config) *ControllerManager {
managerOnce.Do(func() {
manager = &ControllerManager{
config: cfg,
stopCh: make(chan struct{}),
}
// Initialize the Kubernetes client
kubeConfig, err := rest.InClusterConfig()
if err != nil {
panic(fmt.Sprintf("Failed to get in-cluster config: %v", err))
}
manager.client = clientset.NewForConfigOrDie(kubeConfig)
manager.informers = informers.NewSharedInformerFactory(manager.client, time.Hour)
})
return manager
}
// Singleton with dependency injection
type ServiceManager struct {
services map[string]interface{}
mu sync.RWMutex
}
var instance *ServiceManager
var once sync.Once
func GetServiceManager() *ServiceManager {
once.Do(func() {
instance = &ServiceManager{
services: make(map[string]interface{}),
}
instance.initServices()
})
return instance
}
func (sm *ServiceManager) initServices() {
// Initialize the individual services
sm.services["pod"] = NewPodService()
sm.services["service"] = NewServiceService()
sm.services["deployment"] = NewDeploymentService()
}
3. Nginx
Core Configuration Singleton
// Nginx core configuration singleton
typedef struct {
ngx_uint_t connection_n;
ngx_uint_t use_accept_mutex;
ngx_str_t accept_mutex_delay;
ngx_array_t events; /* array of ngx_event_conf_t */
} ngx_event_conf_t;
// Global event configuration singleton - a static variable
static ngx_event_conf_t *ngx_event_conf = NULL;
// Singleton accessor - Nginx worker processes are single-threaded, so no locking is needed here
ngx_event_conf_t *ngx_event_get_conf(void) {
if (ngx_event_conf == NULL) {
// Allocate from the cycle's memory pool so Nginx manages the lifetime
ngx_event_conf = ngx_pcalloc(ngx_cycle->pool, sizeof(ngx_event_conf_t));
if (ngx_event_conf == NULL) {
return NULL;
}
// Initialize default values
ngx_event_conf->connection_n = 512;
ngx_event_conf->use_accept_mutex = 1;
ngx_str_set(&ngx_event_conf->accept_mutex_delay, "500ms");
}
return ngx_event_conf;
}
// Lock-guarded singleton variant
static ngx_event_conf_t *ngx_event_conf_instance = NULL;
static ngx_atomic_t ngx_event_conf_lock = 0;
ngx_event_conf_t *ngx_event_get_conf_threadsafe(void) {
// Double-checked locking
if (ngx_event_conf_instance == NULL) {
// Acquire the spinlock
while (!ngx_atomic_cmp_set(&ngx_event_conf_lock, 0, 1)) {
ngx_cpu_pause();
}
// Check again while holding the lock
if (ngx_event_conf_instance == NULL) {
ngx_event_conf_instance = ngx_pcalloc(ngx_cycle->pool, sizeof(ngx_event_conf_t));
if (ngx_event_conf_instance != NULL) {
// Initialize the configuration
ngx_event_conf_instance->connection_n = 512;
ngx_event_conf_instance->use_accept_mutex = 1;
}
}
// Release the lock
ngx_atomic_cmp_set(&ngx_event_conf_lock, 1, 0);
}
return ngx_event_conf_instance;
}
Connection Pool Singleton
// Nginx connection pool singleton
typedef struct {
ngx_queue_t free_connections;
ngx_connection_t *connections;
ngx_event_t *read_events;
ngx_event_t *write_events;
ngx_uint_t connection_n;
} ngx_connection_pool_t;
static ngx_connection_pool_t *ngx_connection_pool = NULL;
ngx_connection_pool_t *ngx_get_connection_pool(ngx_uint_t n) {
if (ngx_connection_pool == NULL) {
ngx_connection_pool = ngx_alloc(sizeof(ngx_connection_pool_t), ngx_cycle->log);
if (ngx_connection_pool == NULL) {
return NULL;
}
ngx_memzero(ngx_connection_pool, sizeof(ngx_connection_pool_t));
// Allocate the connection and event arrays
ngx_connection_pool->connections = ngx_alloc(sizeof(ngx_connection_t) * n, ngx_cycle->log);
ngx_connection_pool->read_events = ngx_alloc(sizeof(ngx_event_t) * n, ngx_cycle->log);
ngx_connection_pool->write_events = ngx_alloc(sizeof(ngx_event_t) * n, ngx_cycle->log);
if (ngx_connection_pool->connections == NULL ||
ngx_connection_pool->read_events == NULL ||
ngx_connection_pool->write_events == NULL) {
return NULL;
}
ngx_connection_pool->connection_n = n;
// Initialize the free-connection queue
ngx_queue_init(&ngx_connection_pool->free_connections);
// Put every connection on the free queue
ngx_uint_t i;
ngx_connection_t *c;
for (i = 0; i < n; i++) {
c = &ngx_connection_pool->connections[i];
ngx_queue_insert_tail(&ngx_connection_pool->free_connections, &c->queue);
}
}
return ngx_connection_pool;
}
4. Elasticsearch
Node Client Singleton
// Elasticsearch node client singleton
public class NodeClient extends AbstractClient {
private final Node node;
private final Settings settings;
private static volatile NodeClient instance;
private static final Object lock = new Object();
// Singleton via double-checked locking
public static NodeClient getInstance(Settings settings) {
if (instance == null) {
synchronized (lock) {
if (instance == null) {
instance = new NodeClient(settings);
}
}
}
return instance;
}
private NodeClient(Settings settings) {
super(settings, null);
this.settings = settings;
this.node = new Node(InternalSettingsPreparer.prepareEnvironment(settings, null));
}
@Override
public void close() throws IOException {
// A singleton is normally only closed when the application shuts down
if (node != null) {
node.close();
}
}
}
// Enum-based singleton - thread safety guaranteed by the JVM
public enum ElasticsearchClient {
INSTANCE;
private final RestHighLevelClient client;
private final Node node;
ElasticsearchClient() {
try {
Settings settings = Settings.builder()
.put("cluster.name", "elasticsearch")
.put("node.name", "node-client")
.build();
this.node = new Node(settings);
this.client = new RestHighLevelClient(
RestClient.builder(new HttpHost("localhost", 9200, "http"))
);
} catch (Exception e) {
throw new RuntimeException("Failed to initialize Elasticsearch client", e);
}
}
public RestHighLevelClient getClient() {
return client;
}
public void shutdown() throws IOException {
if (client != null) {
client.close();
}
if (node != null) {
node.close();
}
}
}
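A short usage sketch of the enum singleton above, assuming the enum is on the classpath and a local cluster is reachable on localhost:9200 (the demo class name is made up):

```java
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;

public class EsClientDemo {
    public static void main(String[] args) throws Exception {
        // Every caller sees the same JVM-wide client instance
        RestHighLevelClient client = ElasticsearchClient.INSTANCE.getClient();
        System.out.println("Cluster reachable: " + client.ping(RequestOptions.DEFAULT));
        // Release sockets and threads when the application exits
        ElasticsearchClient.INSTANCE.shutdown();
    }
}
```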
Cluster Service Singleton
// Elasticsearch cluster service singleton
public class ClusterService extends AbstractLifecycleComponent {
private final ClusterStatePublisher publisher;
private final MasterService masterService;
private final ClusterApplierService clusterApplierService;
private static ClusterService instance;
private static final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
public static ClusterService getInstance(Settings settings, ClusterSettings clusterSettings,
ThreadPool threadPool) {
lock.readLock().lock();
try {
if (instance != null) {
return instance;
}
} finally {
lock.readLock().unlock();
}
lock.writeLock().lock();
try {
if (instance == null) {
instance = new ClusterService(settings, clusterSettings, threadPool);
}
return instance;
} finally {
lock.writeLock().unlock();
}
}
private ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
super(settings);
this.masterService = new MasterService(settings, threadPool);
this.clusterApplierService = new ClusterApplierService(settings, threadPool);
this.publisher = new ClusterStatePublisher();
}
}
5. MySQL
Connection Pool Singleton
// Connection pool singleton for MySQL Connector/C++
class MySQL_ConnectionPool {
private:
static MySQL_ConnectionPool* instance;
static std::mutex mutex_;
sql::Driver* driver;
std::map<std::string, std::unique_ptr<sql::Connection>> connections;
std::map<std::string, std::chrono::steady_clock::time_point> last_used;
MySQL_ConnectionPool() {
driver = get_driver_instance();
}
public:
// Delete the copy constructor and assignment operator
MySQL_ConnectionPool(const MySQL_ConnectionPool&) = delete;
MySQL_ConnectionPool& operator=(const MySQL_ConnectionPool&) = delete;
static MySQL_ConnectionPool* getInstance() {
std::lock_guard<std::mutex> lock(mutex_);
if (instance == nullptr) {
instance = new MySQL_ConnectionPool();
}
return instance;
}
sql::Connection* getConnection(const std::string& connection_string) {
std::lock_guard<std::mutex> lock(mutex_);
auto it = connections.find(connection_string);
if (it != connections.end() && it->second->isValid()) {
last_used[connection_string] = std::chrono::steady_clock::now();
return it->second.get();
}
// Create a new connection
auto conn = std::unique_ptr<sql::Connection>(driver->connect(connection_string));
sql::Connection* raw_ptr = conn.get();
connections[connection_string] = std::move(conn);
last_used[connection_string] = std::chrono::steady_clock::now();
return raw_ptr;
}
};
// Static member initialization
MySQL_ConnectionPool* MySQL_ConnectionPool::instance = nullptr;
std::mutex MySQL_ConnectionPool::mutex_;
MySQL Server Instance Manager
// MySQL server instance manager singleton
class MySQLInstanceManager {
private:
static MySQLInstanceManager* instance;
static pthread_mutex_t mutex;
MYSQL* mysql;
bool connected;
std::string server_version;
MySQLInstanceManager() : mysql(nullptr), connected(false) {
mysql = mysql_init(nullptr);
if (mysql == nullptr) {
throw std::runtime_error("Failed to initialize MySQL connection");
}
}
public:
~MySQLInstanceManager() {
if (mysql != nullptr) {
mysql_close(mysql);
}
pthread_mutex_destroy(&mutex);
}
static MySQLInstanceManager* getInstance() {
pthread_mutex_lock(&mutex);
if (instance == nullptr) {
try {
instance = new MySQLInstanceManager();
} catch (const std::exception& e) {
pthread_mutex_unlock(&mutex);
throw;
}
}
pthread_mutex_unlock(&mutex);
return instance;
}
bool connect(const char* host, const char* user, const char* password, const char* database) {
pthread_mutex_lock(&mutex);
if (connected) {
mysql_close(mysql);
mysql = mysql_init(nullptr);
}
if (mysql_real_connect(mysql, host, user, password, database, 0, nullptr, 0) == nullptr) {
connected = false;
pthread_mutex_unlock(&mutex);
return false;
}
connected = true;
server_version = mysql_get_server_info(mysql);
pthread_mutex_unlock(&mutex);
return true;
}
MYSQL_RES* executeQuery(const char* query) {
pthread_mutex_lock(&mutex);
if (!connected || mysql_query(mysql, query) != 0) {
pthread_mutex_unlock(&mutex);
return nullptr;
}
MYSQL_RES* result = mysql_store_result(mysql);
pthread_mutex_unlock(&mutex);
return result;
}
};
// Static member and mutex initialization
MySQLInstanceManager* MySQLInstanceManager::instance = nullptr;
pthread_mutex_t MySQLInstanceManager::mutex = PTHREAD_MUTEX_INITIALIZER;
6. Redis
Redis Client Singleton
// Redis C client (hiredis) singleton implementation
typedef struct {
redisContext *context;
char *hostname;
int port;
struct timeval timeout;
bool connected;
} RedisClient;
static RedisClient *redis_instance = NULL;
static pthread_mutex_t redis_mutex = PTHREAD_MUTEX_INITIALIZER;
RedisClient* redis_get_instance(const char *hostname, int port) {
pthread_mutex_lock(&redis_mutex);
if (redis_instance == NULL) {
redis_instance = (RedisClient*)malloc(sizeof(RedisClient));
if (redis_instance == NULL) {
pthread_mutex_unlock(&redis_mutex);
return NULL;
}
redis_instance->hostname = strdup(hostname);
redis_instance->port = port;
redis_instance->timeout.tv_sec = 5;
redis_instance->timeout.tv_usec = 0;
redis_instance->connected = false;
redis_instance->context = NULL;
}
// Establish the connection if not yet connected
if (!redis_instance->connected || redis_instance->context == NULL) {
redis_instance->context = redisConnectWithTimeout(hostname, port, redis_instance->timeout);
if (redis_instance->context == NULL || redis_instance->context->err) {
if (redis_instance->context) {
printf("Connection error: %s\n", redis_instance->context->errstr);
redisFree(redis_instance->context);
redis_instance->context = NULL;
} else {
printf("Connection error: can't allocate redis context\n");
}
pthread_mutex_unlock(&redis_mutex);
return NULL;
}
redis_instance->connected = true;
}
pthread_mutex_unlock(&redis_mutex);
return redis_instance;
}
// Redis singleton with a connection pool
typedef struct redis_connection_pool {
redisContext **connections;
int max_connections;
int current_connections;
pthread_mutex_t pool_mutex;
} RedisConnectionPool;
static RedisConnectionPool *redis_pool = NULL;
static pthread_once_t redis_pool_once = PTHREAD_ONCE_INIT;
static void redis_pool_init() {
redis_pool = (RedisConnectionPool*)malloc(sizeof(RedisConnectionPool));
redis_pool->max_connections = 10;
redis_pool->current_connections = 0;
redis_pool->connections = (redisContext**)malloc(sizeof(redisContext*) * redis_pool->max_connections);
pthread_mutex_init(&redis_pool->pool_mutex, NULL);
}
RedisConnectionPool* redis_get_connection_pool() {
pthread_once(&redis_pool_once, redis_pool_init);
return redis_pool;
}
7. MongoDB
MongoDB Client Singleton
// MongoDB singleton for the Node.js driver
const { MongoClient } = require('mongodb');
class MongoDBClient {
constructor() {
this.client = null;
this.db = null;
this.connected = false;
}
static getInstance() {
if (!MongoDBClient.instance) {
MongoDBClient.instance = new MongoDBClient();
}
return MongoDBClient.instance;
}
async connect(uri, dbName) {
if (this.connected) {
return this.db;
}
try {
this.client = new MongoClient(uri, {
useNewUrlParser: true,
useUnifiedTopology: true,
maxPoolSize: 10,
serverSelectionTimeoutMS: 5000,
});
await this.client.connect();
this.db = this.client.db(dbName);
this.connected = true;
console.log('Connected to MongoDB');
return this.db;
} catch (error) {
console.error('MongoDB connection error:', error);
throw error;
}
}
getDatabase() {
if (!this.connected) {
throw new Error('MongoDB client not connected');
}
return this.db;
}
async close() {
if (this.client) {
await this.client.close();
this.connected = false;
this.client = null;
this.db = null;
}
}
}
// Use a Proxy so that `new MongoDBClient()` also returns the singleton
const MongoDBClientProxy = new Proxy(MongoDBClient, {
construct(target, args) {
return target.getInstance();
}
});
// Export the singleton instance
module.exports = MongoDBClient.getInstance();
8. PostgreSQL
PostgreSQL Connection Singleton
# PostgreSQL singleton implementation in Python
import psycopg2
from psycopg2 import pool
import threading
class PostgreSQLConnectionPool:
_instance = None
_lock = threading.Lock()
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self):
if self._initialized:
return
try:
self.connection_pool = psycopg2.pool.ThreadedConnectionPool(
1, # minimum number of connections
20, # maximum number of connections
host='localhost',
database='mydb',
user='postgres',
password='password',
port='5432'
)
self._initialized = True
print("PostgreSQL connection pool created")
except Exception as e:
print(f"Error creating connection pool: {e}")
raise
def get_connection(self):
return self.connection_pool.getconn()
def return_connection(self, connection):
self.connection_pool.putconn(connection)
def close_all_connections(self):
if self.connection_pool:
self.connection_pool.closeall()
# Singleton implemented with a decorator
def singleton(cls):
instances = {}
lock = threading.Lock()
def get_instance(*args, **kwargs):
if cls not in instances:
with lock:
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance
@singleton
class PostgreSQLManager:
def __init__(self, connection_string):
self.connection_string = connection_string
self._connection = None
def get_connection(self):
if self._connection is None or self._connection.closed:
self._connection = psycopg2.connect(self.connection_string)
return self._connection
def execute_query(self, query, params=None):
conn = self.get_connection()
with conn.cursor() as cursor:
cursor.execute(query, params)
if cursor.description: # SELECT query
return cursor.fetchall()
else: # INSERT, UPDATE, DELETE
conn.commit()
return cursor.rowcount
9. Apache Kafka
Kafka Producer Singleton
// Apache Kafka producer singleton
public class KafkaProducerSingleton {
private static volatile KafkaProducer<String, String> instance;
private static final Object lock = new Object();
private KafkaProducerSingleton() {}
public static KafkaProducer<String, String> getInstance() {
if (instance == null) {
synchronized (lock) {
if (instance == null) {
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("acks", "all");
props.put("retries", 3);
props.put("batch.size", 16384);
props.put("linger.ms", 1);
props.put("buffer.memory", 33554432);
instance = new KafkaProducer<>(props);
}
}
}
return instance;
}
public static void shutdown() {
if (instance != null) {
synchronized (lock) {
if (instance != null) {
instance.close();
instance = null;
}
}
}
}
}
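A short usage sketch for the producer singleton above. The topic name is made up for illustration; registering a JVM shutdown hook is a common way to tie such a process-wide producer's lifetime to the application.

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerDemo {
    public static void main(String[] args) {
        // Close the shared producer when the JVM exits
        Runtime.getRuntime().addShutdownHook(new Thread(KafkaProducerSingleton::shutdown));

        KafkaProducer<String, String> producer = KafkaProducerSingleton.getInstance();
        // "events" is a hypothetical topic name
        producer.send(new ProducerRecord<>("events", "key-1", "hello"));
        producer.flush();
    }
}
```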
// Kafka consumer singletons, one per consumer group
public class KafkaConsumerSingleton {
private static final ConcurrentHashMap<String, KafkaConsumer<String, String>> consumers =
new ConcurrentHashMap<>();
private static final Object lock = new Object();
public static KafkaConsumer<String, String> getConsumer(String groupId) {
return consumers.computeIfAbsent(groupId, k -> {
synchronized (lock) {
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("group.id", groupId);
props.put("enable.auto.commit", "true");
props.put("auto.commit.interval.ms", "1000");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
return new KafkaConsumer<>(props);
}
});
}
public static void shutdownConsumer(String groupId) {
KafkaConsumer<String, String> consumer = consumers.remove(groupId);
if (consumer != null) {
consumer.close();
}
}
}
10. RabbitMQ
RabbitMQ Connection Singleton
// RabbitMQ connection singleton for Node.js (amqplib)
const amqp = require('amqplib');
class RabbitMQConnection {
constructor() {
this.connection = null;
this.channel = null;
this.connected = false;
}
static getInstance() {
if (!RabbitMQConnection.instance) {
RabbitMQConnection.instance = new RabbitMQConnection();
}
return RabbitMQConnection.instance;
}
async connect(amqpUrl = 'amqp://localhost') {
if (this.connected && this.connection) {
return this.channel;
}
try {
this.connection = await amqp.connect(amqpUrl);
this.channel = await this.connection.createChannel();
this.connected = true;
// Register connection error handling
this.connection.on('error', (err) => {
console.error('RabbitMQ connection error:', err);
this.connected = false;
});
this.connection.on('close', () => {
console.log('RabbitMQ connection closed');
this.connected = false;
});
console.log('Connected to RabbitMQ');
return this.channel;
} catch (error) {
console.error('Failed to connect to RabbitMQ:', error);
throw error;
}
}
async disconnect() {
if (this.connection) {
await this.connection.close();
this.connected = false;
this.connection = null;
this.channel = null;
}
}
getChannel() {
if (!this.connected || !this.channel) {
throw new Error('RabbitMQ not connected');
}
return this.channel;
}
}
// Use a Proxy so that `new RabbitMQConnection()` also returns the singleton
const RabbitMQProxy = new Proxy(RabbitMQConnection, {
construct(target, args) {
return target.getInstance();
}
});
module.exports = RabbitMQConnection.getInstance();
11. Docker Engine
Docker Client Singleton
// Docker client singleton implementation
package docker
import (
"context"
"sync"
"github.com/docker/docker/client"
)
type DockerClient struct {
client *client.Client
ctx context.Context
}
var (
instance *DockerClient
once sync.Once
initErr error
)
func GetDockerClient() (*DockerClient, error) {
once.Do(func() {
instance = &DockerClient{}
instance.ctx = context.Background()
// Create the Docker client
cli, err := client.NewClientWithOpts(
client.FromEnv,
client.WithAPIVersionNegotiation(),
)
if err != nil {
initErr = err
return
}
instance.client = cli
// Verify the connection
_, err = instance.client.Ping(instance.ctx)
if err != nil {
initErr = err
return
}
})
if initErr != nil {
return nil, initErr
}
return instance, nil
}
func (d *DockerClient) GetClient() *client.Client {
return d.client
}
func (d *DockerClient) GetContext() context.Context {
return d.ctx
}
func (d *DockerClient) Close() error {
if d.client != nil {
return d.client.Close()
}
return nil
}
12. Apache ZooKeeper
ZooKeeper Client Singleton
// ZooKeeper client singleton
public class ZooKeeperClientSingleton {
private static volatile ZooKeeper zooKeeper;
private static final Object lock = new Object();
private static final CountDownLatch connectedSignal = new CountDownLatch(1);
private ZooKeeperClientSingleton() {}
public static ZooKeeper getInstance(String connectString, int sessionTimeout) throws IOException, InterruptedException {
if (zooKeeper == null) {
synchronized (lock) {
if (zooKeeper == null) {
zooKeeper = new ZooKeeper(connectString, sessionTimeout, new Watcher() {
@Override
public void process(WatchedEvent event) {
if (event.getState() == Event.KeeperState.SyncConnected) {
connectedSignal.countDown();
}
}
});
// Wait for the connection to be established
connectedSignal.await(sessionTimeout, TimeUnit.MILLISECONDS);
}
}
}
return zooKeeper;
}
public static void close() throws InterruptedException {
synchronized (lock) {
if (zooKeeper != null) {
zooKeeper.close();
zooKeeper = null;
}
}
}
public static boolean isConnected() {
return zooKeeper != null && zooKeeper.getState() == ZooKeeper.States.CONNECTED;
}
}
13. Consul
Consul Client Singleton
# Consul client singleton in Python
import consul
import threading
class ConsulClientSingleton:
_instance = None
_lock = threading.Lock()
def __new__(cls, host='localhost', port=8500):
if cls._instance is None:
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self, host='localhost', port=8500):
if self._initialized:
return
try:
self.consul = consul.Consul(host=host, port=port)
self._initialized = True
print(f"Consul client connected to {host}:{port}")
except Exception as e:
print(f"Failed to connect to Consul: {e}")
raise
def get_service(self, service_name):
"""获取服务实例"""
index, services = self.consul.health.service(service_name, passing=True)
if services:
service = services[0]
return {
'address': service['Service']['Address'],
'port': service['Service']['Port'],
'tags': service['Service']['Tags']
}
return None
def register_service(self, name, service_id, address, port, tags=None, check=None):
"""注册服务"""
return self.consul.agent.service.register(
name=name,
service_id=service_id,
address=address,
port=port,
tags=tags or [],
check=check
)
def deregister_service(self, service_id):
"""注销服务"""
return self.consul.agent.service.deregister(service_id)
def put_key(self, key, value):
"""存储键值对"""
return self.consul.kv.put(key, value)
def get_key(self, key):
"""获取键值"""
index, data = self.consul.kv.get(key)
if data:
return data['Value'].decode('utf-8')
return None
14. etcd
etcd Client Singleton
// etcd client singleton implementation
package etcd
import (
"context"
"sync"
"time"
clientv3 "go.etcd.io/etcd/client/v3"
)
type EtcdClient struct {
client *clientv3.Client
ctx context.Context
}
var (
instance *EtcdClient
once sync.Once
initErr error
)
func GetEtcdClient(endpoints []string) (*EtcdClient, error) {
once.Do(func() {
instance = &EtcdClient{}
instance.ctx = context.Background()
// Create the etcd client
cli, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
})
if err != nil {
initErr = err
return
}
instance.client = cli
// Verify the connection
ctx, cancel := context.WithTimeout(instance.ctx, 3*time.Second)
defer cancel()
_, err = instance.client.Status(ctx, endpoints[0])
if err != nil {
initErr = err
return
}
})
if initErr != nil {
return nil, initErr
}
return instance, nil
}
func (e *EtcdClient) Put(key, value string) error {
ctx, cancel := context.WithTimeout(e.ctx, 3*time.Second)
defer cancel()
_, err := e.client.Put(ctx, key, value)
return err
}
func (e *EtcdClient) Get(key string) (string, error) {
ctx, cancel := context.WithTimeout(e.ctx, 3*time.Second)
defer cancel()
resp, err := e.client.Get(ctx, key)
if err != nil {
return "", err
}
if len(resp.Kvs) > 0 {
return string(resp.Kvs[0].Value), nil
}
return "", nil
}
func (e *EtcdClient) Watch(key string, watchChan chan<- *clientv3.WatchResponse) {
go func() {
watchResp := e.client.Watch(e.ctx, key)
for wresp := range watchResp {
watchChan <- &wresp
}
}()
}
func (e *EtcdClient) Close() error {
if e.client != nil {
return e.client.Close()
}
return nil
}
15. Prometheus
Prometheus Client Singleton
// Prometheus client singleton
package metrics
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
)
type PrometheusClient struct {
registry *prometheus.Registry
counters map[string]prometheus.Counter
gauges map[string]prometheus.Gauge
histograms map[string]prometheus.Histogram
}
var (
instance *PrometheusClient
once sync.Once
)
func GetPrometheusClient() *PrometheusClient {
once.Do(func() {
instance = &PrometheusClient{
registry: prometheus.NewRegistry(),
counters: make(map[string]prometheus.Counter),
gauges: make(map[string]prometheus.Gauge),
histograms: make(map[string]prometheus.Histogram),
}
// Register the standard Go runtime and process collectors
instance.registry.MustRegister(prometheus.NewGoCollector())
instance.registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
})
return instance
}
func (p *PrometheusClient) RegisterCounter(name, help string, labels ...string) prometheus.Counter {
if len(labels) > 0 {
// For labeled metrics, register the vector and return a child bound to empty
// label values; callers that need real label values should use the vector itself.
vec := prometheus.NewCounterVec(prometheus.CounterOpts{Name: name, Help: help}, labels)
p.registry.MustRegister(vec)
counter := vec.WithLabelValues(make([]string, len(labels))...)
p.counters[name] = counter
return counter
}
counter := prometheus.NewCounter(prometheus.CounterOpts{
Name: name,
Help: help,
})
p.registry.MustRegister(counter)
p.counters[name] = counter
return counter
}
func (p *PrometheusClient) RegisterGauge(name, help string, labels ...string) prometheus.Gauge {
if len(labels) > 0 {
// Same approach as RegisterCounter: register the vector, return a placeholder child
vec := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: name, Help: help}, labels)
p.registry.MustRegister(vec)
gauge := vec.WithLabelValues(make([]string, len(labels))...)
p.gauges[name] = gauge
return gauge
}
gauge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: name,
Help: help,
})
p.registry.MustRegister(gauge)
p.gauges[name] = gauge
return gauge
}
func (p *PrometheusClient) GetRegistry() *prometheus.Registry {
return p.registry
}
16. gRPC
gRPC Client Singleton
// gRPC client singleton implementation
public class GrpcClientSingleton {
private static volatile ManagedChannel channel;
private static volatile GreeterGrpc.GreeterBlockingStub blockingStub;
private static final Object lock = new Object();
private GrpcClientSingleton() {}
public static GreeterGrpc.GreeterBlockingStub getBlockingStub(String host, int port) {
if (blockingStub == null) {
synchronized (lock) {
if (blockingStub == null) {
channel = ManagedChannelBuilder.forAddress(host, port)
.usePlaintext()
.build();
blockingStub = GreeterGrpc.newBlockingStub(channel);
}
}
}
return blockingStub;
}
public static void shutdown() throws InterruptedException {
synchronized (lock) {
if (channel != null && !channel.isShutdown()) {
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
channel = null;
blockingStub = null;
}
}
}
public static boolean isShutdown() {
return channel == null || channel.isShutdown();
}
}
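A usage sketch of the singleton stub above, assuming GreeterGrpc, HelloRequest and HelloReply are the classes generated from the standard gRPC hello-world proto referenced by the code; the host, port and demo class name are illustrative. Note that the host and port only matter on the first call, since later calls reuse the cached stub.

```java
import io.grpc.StatusRuntimeException;

public class GrpcDemo {
    public static void main(String[] args) throws InterruptedException {
        // All callers share one channel and one blocking stub
        GreeterGrpc.GreeterBlockingStub stub = GrpcClientSingleton.getBlockingStub("localhost", 50051);
        try {
            HelloReply reply = stub.sayHello(HelloRequest.newBuilder().setName("world").build());
            System.out.println(reply.getMessage());
        } catch (StatusRuntimeException e) {
            System.err.println("RPC failed: " + e.getStatus());
        } finally {
            GrpcClientSingleton.shutdown();
        }
    }
}
```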
// gRPC singleton with a channel pool
public class GrpcConnectionPool {
private static final Map<String, ManagedChannel> channels = new ConcurrentHashMap<>();
private static final Map<String, Object> stubs = new ConcurrentHashMap<>();
private static final int MAX_CHANNELS = 10;
private GrpcConnectionPool() {}
@SuppressWarnings("unchecked")
public static <T> T getStub(String serviceName, String host, int port, Class<T> stubClass) {
String key = serviceName + ":" + host + ":" + port;
return (T) stubs.computeIfAbsent(key, k -> {
ManagedChannel channel = ManagedChannelBuilder.forAddress(host, port)
.usePlaintext()
.build();
channels.put(key, channel);
// Create the stub that matches the requested stub class
if (stubClass == GreeterGrpc.GreeterBlockingStub.class) {
return GreeterGrpc.newBlockingStub(channel);
} else if (stubClass == GreeterGrpc.GreeterStub.class) {
return GreeterGrpc.newStub(channel);
}
throw new IllegalArgumentException("Unsupported stub class: " + stubClass);
});
}
public static void shutdownAll() {
channels.values().forEach(channel -> {
try {
channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
});
channels.clear();
stubs.clear();
}
}
17. GraphQL
GraphQL Client Singleton
// GraphQL client singleton
const { ApolloClient, InMemoryCache, HttpLink } = require('@apollo/client');
const { setContext } = require('apollo-link-context');
const fetch = require('cross-fetch');
class GraphQLClientSingleton {
constructor() {
this.client = null;
}
static getInstance() {
if (!GraphQLClientSingleton.instance) {
GraphQLClientSingleton.instance = new GraphQLClientSingleton();
GraphQLClientSingleton.instance.initialize();
}
return GraphQLClientSingleton.instance;
}
initialize() {
// HTTP link
const httpLink = new HttpLink({
uri: process.env.GRAPHQL_ENDPOINT || 'http://localhost:4000/graphql',
fetch: fetch
});
// Authentication link
const authLink = setContext((_, { headers }) => {
// Read the token from wherever it is stored (localStorage assumes a browser environment)
const token = localStorage.getItem('authToken');
return {
headers: {
...headers,
authorization: token ? `Bearer ${token}` : "",
}
};
});
// Create the Apollo client
this.client = new ApolloClient({
link: authLink.concat(httpLink),
cache: new InMemoryCache({
typePolicies: {
Query: {
fields: {
// Custom cache policies go here
}
}
}
}),
defaultOptions: {
watchQuery: {
fetchPolicy: 'cache-and-network',
errorPolicy: 'ignore',
},
query: {
fetchPolicy: 'network-only',
errorPolicy: 'all',
},
}
});
}
getClient() {
return this.client;
}
async query(query, variables = {}) {
try {
const result = await this.client.query({
query: query,
variables: variables,
});
return result.data;
} catch (error) {
console.error('GraphQL query error:', error);
throw error;
}
}
async mutate(mutation, variables = {}) {
try {
const result = await this.client.mutate({
mutation: mutation,
variables: variables,
});
return result.data;
} catch (error) {
console.error('GraphQL mutation error:', error);
throw error;
}
}
}
module.exports = GraphQLClientSingleton.getInstance();
18. WebSocket
WebSocket Connection Singleton
// WebSocket connection singleton
class WebSocketConnection {
constructor() {
this.ws = null;
this.url = null;
this.reconnectInterval = 5000;
this.reconnectTimer = null;
this.messageHandlers = new Map();
this.connectionPromise = null;
}
static getInstance() {
if (!WebSocketConnection.instance) {
WebSocketConnection.instance = new WebSocketConnection();
}
return WebSocketConnection.instance;
}
connect(url) {
if (this.url === url && this.ws && this.ws.readyState === WebSocket.OPEN) {
return Promise.resolve(this.ws);
}
this.url = url;
if (this.connectionPromise) {
return this.connectionPromise;
}
this.connectionPromise = new Promise((resolve, reject) => {
try {
this.ws = new WebSocket(url);
this.ws.onopen = () => {
console.log('WebSocket connected');
this.stopReconnect();
resolve(this.ws);
};
this.ws.onmessage = (event) => {
this.handleMessage(event.data);
};
this.ws.onclose = () => {
console.log('WebSocket disconnected');
// Clear the cached promise so connect() can create a fresh connection when reconnecting
this.connectionPromise = null;
this.startReconnect();
};
this.ws.onerror = (error) => {
console.error('WebSocket error:', error);
reject(error);
};
} catch (error) {
reject(error);
}
});
return this.connectionPromise;
}
handleMessage(data) {
try {
const message = JSON.parse(data);
const handlers = this.messageHandlers.get(message.type) || [];
handlers.forEach(handler => handler(message.payload));
} catch (error) {
console.error('Failed to parse message:', error);
}
}
on(messageType, handler) {
if (!this.messageHandlers.has(messageType)) {
this.messageHandlers.set(messageType, []);
}
this.messageHandlers.get(messageType).push(handler);
}
off(messageType, handler) {
const handlers = this.messageHandlers.get(messageType);
if (handlers) {
const index = handlers.indexOf(handler);
if (index > -1) {
handlers.splice(index, 1);
}
}
}
send(data) {
if (this.ws && this.ws.readyState === WebSocket.OPEN) {
this.ws.send(JSON.stringify(data));
} else {
console.warn('WebSocket is not connected');
}
}
startReconnect() {
if (this.reconnectTimer) return;
this.reconnectTimer = setTimeout(() => {
console.log('Attempting to reconnect...');
this.connect(this.url);
}, this.reconnectInterval);
}
stopReconnect() {
if (this.reconnectTimer) {
clearTimeout(this.reconnectTimer);
this.reconnectTimer = null;
}
}
disconnect() {
this.stopReconnect();
if (this.ws) {
this.ws.close();
this.ws = null;
}
this.url = null;
this.connectionPromise = null;
}
}
// Export the singleton instance
module.exports = WebSocketConnection.getInstance();
19. TensorFlow
TensorFlow Session Singleton
# TensorFlow session singleton (TensorFlow 1.x API)
import tensorflow as tf
import threading
class TensorFlowSessionSingleton:
_instance = None
_lock = threading.Lock()
_session = None
_graph = None
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self):
if self._initialized:
return
# Create a new computation graph
self._graph = tf.Graph()
with self._graph.as_default():
# Configure GPU options
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
# Create the session
self._session = tf.Session(config=config, graph=self._graph)
self._initialized = True
print("TensorFlow session created")
def get_session(self):
return self._session
def get_graph(self):
return self._graph
def run(self, fetches, feed_dict=None):
return self._session.run(fetches, feed_dict=feed_dict)
def close(self):
if self._session:
self._session.close()
self._session = None
self._graph = None
def __del__(self):
self.close()
# Singleton implemented with a decorator
def singleton_tf_session(cls):
instances = {}
lock = threading.Lock()
def get_instance(*args, **kwargs):
if cls not in instances:
with lock:
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance
@singleton_tf_session
class TFModelManager:
def __init__(self, model_path=None):
self.session = tf.Session()
self.model = None
if model_path:
self.load_model(model_path)
def load_model(self, model_path):
with self.session.as_default():
# Load the model
saver = tf.train.import_meta_graph(model_path + '.meta')
saver.restore(self.session, model_path)
self.model = tf.get_default_graph()
def predict(self, input_data):
if self.model is None:
raise ValueError("Model not loaded")
with self.session.as_default():
# Look up the input and output tensors
input_tensor = self.model.get_tensor_by_name("input:0")
output_tensor = self.model.get_tensor_by_name("output:0")
# Run the prediction
feed_dict = {input_tensor: input_data}
output = self.session.run(output_tensor, feed_dict=feed_dict)
return output
20. PyTorch
PyTorch Model Singleton
# PyTorch model singleton
import torch
import torch.nn as nn
import threading
class PyTorchModelSingleton:
_instance = None
_lock = threading.Lock()
_model = None
_device = None
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self):
if self._initialized:
return
# Select the device
self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Build the model (example architecture)
self._model = self._create_model().to(self._device)
self._model.eval() # switch to evaluation mode
self._initialized = True
print(f"PyTorch model created on device: {self._device}")
def _create_model(self):
# Example model
class SimpleModel(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
x = self.fc3(x)
return x
return SimpleModel()
def get_model(self):
return self._model
def get_device(self):
return self._device
def predict(self, input_data):
with torch.no_grad():
input_tensor = torch.tensor(input_data, dtype=torch.float32).to(self._device)
output = self._model(input_tensor)
return output.cpu().numpy()
def load_state_dict(self, state_dict_path):
state_dict = torch.load(state_dict_path, map_location=self._device)
self._model.load_state_dict(state_dict)
def save_state_dict(self, path):
torch.save(self._model.state_dict(), path)
# Singleton implemented with a metaclass
class SingletonMeta(type):
_instances = {}
_lock = threading.Lock()
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
with cls._lock:
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
class PyTorchManager(metaclass=SingletonMeta):
def __init__(self, model_class, *args, **kwargs):
self.model = model_class(*args, **kwargs)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model.to(self.device)
self.model.eval()
def inference(self, input_tensor):
with torch.no_grad():
input_tensor = input_tensor.to(self.device)
output = self.model(input_tensor)
return output.cpu()
Comparative Analysis of the Singleton Implementations
1. Thread-Safety Approaches
| Technique | Examples | Pros | Cons |
|---|---|---|---|
| Double-checked locking | Spring, Elasticsearch, Kafka examples above | Fast after initialization, lazy | Easy to get wrong; requires volatile (or an equivalent memory barrier) |
| Initialization-on-demand holder class | Common Java idiom (shown below) | Thread safe, lazy | Cannot take constructor parameters |
| Enum singleton | Java (Elasticsearch enum client above) | Thread safe, resistant to reflection and serialization attacks | Eagerly initialized when the enum class loads |
| sync.Once | Go standard library (client-go wrapper, Docker, etcd, Prometheus examples) | Concise, thread safe | Go-specific |
| pthread_once / mutex | POSIX C (Redis pool and MySQL manager examples) | Standard POSIX thread safety | C-specific, more boilerplate |
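The table mentions the initialization-on-demand holder idiom, which none of the framework excerpts above actually use, so here is a minimal generic sketch (class name made up). The JVM's class-initialization guarantees provide both laziness and thread safety without explicit locking.

```java
public final class HolderSingleton {
    private HolderSingleton() { }

    // The nested class is not initialized until getInstance() first references it,
    // and the JVM guarantees that class initialization happens exactly once.
    private static final class Holder {
        private static final HolderSingleton INSTANCE = new HolderSingleton();
    }

    public static HolderSingleton getInstance() {
        return Holder.INSTANCE;
    }
}
```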
2. Lifecycle Management
- Spring: the IoC container manages singleton bean lifecycles (creation, caching, destruction callbacks)
- Kubernetes components: process-level singletons that live as long as the application
- Nginx: allocated from memory pools and bound to the worker process lifetime
- Database clients: usually need an explicit close; a JVM shutdown hook is a common way to do this (see the sketch after this list)
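A small sketch of that cleanup pattern, assuming a hypothetical ConnectionPool singleton with a close() method; the point is simply to tie the singleton's release of external resources to process exit.

```java
public final class ConnectionPool implements AutoCloseable {
    private static final ConnectionPool INSTANCE = new ConnectionPool();

    static {
        // Release pooled connections when the JVM shuts down
        Runtime.getRuntime().addShutdownHook(new Thread(INSTANCE::close));
    }

    private ConnectionPool() { /* open connections here */ }

    public static ConnectionPool getInstance() {
        return INSTANCE;
    }

    @Override
    public void close() {
        // Close pooled connections; keep this idempotent so the hook can run safely
    }
}
```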
3. Performance Strategies
- Connection pooling: the MySQL, Redis and MongoDB examples reuse connections instead of recreating them
- Lazy loading: most of the frameworks defer initialization until first use
- Caching: Spring uses a three-level cache (singletonObjects, earlySingletonObjects, singletonFactories) to speed up singleton lookups
- Lock-free techniques: atomic operations reduce lock contention on the hot path (see the sketch after this list)
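A sketch of the lock-free idea using AtomicReference and compare-and-set. This is a generic illustration rather than code from any framework above; note the usual caveat that two threads may both construct an instance and one of them is discarded, so it only suits cheap, side-effect-free constructors.

```java
import java.util.concurrent.atomic.AtomicReference;

public final class AtomicSingleton {
    private static final AtomicReference<AtomicSingleton> REF = new AtomicReference<>();

    private AtomicSingleton() { }

    public static AtomicSingleton getInstance() {
        AtomicSingleton current = REF.get();
        if (current != null) {
            return current;                       // fast path: no locking at all
        }
        AtomicSingleton candidate = new AtomicSingleton();
        // Publish the candidate only if no other thread won the race first
        return REF.compareAndSet(null, candidate) ? candidate : REF.get();
    }
}
```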
Best-Practice Summary
1. When to Use the Singleton Pattern
- Resource managers that must be globally unique
- A global access point for configuration (see the sketch after this list)
- Database connection pool management
- Loggers
- Cache managers
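As an example of the configuration use case, a minimal sketch of a lazily loaded configuration singleton; the class name and the "app.properties" resource are hypothetical.

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public final class AppConfig {
    private final Properties props = new Properties();

    private AppConfig() {
        // "app.properties" is a hypothetical classpath resource
        try (InputStream in = AppConfig.class.getResourceAsStream("/app.properties")) {
            if (in != null) {
                props.load(in);
            }
        } catch (IOException e) {
            throw new IllegalStateException("Failed to load configuration", e);
        }
    }

    private static final class Holder {
        private static final AppConfig INSTANCE = new AppConfig();
    }

    public static AppConfig getInstance() {
        return Holder.INSTANCE;
    }

    public String get(String key, String defaultValue) {
        return props.getProperty(key, defaultValue);
    }
}
```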
2. Implementation Notes
- Make the implementation thread safe
- Consider lazy initialization
- Clean up resources properly
- Guard against reflection and serialization attacks (see the sketch after this list)
- Keep an eye on the performance cost of synchronization
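A sketch of those two defenses for a non-enum singleton: the constructor refuses to run twice (reflection), and readResolve() returns the canonical instance during deserialization. An enum singleton, as in the Elasticsearch example above, gets both guarantees for free.

```java
import java.io.ObjectStreamException;
import java.io.Serializable;

public final class HardenedSingleton implements Serializable {
    private static final long serialVersionUID = 1L;
    private static final HardenedSingleton INSTANCE = new HardenedSingleton();
    private static boolean constructed = false;

    private HardenedSingleton() {
        synchronized (HardenedSingleton.class) {
            // Reject a second construction attempted via reflection
            if (constructed) {
                throw new IllegalStateException("Singleton already constructed");
            }
            constructed = true;
        }
    }

    public static HardenedSingleton getInstance() {
        return INSTANCE;
    }

    // Deserialization returns the canonical instance instead of creating a new one
    private Object readResolve() throws ObjectStreamException {
        return INSTANCE;
    }
}
```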
3. Modern Alternatives
- Dependency injection frameworks (e.g. Spring's singleton bean scope, see the sketch after this list)
- The service locator pattern
- Passing a context object explicitly
- Closures in functional programming
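For comparison, the same "one shared instance" goal expressed with Spring dependency injection instead of a hand-written singleton: beans are singleton-scoped by default, and the container, not the class, enforces uniqueness. A minimal sketch assuming spring-context on the classpath; the bean and class names are illustrative.

```java
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class AppConfiguration {

    // Singleton scope is the default: the container creates exactly one PricingService
    @Bean
    public PricingService pricingService() {
        return new PricingService();
    }

    public static class PricingService { }

    public static void main(String[] args) {
        try (AnnotationConfigApplicationContext ctx =
                 new AnnotationConfigApplicationContext(AppConfiguration.class)) {
            // Both lookups return the same container-managed instance
            System.out.println(ctx.getBean(PricingService.class) == ctx.getBean(PricingService.class));
        }
    }
}
```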
Although the singleton pattern is simple, it still plays an important role in modern software architecture, especially for resource management and global configuration. Studying how these mainstream frameworks implement it shows how varied the pattern's applications are across programming languages and runtime environments.
