SNEC ES+圆满落幕!弘正储能双奖加冕,实力闪耀“十大亮点”榜单

9月27日,为期三天的SNEC ES+ 第九届(2024)国际储能和电池技术及装备(上海)展览会圆满落幕。在这场汇聚全球智慧与创新的舞台上,弘正携数字化储能解决方案精彩亮相,以数字化力量领航智慧能源新时代。

展会现场,弘正储能聚焦全场景创新技术与数字化储能解决方案,重点展示了EMS能量管理系统、D-Galaxy系列虚拟电厂平台、智慧储能管控云平台等数字化产品,展现了弘正储能在数字化技术领域的深厚积累与行业领先的产品力,以及公司对绿色能源未来趋势的深刻洞察与前瞻布局。

在备受瞩目的SNEC ES+国际储能“十大亮点评选与发布”活动中,弘正储能凭借深厚的数字化技术积淀和卓越创新力,在众多优秀品牌中脱颖而出,荣获“储能卓越技术奖”和“储能创新力企业奖”。双料荣誉不仅是行业对弘正综合实力的认可,更是对公司未来发展前景的坚定信心与高度期待。

此外,在“十大亮点”活动现场,弘正储能多位技术专家还围绕数字化储能领域前沿议题进行了深入分享,并成功发布D-Fix-DEV-EB20AH型EMS控制单元。该产品具有纯国产、高性能、低功耗等特点,与内置的自研EMS管理系统形成高效组合,具备数据采集、监视控制、能量协调联动、经济优化增效等功能,助力工商业储能场景数智化升级,赢得了业界的广泛赞誉。

作为数字化储能引领者,弘正储能通过不断探索数字技术与储能产业的深度融合,持续提升产品品质,并强化运营服务能力,为客户提供超越期待的智慧能源解决方案。此次展会,弘正储能还重磅发布了新一代微电网能量管理系统,该系统能够满足光伏系统、风力发电、储能系统以及充电桩的接入,灵活适配多能互补、自发自用及无电偏远地区的离网、并网微电网等多种应用场景,在确保系统安全稳定的同时,追求经济最优运行,加速可再生能源的普及与应用。

在全球绿色能源转型与数字化转型的双重浪潮下,弘正数字化储能解决方案,不仅深刻体现了公司对绿色能源未来趋势的精准把握,更紧密贴合了企业能源绿色转型的迫切需求。弘正储能将不断加码技术创新,以更智能、更高效的产品及解决方案为客户持续创造价值,为实现全球零碳目标贡献力量。

t-SNE是一种降维算法,可以将高维数据映射到二维或三维空间中进行可视化。下面是使用C++实现t-SNE的代码示例: ```c++ #include <iostream> #include <fstream> #include <ctime> #include <cmath> #include <cstring> #include <cstdlib> using namespace std; const double PI = 3.14159265358979323846; // 计算欧几里得距离的平方 double euclidean_distance_squared(const double* x, const double* y, int dim) { double dist = 0.0; for (int i = 0; i < dim; i++) { double diff = x[i] - y[i]; dist += diff * diff; } return dist; } // 计算高斯分布的概率密度 double gaussian_kernel(double distance_squared, double perplexity) { return exp(-distance_squared / (2 * perplexity * perplexity)); } // 随机初始化低维嵌入 void initialize_embedding(double* embedding, int n, int dim) { srand(time(NULL)); for (int i = 0; i < n * dim; i++) { embedding[i] = (rand() / (double)RAND_MAX - 0.5) / dim; } } // 计算t-SNE中的梯度和KL散度 void compute_gradient_kl(const double* embedding, const double* P, double* grad, int n, int dim, double perplexity) { const int num_threads = 4; const double eps = 1e-12; memset(grad, 0, n * dim * sizeof(double)); // 计算Q矩阵,即低维空间点之间的相似度矩阵 double* Q = new double[n * n]; memset(Q, 0, n * n * sizeof(double)); for (int i = 0; i < n; i++) { for (int j = i + 1; j < n; j++) { double distance_squared = euclidean_distance_squared(embedding + i * dim, embedding + j * dim, dim); double probability = gaussian_kernel(distance_squared, perplexity); Q[i * n + j] = probability; Q[j * n + i] = probability; } } // 对称化Q矩阵 for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { Q[i * n + j] = (Q[i * n + j] + Q[j * n + i]) / (2 * n); } } // 计算梯度和KL散度 #pragma omp parallel for num_threads(num_threads) for (int i = 0; i < n; i++) { double* grad_i = grad + i * dim; for (int j = 0; j < n; j++) { if (i == j) { continue; } double diff[dim]; double distance_squared = euclidean_distance_squared(embedding + i * dim, embedding + j * dim, dim); double probability = P[i * n + j]; probability /= (eps + P[j * n + i] + probability); probability *= Q[i * n + j]; probability -= (eps + Q[j * n + i]); for 
(int d = 0; d < dim; d++) { diff[d] = embedding[i * dim + d] - embedding[j * dim + d]; grad_i[d] += probability * diff[d]; } } } // 释放内存 delete[] Q; } // 计算t-SNE中的梯度和KL散度(加速版) void compute_gradient_kl_accelerated(const double* embedding, const double* P, double* grad, int n, int dim, double perplexity) { const int num_threads = 4; const double eps = 1e-12; memset(grad, 0, n * dim * sizeof(double)); // 计算Q矩阵,即低维空间点之间的相似度矩阵 double* Q = new double[n * n]; memset(Q, 0, n * n * sizeof(double)); for (int i = 0; i < n; i++) { for (int j = i + 1; j < n; j++) { double distance_squared = euclidean_distance_squared(embedding + i * dim, embedding + j * dim, dim); double probability = gaussian_kernel(distance_squared, perplexity); Q[i * n + j] = probability; Q[j * n + i] = probability; } } // 对称化Q矩阵 for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { Q[i * n + j] = (Q[i * n + j] + Q[j * n + i]) / (2 * n); } } // 计算梯度和KL散度 #pragma omp parallel for num_threads(num_threads) for (int i = 0; i < n; i++) { double* grad_i = grad + i * dim; for (int j = 0; j < n; j++) { if (i == j) { continue; } double diff[dim]; double distance_squared = euclidean_distance_squared(embedding + i * dim, embedding + j * dim, dim); double probability = P[i * n + j]; probability /= (eps + P[j * n + i] + probability); probability *= Q[i * n + j]; probability -= (eps + Q[j * n + i]); for (int d = 0; d < dim; d++) { diff[d] = embedding[i * dim + d] - embedding[j * dim + d]; grad_i[d] += probability * diff[d]; } } } // 释放内存 delete[] Q; } // 计算t-SNE中的梯度和KL散度(Barnes-Hut加速版) void compute_gradient_kl_bh(const double* embedding, const double* P, double* grad, int n, int dim, double perplexity, double theta) { const int num_threads = 4; const double eps = 1e-12; memset(grad, 0, n * dim * sizeof(double)); // 创建Barnes-Hut树 double* position = new double[n * dim]; memcpy(position, embedding, n * dim * sizeof(double)); BarnesHutTree* tree = new BarnesHutTree(position, n, dim); tree->build(theta); // 计算梯度和KL散度 
#pragma omp parallel for num_threads(num_threads) for (int i = 0; i < n; i++) { double* grad_i = grad + i * dim; for (int j = 0; j < n; j++) { if (i == j) { continue; } double distance_squared = euclidean_distance_squared(embedding + i * dim, embedding + j * dim, dim); double probability = P[i * n + j]; probability /= (eps + P[j * n + i] + probability); if (distance_squared > eps && tree->is_far_away(i, j, distance_squared)) { probability *= 0; } else { probability *= gaussian_kernel(distance_squared, perplexity); } probability -= (eps + tree->get_probability(i, j)); double diff[dim]; for (int d = 0; d < dim; d++) { diff[d] = embedding[i * dim + d] - embedding[j * dim + d]; grad_i[d] += probability * diff[d]; } } } // 释放内存 delete tree; delete[] position; } // 计算t-SNE中的梯度和KL散度(Barnes-Hut加速版,多线程) void compute_gradient_kl_bh_multithreaded(const double* embedding, const double* P, double* grad, int n, int dim, double perplexity, double theta) { const int num_threads = 4; const double eps = 1e-12; memset(grad, 0, n * dim * sizeof(double)); // 创建Barnes-Hut树 double* position = new double[n * dim]; memcpy(position, embedding, n * dim * sizeof(double)); BarnesHutTree* tree = new BarnesHutTree(position, n, dim); tree->build(theta); // 计算梯度和KL散度 #pragma omp parallel for num_threads(num_threads) for (int i = 0; i < n; i++) { double* grad_i = grad + i * dim; for (int j = 0; j < n; j++) { if (i == j) { continue; } double distance_squared = euclidean_distance_squared(embedding + i * dim, embedding + j * dim, dim); double probability = P[i * n + j]; probability /= (eps + P[j * n + i] + probability); if (distance_squared > eps && tree->is_far_away(i, j, distance_squared)) { probability *= 0; } else { probability *= gaussian_kernel(distance_squared, perplexity); } probability -= (eps + tree->get_probability(i, j)); double diff[dim]; for (int d = 0; d < dim; d++) { diff[d] = embedding[i * dim + d] - embedding[j * dim + d]; grad_i[d] += probability * diff[d]; } } } // 释放内存 delete 
tree; delete[] position; } // 随机梯度下降优化t-SNE void optimize_t_sne(double* embedding, const double* X, int n, int dim, double perplexity, int max_iter, double learning_rate, double momentum, bool use_bh, double theta, bool use_multithreaded) { const int num_threads = 4; // 初始化低维嵌入 initialize_embedding(embedding, n, dim); // 计算P矩阵,即高维空间点之间的相似度矩阵 double* P = new double[n * n]; memset(P, 0, n * n * sizeof(double)); for (int i = 0; i < n; i++) { for (int j = i + 1; j < n; j++) { double distance_squared = euclidean_distance_squared(X + i * dim, X + j * dim, dim); double probability = gaussian_kernel(distance_squared, perplexity); P[i * n + j] = probability; P[j * n + i] = probability; } } // 对称化P矩阵 for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { P[i * n + j] = (P[i * n + j] + P[j * n + i]) / (2 * n); } } // 随机梯度下降优化低维嵌入 double* prev_grad = new double[n * dim]; memset(prev_grad, 0, n * dim * sizeof(double)); double* grad = new double[n * dim]; memset(grad, 0, n * dim * sizeof(double)); for (int iter = 0; iter < max_iter; iter++) { if (use_bh) { if (use_multithreaded) { compute_gradient_kl_bh_multithreaded(embedding, P, grad, n, dim, perplexity, theta); } else { compute_gradient_kl_bh(embedding, P, grad, n, dim, perplexity, theta); } } else { compute_gradient_kl(embedding, P, grad, n, dim, perplexity); } for (int i = 0; i < n * dim; i++) { double update = momentum * prev_grad[i] - learning_rate * grad[i]; embedding[i] += update; prev_grad[i] = update; } } // 释放内存 delete[] P; delete[] grad; delete[] prev_grad; } ``` 上面的代码实现了t-SNE的基本功能,包括计算高斯核函数、随机初始化低维嵌入、计算梯度和KL散度、随机梯度下降优化低维嵌入等操作。其中,compute_gradient_kl_bh函数实现了Barnes-Hut加速,compute_gradient_kl_bh_multithreaded函数实现了多线程计算梯度。如果需要进一步优化性能,可以通过调整参数、使用GPU加速等方式进行优化。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值