Python code for the NW small-world network model

Reference: NW小世界网络模型python代码实现及平均路径聚类系数计算_Q造不出的人工智障的博客-CSDN博客

The NW (Newman-Watts) model starts from a regular ring lattice in which every node is linked to its k nearest neighbors, and then adds random shortcut edges with probability p; unlike the WS model, no lattice edge is ever removed, so the network stays connected. The script below generates such a network as an adjacency matrix, draws it, and reports the average shortest path length and the average clustering coefficient.


```python
import random

import networkx as nx
import numpy as np
from matplotlib import pyplot as plt

plt.rcParams["font.sans-serif"] = ["SimHei"]  # use a font that can render CJK glyphs
plt.rcParams["axes.unicode_minus"] = False    # render minus signs correctly with this font
```

 

 

`small_world` builds the adjacency matrix in two passes: first the regular ring lattice, then the random shortcuts.

```python
def small_world(N, d, p):
    """NW small-world network: a ring lattice in which every node is linked
    to its d nearest neighbors, plus random shortcuts added with probability p."""
    A = np.zeros((N, N))
    # regular ring lattice: connect each node to d/2 neighbors on each side
    for i in range(N):
        for t in range(d // 2):
            A[i][i - (t + 1)] = 1      # negative indices wrap around the ring
            A[i - (t + 1)][i] = 1

    # NW randomization: for each lattice edge, add a shortcut with probability p
    # (unlike the WS model, the original edge is kept)
    for i in range(N):
        for t in range(d // 2):
            if random.random() < p:
                target = random.randint(0, N - 1)
                # redraw until the target is neither i itself nor already linked
                while A[i][target] == 1 or target == i:
                    target = random.randint(0, N - 1)
                # add the shortcut edge
                A[i][target] = 1
                A[target][i] = 1
    return A
```
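Because the NW model only adds shortcuts on top of the ring lattice, the hand-rolled generator can be cross-checked against networkx's built-in NW generator. A minimal sketch (the parameters here are illustrative, not from the original post):

```python
# Cross-check: compare edge counts of the hand-rolled generator against
# networkx's built-in NW generator; both should average about N*k/2*(1+p).
N, k, p = 50, 4, 0.1
A_check = small_world(N, k, p)
g_builtin = nx.newman_watts_strogatz_graph(N, k, p)
print('hand-rolled edges:', int(A_check.sum() // 2))
print('built-in edges:   ', g_builtin.number_of_edges())
```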

 

`plot_graph` draws the network on a circular layout, scaling node size with degree, and prints the two small-world metrics.

```python
def plot_graph(A, axis=None):
    g = nx.from_numpy_array(A)   # from_numpy_matrix was removed in networkx 3.0
    pos = nx.circular_layout(g)  # nx.kamada_kawai_layout(g) is an alternative
    # node size scales with degree: the higher the degree, the larger the node
    nodesize = []
    maxsize = 100
    minsize = 10
    maxdegree = np.max(np.sum(A, axis=0))
    mindegree = np.min(np.sum(A, axis=0))
    if maxdegree == mindegree:
        nodesize = [maxsize for _ in range(len(A))]
    else:
        for node in g:
            size = (np.sum(A[node]) - mindegree) / (maxdegree - mindegree) * (maxsize - minsize) + minsize
            nodesize.append(size)

    nx.draw_networkx_nodes(g, pos=pos, node_color='yellow', node_size=nodesize, alpha=0.6, ax=axis)
    nx.draw_networkx_edges(g, pos=pos, width=0.3, alpha=0.6, ax=axis)

    # both metrics assume a connected graph; average_shortest_path_length
    # raises NetworkXError on a disconnected one
    print('average shortest path length:', nx.average_shortest_path_length(g))
    print('average clustering coefficient:', nx.average_clustering(g))
```
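Note that `nx.average_shortest_path_length` raises `NetworkXError` on disconnected graphs. An NW network keeps its ring lattice and therefore stays connected, but if the code is adapted to other generators, a guard like the following sketch (my addition, assuming an adjacency matrix `A` from `small_world`) avoids the crash:

```python
# Guard sketch: fall back to the largest connected component when the
# graph is disconnected, since the metric is undefined across components.
g = nx.from_numpy_array(A)
if nx.is_connected(g):
    path_len = nx.average_shortest_path_length(g)
else:
    giant = g.subgraph(max(nx.connected_components(g), key=len))
    path_len = nx.average_shortest_path_length(giant)
print('average shortest path length:', path_len)
```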

 

 

The main script reads the parameters and generates the network:

```python
N = int(input('Total number of nodes N: '))
k = int(input('Number of nearest neighbors k in the regular lattice: '))
p = float(input('Probability p of randomly adding an edge: '))

A = small_world(N, k, p)
```
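A couple of quick sanity checks on the generated matrix can catch indexing mistakes early; this is my addition, not part of the original script:

```python
# Sanity checks: the matrix must be symmetric with an empty diagonal,
# and the edge count can never drop below the N*k/2 lattice edges.
assert np.allclose(A, A.T), 'adjacency matrix should be symmetric'
assert np.trace(A) == 0, 'self-loops are not expected'
print('total edges:', int(A.sum() // 2), '| lattice edges:', N * k // 2)
```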

 

 

 

 

Finally, plot the adjacency matrix next to the network itself:

```python
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(6, 3))
ax[0].matshow(A, cmap='gray')     # adjacency matrix
plot_graph(A, axis=ax[1])         # circular network layout
fig.suptitle('NW small-world network')
plt.show()
```
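To see the small-world effect itself, it helps to sweep p and watch the two metrics: the average path length collapses quickly while the clustering coefficient stays high. A minimal sketch using networkx's built-in generator (the values of N, k, and the seed are illustrative):

```python
# Classic small-world experiment: L(p) drops fast while C(p) stays high.
for p_val in [0.0, 0.01, 0.05, 0.1, 0.5]:
    g = nx.newman_watts_strogatz_graph(100, 4, p_val, seed=42)
    L = nx.average_shortest_path_length(g)  # NW graphs stay connected
    C = nx.average_clustering(g)
    print(f'p={p_val:<4}  L={L:.3f}  C={C:.3f}')
```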

 
