简单的例子:
- https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-7d2250723780
- https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0
- 《Semi-Supervised Classification with Graph Convolutional Networks》论文解说:https://zhuanlan.zhihu.com/p/31067515
- 《Semi-Supervised Classification with Graph Convolutional Networks》代码分析:
- 先定义了 Layer 基类,主要作用是:为每一层的 name 做命名,并用一个 logging 参数决定是否记录日志
class Layer(object):
    """Base class for all layers.

    Handles two cross-cutting concerns for every layer:
    - assigns each instance a unique ``name`` (auto-generated from the class
      name when the caller does not supply one), and
    - stores a ``logging`` flag deciding whether variables get logged.

    NOTE(review): the bodies of ``_call``/``__call__``/``_log_vars`` are
    elided in this excerpt; see the original source for the implementations.
    """

    def __init__(self, **kwargs):
        # Only 'name' and 'logging' are accepted; anything else is a typo.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Fall back to the lowercased class name...
            layer = self.__class__.__name__.lower()
            # ...suffixed with a per-class counter, forming a fresh unique
            # layer name (e.g. "dense_1"): trick
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        # Per-layer variables, keyed by name.
        self.vars = {}
        # Defaults to False when the 'logging' kwarg is absent.
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.sparse_inputs = False

    def _call(self, inputs):
        """Per-layer computation on ``inputs`` (body elided in this excerpt)."""

    def __call__(self, inputs):
        """Makes instances callable; presumably wraps ``_call`` (body elided)."""

    def _log_vars(self):
        """Logs the layer's variables (body elided in this excerpt)."""
__call__ 方法的作用是让 Layer 的实例成为可调用对象;
- 根据 Layer 继承得到 Dense 层,考虑到的参数:
- input_dim
- output_dim
- placehoders :作为计算图中的待填充数据都放在这里;在
train.py
定义为:
# Define placeholders — the feed_dict entry points of the TF1 computation graph.
placeholders = {
    # One sparse support (adjacency-derived) matrix per support term.
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    # Sparse node-feature matrix; features[2] carries its dense shape.
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    # Label matrix, (num_nodes, num_classes) — presumably one-hot; confirm in train.py.
    'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    # Mask selecting which nodes contribute to the loss.
    'labels_mask': tf.placeholder(tf.int32),
    # Dropout probability; defaults to 0. (no dropout) when not fed.
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
}
- dropout
- sparse_inputs :是否是稀疏数据
- act : 激活函数
- bias:是否有偏置
- featureless:输入的数据带不带特征矩阵
- **kwargs:基类中的参数,name还有logging。
class Dense(Layer):
    """Dense layer.

    Args:
        input_dim: size of the input feature dimension.
        output_dim: size of the output feature dimension.
        placeholders: dict of graph placeholders; reads 'dropout' and
            'num_features_nonzero' from it.
        dropout: truthy -> use the 'dropout' placeholder; falsy -> no dropout.
        sparse_inputs: whether the inputs are sparse tensors.
        act: activation function applied to the output.
        bias: whether to add a bias term.
        featureless: whether the input comes without a feature matrix.
        **kwargs: base-class options ('name', 'logging').
    """

    def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
                 act=tf.nn.relu, bias=False, featureless=False, **kwargs):
        super(Dense, self).__init__(**kwargs)

        # The dropout rate itself is fed at run time through the placeholder;
        # the `dropout` argument only switches dropout on or off.
        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.bias = bias

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        # Trainable variables live under a per-layer variable scope so names
        # do not collide across layer instances.
        with tf.variable_scope(self.name + '_vars'):
            # Glorot/Xavier-initialized weight matrix.
            self.vars['weights'] = glorot([input_dim, output_dim],
                                          name='weights')
            if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')

        if self.logging:
            self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
retur