from theano.tensor.signal import downsample

 ImportError: cannot import name downsample

Reference: https://blog.csdn.net/u013972559/article/details/86020388

I ran into this error while testing the installation. The cause is that Theano's module layout changed: the downsample module no longer exists and its functions were moved to other modules, so I am recording the fix here:

In older versions you would write from theano.tensor.signal.downsample import max_pool_2d. As the error message suggests, this has moved to from theano.tensor.signal.pool import pool_2d. Checking the Theano package confirms it: the signal directory (in my case D:\Anaconda3\envs\theano\Lib\site-packages\theano\tensor\signal) no longer contains downsample, only pool and conv. So whenever old code still refers to downsample, what it needs now lives in one of those two modules, and a small edit is enough. Here the file that triggers the error is Lasagne's pool.py, so that is the file we need to modify.
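To confirm that the new module works in your environment, a minimal sketch like the following should run without the ImportError (assuming Theano 0.9 or later, where the keyword arguments ds/st/padding were also renamed to ws/stride/pad):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import pool  # replaces theano.tensor.signal.downsample

x = T.tensor4('x')
# old call: downsample.max_pool_2d(x, ds=(2, 2), st=(2, 2), ignore_border=True)
# new call: pool.pool_2d with the renamed keyword arguments
y = pool.pool_2d(x, ws=(2, 2), stride=(2, 2), pad=(0, 0),
                 ignore_border=True, mode='max')

f = theano.function([x], y)
data = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(f(data).shape)  # 2x2 max pooling of a 4x4 input -> (1, 1, 2, 2)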

The file lives under your Lasagne installation inside the conda environment. In my case Anaconda is installed on drive D: and Theano sits in a virtual environment named theano, so the Lasagne pool.py is under D:\Anaconda3\envs\theano\Lib\site-packages\lasagne\layers.
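If you are not sure where pool.py ended up on your machine, a small sketch like this one locates it by scanning the site-packages directories; it deliberately avoids importing Lasagne, because that import is exactly what fails before the patch (site.getsitepackages and the directory layout can differ slightly between environments):

import os
import site

# look for lasagne/layers/pool.py in every site-packages directory
for d in site.getsitepackages():
    candidate = os.path.join(d, 'lasagne', 'layers', 'pool.py')
    if os.path.exists(candidate):
        print(candidate)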

Three places in this file need to be changed; they are marked below.

import theano.tensor as T
 
from .base import Layer
from ..utils import as_tuple
 
 
# Change ①: downsample is no longer imported; use the pool module instead
# from theano.tensor.signal import downsample
from theano.tensor.signal import pool
 
__all__ = [
    "MaxPool1DLayer",
    "MaxPool2DLayer",
    "Pool2DLayer",
    "FeaturePoolLayer",
    "FeatureWTALayer",
    "GlobalPoolLayer",
]
 
 
def pool_output_length(input_length, pool_size, stride, pad, ignore_border):
    if input_length is None or pool_size is None:
        return None
 
    if ignore_border:
        output_length = input_length + 2 * pad - pool_size + 1
        output_length = (output_length + stride - 1) // stride
 
    else:
        assert pad == 0
 
        if stride >= pool_size:
            output_length = (input_length + stride - 1) // stride
        else:
            output_length = max(
                0, (input_length - pool_size + stride - 1) // stride) + 1
 
    return output_length
 
 
class MaxPool1DLayer(Layer):
 
    def __init__(self, incoming, pool_size, stride=None, pad=0,
                 ignore_border=True, **kwargs):
        super(MaxPool1DLayer, self).__init__(incoming, **kwargs)
        self.pool_size = as_tuple(pool_size, 1)
        self.stride = self.pool_size if stride is None else as_tuple(stride, 1)
        self.pad = as_tuple(pad, 1)
        self.ignore_border = ignore_border
 
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)  # copy / convert to mutable list
 
        output_shape[-1] = pool_output_length(input_shape[-1],
                                              pool_size=self.pool_size[0],
                                              stride=self.stride[0],
                                              pad=self.pad[0],
                                              ignore_border=self.ignore_border,
                                              )
 
        return tuple(output_shape)
 
    def get_output_for(self, input, **kwargs):
        input_4d = T.shape_padright(input, 1)
        # Change ②: downsample.max_pool_2d is replaced by pool.pool_2d, and the
        # keyword arguments ds/st/padding were renamed to ws/stride/pad
        # pooled = downsample.max_pool_2d(input_4d,
        #                                 ds=(self.pool_size[0], 1),
        #                                 st=(self.stride[0], 1),
        #                                 ignore_border=self.ignore_border,
        #                                 padding=(self.pad[0], 0),
        #                                 )
        pooled = pool.pool_2d(input_4d,
                              ws=(self.pool_size[0], 1),
                              stride=(self.stride[0], 1),
                              ignore_border=self.ignore_border,
                              pad=(self.pad[0], 0),
                              )
 
        return pooled[:, :, :, 0]
 
 
class Pool2DLayer(Layer):
 
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool2DLayer, self).__init__(incoming, **kwargs)
 
        self.pool_size = as_tuple(pool_size, 2)
 
        if stride is None:
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 2)
 
        self.pad = as_tuple(pad, 2)
 
        self.ignore_border = ignore_border
        self.mode = mode
 
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)  # copy / convert to mutable list
 
        output_shape[2] = pool_output_length(input_shape[2],
                                             pool_size=self.pool_size[0],
                                             stride=self.stride[0],
                                             pad=self.pad[0],
                                             ignore_border=self.ignore_border,
                                             )
 
        output_shape[3] = pool_output_length(input_shape[3],
                                             pool_size=self.pool_size[1],
                                             stride=self.stride[1],
                                             pad=self.pad[1],
                                             ignore_border=self.ignore_border,
                                             )
 
        return tuple(output_shape)
 
    def get_output_for(self, input, **kwargs):
        # Change ③: same replacement as above; ds became ws, st became stride,
        # and padding became pad (keep passing the stride, otherwise it silently
        # defaults to the pool size)
        # pooled = downsample.max_pool_2d(input,
        #                                 ds=self.pool_size,
        #                                 st=self.stride,
        #                                 ignore_border=self.ignore_border,
        #                                 padding=self.pad,
        #                                 mode=self.mode,
        #                                 )
        pooled = pool.pool_2d(input,
                              ws=self.pool_size,
                              stride=self.stride,
                              ignore_border=self.ignore_border,
                              pad=self.pad,
                              mode=self.mode,
                              )
        return pooled
 
 
class MaxPool2DLayer(Pool2DLayer):
 
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, **kwargs):
        super(MaxPool2DLayer, self).__init__(incoming,
                                             pool_size,
                                             stride,
                                             pad,
                                             ignore_border,
                                             mode='max',
                                             **kwargs)
 
# TODO: add reshape-based implementation to MaxPool*DLayer
# TODO: add MaxPool3DLayer
 
 
class FeaturePoolLayer(Layer):
 
    def __init__(self, incoming, pool_size, axis=1, pool_function=T.max,
                 **kwargs):
        super(FeaturePoolLayer, self).__init__(incoming, **kwargs)
        self.pool_size = pool_size
        self.axis = axis
        self.pool_function = pool_function
 
        num_feature_maps = self.input_shape[self.axis]
        if num_feature_maps % self.pool_size != 0:
            raise ValueError("Number of input feature maps (%d) is not a "
                             "multiple of the pool size (pool_size=%d)" %
                             (num_feature_maps, self.pool_size))
 
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)  # make a mutable copy
        output_shape[self.axis] = input_shape[self.axis] // self.pool_size
        return tuple(output_shape)
 
    def get_output_for(self, input, **kwargs):
        input_shape = tuple(input.shape)
        num_feature_maps = input_shape[self.axis]
        num_feature_maps_out = num_feature_maps // self.pool_size
 
        pool_shape = (input_shape[:self.axis] +
                      (num_feature_maps_out, self.pool_size) +
                      input_shape[self.axis+1:])
 
        input_reshaped = input.reshape(pool_shape)
        return self.pool_function(input_reshaped, axis=self.axis + 1)
 
 
class FeatureWTALayer(Layer):
 
    def __init__(self, incoming, pool_size, axis=1, **kwargs):
        super(FeatureWTALayer, self).__init__(incoming, **kwargs)
        self.pool_size = pool_size
        self.axis = axis
 
        num_feature_maps = self.input_shape[self.axis]
        if num_feature_maps % self.pool_size != 0:
            raise ValueError("Number of input feature maps (%d) is not a "
                             "multiple of the region size (pool_size=%d)" %
                             (num_feature_maps, self.pool_size))
 
    def get_output_for(self, input, **kwargs):
        num_feature_maps = input.shape[self.axis]
        num_pools = num_feature_maps // self.pool_size
 
        pool_shape = ()
        arange_shuffle_pattern = ()
        for k in range(self.axis):
            pool_shape += (input.shape[k],)
            arange_shuffle_pattern += ('x',)
 
        pool_shape += (num_pools, self.pool_size)
        arange_shuffle_pattern += ('x', 0)
 
        for k in range(self.axis + 1, input.ndim):
            pool_shape += (input.shape[k],)
            arange_shuffle_pattern += ('x',)
 
        input_reshaped = input.reshape(pool_shape)
        max_indices = T.argmax(input_reshaped, axis=self.axis + 1,
                               keepdims=True)
 
        arange = T.arange(self.pool_size).dimshuffle(*arange_shuffle_pattern)
        mask = T.eq(max_indices, arange).reshape(input.shape)
 
        return input * mask
 
 
class GlobalPoolLayer(Layer):
 
    def __init__(self, incoming, pool_function=T.mean, **kwargs):
        super(GlobalPoolLayer, self).__init__(incoming, **kwargs)
        self.pool_function = pool_function
 
    def get_output_shape_for(self, input_shape):
        return input_shape[:2]
 
    def get_output_for(self, input, **kwargs):
        return self.pool_function(input.flatten(3), axis=2)
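After saving the patched pool.py, a short smoke test such as the following (a minimal sketch; the layer sizes are arbitrary) verifies that Lasagne now imports cleanly and that MaxPool2DLayer gives the expected output shape:

import numpy as np
import theano
import theano.tensor as T
import lasagne

l_in = lasagne.layers.InputLayer((None, 1, 8, 8))
l_pool = lasagne.layers.MaxPool2DLayer(l_in, pool_size=(2, 2))

x = T.tensor4('x')
y = lasagne.layers.get_output(l_pool, x)
f = theano.function([x], y)

out = f(np.ones((1, 1, 8, 8), dtype=theano.config.floatX))
print(out.shape)  # expected: (1, 1, 4, 4)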

 
