"""Definition of various recurrent neural network cells."""
__all__ = ['Conv1DRNNCell', 'Conv2DRNNCell', 'Conv3DRNNCell',
           'Conv1DLSTMCell', 'Conv2DLSTMCell', 'Conv3DLSTMCell',
           'Conv1DGRUCell', 'Conv2DGRUCell', 'Conv3DGRUCell']

from math import floor

from ....base import numeric_types
from ...rnn import HybridRecurrentCell


def _get_conv_out_size(dimensions, kernels, paddings, dilations):
    # Stride-1 convolution output size per spatial dimension; 0 stays 0
    # (unknown/deferred dimension).
    return tuple(int(floor((x+2*p-d*(k-1)-1)+1)) if x else 0 for x, k, p, d in
                 zip(dimensions, kernels, paddings, dilations))


class _BaseConvRNNCell(HybridRecurrentCell):
    """Abstract base class for convolutional RNNs"""
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad, i2h_dilate, h2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 dims, conv_layout, activation,
                 prefix=None, params=None):
        super(_BaseConvRNNCell, self).__init__(prefix=prefix, params=params)

        self._hidden_channels = hidden_channels
        self._input_shape = input_shape
        self._conv_layout = conv_layout
        self._activation = activation

        # Convolution settings: each may be an int or a tuple of length `dims`.
        assert all(isinstance(spec, int) or len(spec) == dims
                   for spec in [i2h_kernel, i2h_pad, i2h_dilate,
                                h2h_kernel, h2h_dilate]), \
            "For {dims}D convolution, the convolution settings can only be" \
            " either int or list/tuple of length {dims}".format(dims=dims)
        self._i2h_kernel = (i2h_kernel,) * dims \
            if isinstance(i2h_kernel, numeric_types) else i2h_kernel
        self._stride = (1,) * dims
        self._i2h_pad = (i2h_pad,) * dims \
            if isinstance(i2h_pad, numeric_types) else i2h_pad
        self._i2h_dilate = (i2h_dilate,) * dims \
            if isinstance(i2h_dilate, numeric_types) else i2h_dilate
        self._h2h_kernel = (h2h_kernel,) * dims \
            if isinstance(h2h_kernel, numeric_types) else h2h_kernel
        assert all(k % 2 == 1 for k in self._h2h_kernel), \
            "Only support odd number, get h2h_kernel= %s" % str(h2h_kernel)
        self._h2h_dilate = (h2h_dilate,) * dims \
            if isinstance(h2h_dilate, numeric_types) else h2h_dilate

        self._channel_axis, self._in_channels, i2h_param_shape, \
            h2h_param_shape, self._h2h_pad, self._state_shape = \
            self._decide_shapes()

        self.i2h_weight = self.params.get('i2h_weight', shape=i2h_param_shape,
                                          init=i2h_weight_initializer,
                                          allow_deferred_init=True)
        self.h2h_weight = self.params.get('h2h_weight', shape=h2h_param_shape,
                                          init=h2h_weight_initializer,
                                          allow_deferred_init=True)
        self.i2h_bias = self.params.get('i2h_bias',
                                        shape=(hidden_channels*self._num_gates,),
                                        init=i2h_bias_initializer,
                                        allow_deferred_init=True)
        self.h2h_bias = self.params.get('h2h_bias',
                                        shape=(hidden_channels*self._num_gates,),
                                        init=h2h_bias_initializer,
                                        allow_deferred_init=True)

    def _decide_shapes(self):
        channel_axis = self._conv_layout.find('C')
        input_shape = self._input_shape
        in_channels = input_shape[channel_axis - 1]
        hidden_channels = self._hidden_channels
        if channel_axis == 1:
            dimensions = input_shape[1:]
        else:
            dimensions = input_shape[:-1]

        total_out = hidden_channels * self._num_gates

        i2h_param_shape = (total_out,)
        h2h_param_shape = (total_out,)
        state_shape = (hidden_channels,)
        conv_out_size = _get_conv_out_size(dimensions,
                                           self._i2h_kernel,
                                           self._i2h_pad,
                                           self._i2h_dilate)
        # "Same" padding for the recurrent convolution, so the hidden state
        # keeps its spatial size across time steps.
        h2h_pad = tuple(d*(k-1)//2 for d, k in
                        zip(self._h2h_dilate, self._h2h_kernel))
        if channel_axis == 1:
            i2h_param_shape += (in_channels,) + self._i2h_kernel
            h2h_param_shape += (hidden_channels,) + self._h2h_kernel
            state_shape += conv_out_size
        else:
            i2h_param_shape += self._i2h_kernel + (in_channels,)
            h2h_param_shape += self._h2h_kernel + (hidden_channels,)
            state_shape = conv_out_size + state_shape

        return channel_axis, in_channels, i2h_param_shape, h2h_param_shape, \
            h2h_pad, state_shape

    def __repr__(self):
        s = '{name}({mapping}'
        if hasattr(self, '_activation'):
            s += ', {_activation}'
        s += ', {_conv_layout}'
        s += ')'
        attrs = self.__dict__
        shape = self.i2h_weight.shape
        in_channels = shape[1 if self._channel_axis == 1 else -1]
        mapping = '{0} -> {1}'.format(in_channels if in_channels else None,
                                      shape[0])
        return s.format(name=self.__class__.__name__,
                        mapping=mapping,
                        **attrs)

    @property
    def _num_gates(self):
        return len(self._gate_names)

    def _conv_forward(self, F, inputs, states,
                      i2h_weight, h2h_weight, i2h_bias, h2h_bias,
                      prefix):
        i2h = F.Convolution(data=inputs,
                            num_filter=self._hidden_channels*self._num_gates,
                            kernel=self._i2h_kernel,
                            stride=self._stride,
                            pad=self._i2h_pad,
                            dilate=self._i2h_dilate,
                            weight=i2h_weight,
                            bias=i2h_bias,
                            layout=self._conv_layout,
                            name=prefix+'i2h')
        h2h = F.Convolution(data=states[0],
                            num_filter=self._hidden_channels*self._num_gates,
                            kernel=self._h2h_kernel,
                            dilate=self._h2h_dilate,
                            pad=self._h2h_pad,
                            stride=self._stride,
                            weight=h2h_weight,
                            bias=h2h_bias,
                            layout=self._conv_layout,
                            name=prefix+'h2h')
        return i2h, h2h

    def state_info(self, batch_size=0):
        raise NotImplementedError("_BaseConvRNNCell is abstract class for convolutional RNN")

    def hybrid_forward(self, F, inputs, states):
        raise NotImplementedError("_BaseConvRNNCell is abstract class for convolutional RNN")
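# Worked example of the shape bookkeeping above (illustrative values, not part
# of the original module): for input width W=50 with i2h_kernel=3, i2h_pad=1
# and i2h_dilate=1, the i2h convolution output width is
# floor(50 + 2*1 - 1*(3-1) - 1) + 1 = 50.  The recurrent h2h convolution is
# padded by d*(k-1)//2 = 1*(3-1)//2 = 1, so the hidden state keeps that width
# at every step; this symmetric padding is why only odd h2h kernel sizes are
# supported.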
class _ConvRNNCell(_BaseConvRNNCell):
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad, i2h_dilate, h2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 dims, conv_layout, activation, prefix, params):
        super(_ConvRNNCell, self).__init__(input_shape=input_shape,
                                           hidden_channels=hidden_channels,
                                           activation=activation,
                                           i2h_kernel=i2h_kernel,
                                           i2h_pad=i2h_pad,
                                           i2h_dilate=i2h_dilate,
                                           h2h_kernel=h2h_kernel,
                                           h2h_dilate=h2h_dilate,
                                           i2h_weight_initializer=i2h_weight_initializer,
                                           h2h_weight_initializer=h2h_weight_initializer,
                                           i2h_bias_initializer=i2h_bias_initializer,
                                           h2h_bias_initializer=h2h_bias_initializer,
                                           dims=dims,
                                           conv_layout=conv_layout,
                                           prefix=prefix, params=params)

    def state_info(self, batch_size=0):
        return [{'shape': (batch_size,)+self._state_shape,
                 '__layout__': self._conv_layout}]

    def _alias(self):
        return 'conv_rnn'

    @property
    def _gate_names(self):
        return ('',)

    def hybrid_forward(self, F, inputs, states, i2h_weight,
                       h2h_weight, i2h_bias, h2h_bias):
        prefix = 't%d_' % self._counter
        i2h, h2h = self._conv_forward(F, inputs, states,
                                      i2h_weight, h2h_weight,
                                      i2h_bias, h2h_bias, prefix)
        output = self._get_activation(F, i2h + h2h, self._activation,
                                      name=prefix+'out')
        return output, [output]


class Conv1DRNNCell(_ConvRNNCell):
    r"""1D Convolutional RNN cell.

    .. math::

        h_t = tanh(W_i \ast x_t + R_i \ast h_{t-1} + b_i)

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCW' the shape should be (C, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0,)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1,)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1,)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCW' and 'NWC'.
    activation : str or Block, default 'tanh'
        Type of activation function.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_rnn_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0,),
                 i2h_dilate=(1,), h2h_dilate=(1,),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCW', activation='tanh',
                 prefix=None, params=None):
        super(Conv1DRNNCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            i2h_kernel=i2h_kernel,
                                            h2h_kernel=h2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=1,
                                            conv_layout=conv_layout,
                                            activation=activation,
                                            prefix=prefix, params=params)
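# A minimal usage sketch for the cell above (illustrative shapes; this block
# is not part of the original module):
#
#   import mxnet as mx
#   from mxnet.gluon.contrib.rnn import Conv1DRNNCell
#
#   cell = Conv1DRNNCell((10, 50), hidden_channels=16,
#                        i2h_kernel=(3,), h2h_kernel=(3,), i2h_pad=(1,))
#   cell.initialize()
#   x = mx.nd.random.uniform(shape=(4, 10, 50))   # (batch, C, W) for 'NCW'
#   states = cell.begin_state(batch_size=4)
#   out, states = cell(x, states)                 # out shape: (4, 16, 50)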
class Conv2DRNNCell(_ConvRNNCell):
    r"""2D Convolutional RNN cell.

    .. math::

        h_t = tanh(W_i \ast x_t + R_i \ast h_{t-1} + b_i)

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCHW' the shape should be (C, H, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0, 0)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1, 1)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1, 1)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCHW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCHW' and 'NHWC'.
    activation : str or Block, default 'tanh'
        Type of activation function.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_rnn_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0, 0),
                 i2h_dilate=(1, 1), h2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCHW', activation='tanh',
                 prefix=None, params=None):
        super(Conv2DRNNCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            i2h_kernel=i2h_kernel,
                                            h2h_kernel=h2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=2,
                                            conv_layout=conv_layout,
                                            activation=activation,
                                            prefix=prefix, params=params)
class Conv3DRNNCell(_ConvRNNCell):
    r"""3D Convolutional RNN cell.

    .. math::

        h_t = tanh(W_i \ast x_t + R_i \ast h_{t-1} + b_i)

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCDHW' the shape should be (C, D, H, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0, 0, 0)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1, 1, 1)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1, 1, 1)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCDHW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCDHW' and 'NDHWC'.
    activation : str or Block, default 'tanh'
        Type of activation function.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_rnn_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0, 0, 0),
                 i2h_dilate=(1, 1, 1), h2h_dilate=(1, 1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCDHW', activation='tanh',
                 prefix=None, params=None):
        super(Conv3DRNNCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            i2h_kernel=i2h_kernel,
                                            h2h_kernel=h2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=3,
                                            conv_layout=conv_layout,
                                            activation=activation,
                                            prefix=prefix, params=params)


class _ConvLSTMCell(_BaseConvRNNCell):
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad, i2h_dilate, h2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 dims, conv_layout, activation, prefix, params):
        super(_ConvLSTMCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            activation=activation,
                                            i2h_kernel=i2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_kernel=h2h_kernel,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=dims,
                                            conv_layout=conv_layout,
                                            prefix=prefix, params=params)

    def state_info(self, batch_size=0):
        return [{'shape': (batch_size,)+self._state_shape,
                 '__layout__': self._conv_layout},
                {'shape': (batch_size,)+self._state_shape,
                 '__layout__': self._conv_layout}]

    def _alias(self):
        return 'conv_lstm'

    @property
    def _gate_names(self):
        return ['_i', '_f', '_c', '_o']

    def hybrid_forward(self, F, inputs, states, i2h_weight,
                       h2h_weight, i2h_bias, h2h_bias):
        prefix = 't%d_' % self._counter
        i2h, h2h = self._conv_forward(F, inputs, states,
                                      i2h_weight, h2h_weight,
                                      i2h_bias, h2h_bias, prefix)
        gates = i2h + h2h
        slice_gates = F.SliceChannel(gates, num_outputs=4,
                                     name=prefix+'slice',
                                     axis=self._channel_axis)
        in_gate = F.Activation(slice_gates[0], act_type='sigmoid',
                               name=prefix+'i')
        forget_gate = F.Activation(slice_gates[1], act_type='sigmoid',
                                   name=prefix+'f')
        in_transform = self._get_activation(F, slice_gates[2],
                                            self._activation,
                                            name=prefix+'c')
        out_gate = F.Activation(slice_gates[3], act_type='sigmoid',
                                name=prefix+'o')
        next_c = F._internal._plus(forget_gate * states[1],
                                   in_gate * in_transform,
                                   name=prefix+'state')
        next_h = F._internal._mul(out_gate,
                                  self._get_activation(F, next_c, self._activation),
                                  name=prefix+'out')
        return next_h, [next_h, next_c]
class Conv1DLSTMCell(_ConvLSTMCell):
    r"""1D Convolutional LSTM network cell.

    `"Convolutional LSTM Network: A Machine Learning Approach
    for Precipitation Nowcasting" <https://arxiv.org/abs/1506.04214>`_ paper.
    Xingjian et al. NIPS2015

    .. math::

        \begin{array}{ll}
        i_t = \sigma(W_i \ast x_t + R_i \ast h_{t-1} + b_i) \\
        f_t = \sigma(W_f \ast x_t + R_f \ast h_{t-1} + b_f) \\
        o_t = \sigma(W_o \ast x_t + R_o \ast h_{t-1} + b_o) \\
        c^\prime_t = tanh(W_c \ast x_t + R_c \ast h_{t-1} + b_c) \\
        c_t = f_t \circ c_{t-1} + i_t \circ c^\prime_t \\
        h_t = o_t \circ tanh(c_t) \\
        \end{array}

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCW' the shape should be (C, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0,)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1,)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1,)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCW' and 'NWC'.
    activation : str or Block, default 'tanh'
        Type of activation function used in c^\prime_t.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_lstm_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0,),
                 i2h_dilate=(1,), h2h_dilate=(1,),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCW', activation='tanh',
                 prefix=None, params=None):
        super(Conv1DLSTMCell, self).__init__(input_shape=input_shape,
                                             hidden_channels=hidden_channels,
                                             i2h_kernel=i2h_kernel,
                                             h2h_kernel=h2h_kernel,
                                             i2h_pad=i2h_pad,
                                             i2h_dilate=i2h_dilate,
                                             h2h_dilate=h2h_dilate,
                                             i2h_weight_initializer=i2h_weight_initializer,
                                             h2h_weight_initializer=h2h_weight_initializer,
                                             i2h_bias_initializer=i2h_bias_initializer,
                                             h2h_bias_initializer=h2h_bias_initializer,
                                             dims=1,
                                             conv_layout=conv_layout,
                                             activation=activation,
                                             prefix=prefix, params=params)
class Conv2DLSTMCell(_ConvLSTMCell):
    r"""2D Convolutional LSTM network cell.

    `"Convolutional LSTM Network: A Machine Learning Approach
    for Precipitation Nowcasting" <https://arxiv.org/abs/1506.04214>`_ paper.
    Xingjian et al. NIPS2015

    .. math::

        \begin{array}{ll}
        i_t = \sigma(W_i \ast x_t + R_i \ast h_{t-1} + b_i) \\
        f_t = \sigma(W_f \ast x_t + R_f \ast h_{t-1} + b_f) \\
        o_t = \sigma(W_o \ast x_t + R_o \ast h_{t-1} + b_o) \\
        c^\prime_t = tanh(W_c \ast x_t + R_c \ast h_{t-1} + b_c) \\
        c_t = f_t \circ c_{t-1} + i_t \circ c^\prime_t \\
        h_t = o_t \circ tanh(c_t) \\
        \end{array}

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCHW' the shape should be (C, H, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0, 0)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1, 1)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1, 1)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCHW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCHW' and 'NHWC'.
    activation : str or Block, default 'tanh'
        Type of activation function used in c^\prime_t.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_lstm_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0, 0),
                 i2h_dilate=(1, 1), h2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCHW', activation='tanh',
                 prefix=None, params=None):
        super(Conv2DLSTMCell, self).__init__(input_shape=input_shape,
                                             hidden_channels=hidden_channels,
                                             i2h_kernel=i2h_kernel,
                                             h2h_kernel=h2h_kernel,
                                             i2h_pad=i2h_pad,
                                             i2h_dilate=i2h_dilate,
                                             h2h_dilate=h2h_dilate,
                                             i2h_weight_initializer=i2h_weight_initializer,
                                             h2h_weight_initializer=h2h_weight_initializer,
                                             i2h_bias_initializer=i2h_bias_initializer,
                                             h2h_bias_initializer=h2h_bias_initializer,
                                             dims=2,
                                             conv_layout=conv_layout,
                                             activation=activation,
                                             prefix=prefix, params=params)
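# A minimal usage sketch for Conv2DLSTMCell (illustrative shapes; not part of
# the original module).  The LSTM carries two states, h and c, both of shape
# (batch, hidden_channels, H, W) for layout 'NCHW':
#
#   import mxnet as mx
#   from mxnet.gluon.contrib.rnn import Conv2DLSTMCell
#
#   cell = Conv2DLSTMCell((3, 16, 16), hidden_channels=8,
#                         i2h_kernel=(3, 3), h2h_kernel=(3, 3), i2h_pad=(1, 1))
#   cell.initialize()
#   x = mx.nd.random.uniform(shape=(4, 3, 16, 16))    # (batch, C, H, W)
#   states = cell.begin_state(batch_size=4)           # [h, c]
#   out, states = cell(x, states)                     # out is the new h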
class Conv3DLSTMCell(_ConvLSTMCell):
    r"""3D Convolutional LSTM network cell.

    `"Convolutional LSTM Network: A Machine Learning Approach
    for Precipitation Nowcasting" <https://arxiv.org/abs/1506.04214>`_ paper.
    Xingjian et al. NIPS2015

    .. math::

        \begin{array}{ll}
        i_t = \sigma(W_i \ast x_t + R_i \ast h_{t-1} + b_i) \\
        f_t = \sigma(W_f \ast x_t + R_f \ast h_{t-1} + b_f) \\
        o_t = \sigma(W_o \ast x_t + R_o \ast h_{t-1} + b_o) \\
        c^\prime_t = tanh(W_c \ast x_t + R_c \ast h_{t-1} + b_c) \\
        c_t = f_t \circ c_{t-1} + i_t \circ c^\prime_t \\
        h_t = o_t \circ tanh(c_t) \\
        \end{array}

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCDHW' the shape should be (C, D, H, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0, 0, 0)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1, 1, 1)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1, 1, 1)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCDHW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCDHW' and 'NDHWC'.
    activation : str or Block, default 'tanh'
        Type of activation function used in c^\prime_t.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_lstm_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0, 0, 0),
                 i2h_dilate=(1, 1, 1), h2h_dilate=(1, 1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCDHW', activation='tanh',
                 prefix=None, params=None):
        super(Conv3DLSTMCell, self).__init__(input_shape=input_shape,
                                             hidden_channels=hidden_channels,
                                             i2h_kernel=i2h_kernel,
                                             h2h_kernel=h2h_kernel,
                                             i2h_pad=i2h_pad,
                                             i2h_dilate=i2h_dilate,
                                             h2h_dilate=h2h_dilate,
                                             i2h_weight_initializer=i2h_weight_initializer,
                                             h2h_weight_initializer=h2h_weight_initializer,
                                             i2h_bias_initializer=i2h_bias_initializer,
                                             h2h_bias_initializer=h2h_bias_initializer,
                                             dims=3,
                                             conv_layout=conv_layout,
                                             activation=activation,
                                             prefix=prefix, params=params)


class _ConvGRUCell(_BaseConvRNNCell):
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad, i2h_dilate, h2h_dilate,
                 i2h_weight_initializer, h2h_weight_initializer,
                 i2h_bias_initializer, h2h_bias_initializer,
                 dims, conv_layout, activation, prefix, params):
        super(_ConvGRUCell, self).__init__(input_shape=input_shape,
                                           hidden_channels=hidden_channels,
                                           activation=activation,
                                           i2h_kernel=i2h_kernel,
                                           i2h_pad=i2h_pad,
                                           i2h_dilate=i2h_dilate,
                                           h2h_kernel=h2h_kernel,
                                           h2h_dilate=h2h_dilate,
                                           i2h_weight_initializer=i2h_weight_initializer,
                                           h2h_weight_initializer=h2h_weight_initializer,
                                           i2h_bias_initializer=i2h_bias_initializer,
                                           h2h_bias_initializer=h2h_bias_initializer,
                                           dims=dims,
                                           conv_layout=conv_layout,
                                           prefix=prefix, params=params)

    def state_info(self, batch_size=0):
        return [{'shape': (batch_size,)+self._state_shape,
                 '__layout__': self._conv_layout}]

    def _alias(self):
        return 'conv_gru'

    @property
    def _gate_names(self):
        return ['_r', '_z', '_o']

    def hybrid_forward(self, F, inputs, states, i2h_weight,
                       h2h_weight, i2h_bias, h2h_bias):
        prefix = 't%d_' % self._counter
        i2h, h2h = self._conv_forward(F, inputs, states,
                                      i2h_weight, h2h_weight,
                                      i2h_bias, h2h_bias, prefix)
        i2h_r, i2h_z, i2h = F.SliceChannel(i2h, num_outputs=3,
                                           name=prefix+'i2h_slice',
                                           axis=self._channel_axis)
        h2h_r, h2h_z, h2h = F.SliceChannel(h2h, num_outputs=3,
                                           name=prefix+'h2h_slice',
                                           axis=self._channel_axis)

        reset_gate = F.Activation(i2h_r + h2h_r, act_type='sigmoid',
                                  name=prefix+'r_act')
        update_gate = F.Activation(i2h_z + h2h_z, act_type='sigmoid',
                                   name=prefix+'z_act')

        next_h_tmp = self._get_activation(F, i2h + reset_gate * h2h,
                                          self._activation,
                                          name=prefix+'h_act')

        next_h = F._internal._plus((1. - update_gate) * next_h_tmp,
                                   update_gate * states[0],
                                   name=prefix+'out')

        return next_h, [next_h]
class Conv1DGRUCell(_ConvGRUCell):
    r"""1D Convolutional Gated Recurrent Unit (GRU) network cell.

    .. math::

        \begin{array}{ll}
        r_t = \sigma(W_r \ast x_t + R_r \ast h_{t-1} + b_r) \\
        z_t = \sigma(W_z \ast x_t + R_z \ast h_{t-1} + b_z) \\
        n_t = tanh(W_i \ast x_t + b_i + r_t \circ (R_n \ast h_{t-1} + b_n)) \\
        h^\prime_t = (1 - z_t) \circ n_t + z_t \circ h_{t-1} \\
        \end{array}

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCW' the shape should be (C, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0,)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1,)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1,)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCW' and 'NWC'.
    activation : str or Block, default 'tanh'
        Type of activation function used in n_t.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_gru_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0,),
                 i2h_dilate=(1,), h2h_dilate=(1,),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCW', activation='tanh',
                 prefix=None, params=None):
        super(Conv1DGRUCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            i2h_kernel=i2h_kernel,
                                            h2h_kernel=h2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=1,
                                            conv_layout=conv_layout,
                                            activation=activation,
                                            prefix=prefix, params=params)
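# A sketch of stepping a Conv1DGRUCell over a short sequence (illustrative
# shapes; not part of the original module).  `unroll` comes from the
# RecurrentCell base class and splits the input along the time axis:
#
#   import mxnet as mx
#   from mxnet.gluon.contrib.rnn import Conv1DGRUCell
#
#   cell = Conv1DGRUCell((10, 50), hidden_channels=16,
#                        i2h_kernel=(3,), h2h_kernel=(3,), i2h_pad=(1,))
#   cell.initialize()
#   seq = mx.nd.random.uniform(shape=(4, 8, 10, 50))  # (batch, T, C, W)
#   outputs, states = cell.unroll(8, seq, layout='NTC', merge_outputs=False)
#   # outputs: list of 8 arrays, each of shape (4, 16, 50)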
class Conv2DGRUCell(_ConvGRUCell):
    r"""2D Convolutional Gated Recurrent Unit (GRU) network cell.

    .. math::

        \begin{array}{ll}
        r_t = \sigma(W_r \ast x_t + R_r \ast h_{t-1} + b_r) \\
        z_t = \sigma(W_z \ast x_t + R_z \ast h_{t-1} + b_z) \\
        n_t = tanh(W_i \ast x_t + b_i + r_t \circ (R_n \ast h_{t-1} + b_n)) \\
        h^\prime_t = (1 - z_t) \circ n_t + z_t \circ h_{t-1} \\
        \end{array}

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCHW' the shape should be (C, H, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0, 0)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1, 1)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1, 1)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCHW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCHW' and 'NHWC'.
    activation : str or Block, default 'tanh'
        Type of activation function used in n_t.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_gru_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0, 0),
                 i2h_dilate=(1, 1), h2h_dilate=(1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCHW', activation='tanh',
                 prefix=None, params=None):
        super(Conv2DGRUCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            i2h_kernel=i2h_kernel,
                                            h2h_kernel=h2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=2,
                                            conv_layout=conv_layout,
                                            activation=activation,
                                            prefix=prefix, params=params)


class Conv3DGRUCell(_ConvGRUCell):
    r"""3D Convolutional Gated Recurrent Unit (GRU) network cell.

    .. math::

        \begin{array}{ll}
        r_t = \sigma(W_r \ast x_t + R_r \ast h_{t-1} + b_r) \\
        z_t = \sigma(W_z \ast x_t + R_z \ast h_{t-1} + b_z) \\
        n_t = tanh(W_i \ast x_t + b_i + r_t \circ (R_n \ast h_{t-1} + b_n)) \\
        h^\prime_t = (1 - z_t) \circ n_t + z_t \circ h_{t-1} \\
        \end{array}

    Parameters
    ----------
    input_shape : tuple of int
        Input tensor shape at each time step for each sample, excluding
        dimension of the batch size and sequence length.
        Must be consistent with `conv_layout`.
        For example, for layout 'NCDHW' the shape should be (C, D, H, W).
    hidden_channels : int
        Number of output channels.
    i2h_kernel : int or tuple of int
        Input convolution kernel sizes.
    h2h_kernel : int or tuple of int
        Recurrent convolution kernel sizes. Only odd-numbered sizes are supported.
    i2h_pad : int or tuple of int, default (0, 0, 0)
        Pad for input convolution.
    i2h_dilate : int or tuple of int, default (1, 1, 1)
        Input convolution dilate.
    h2h_dilate : int or tuple of int, default (1, 1, 1)
        Recurrent convolution dilate.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the input convolutions.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the recurrent convolutions.
    i2h_bias_initializer : str or Initializer, default zeros
        Initializer for the input convolution bias vectors.
    h2h_bias_initializer : str or Initializer, default zeros
        Initializer for the recurrent convolution bias vectors.
    conv_layout : str, default 'NCDHW'
        Layout for all convolution inputs, outputs and weights.
        Options are 'NCDHW' and 'NDHWC'.
    activation : str or Block, default 'tanh'
        Type of activation function used in n_t.
        If argument type is string, it's equivalent to nn.Activation(act_type=str).
        See :func:`~mxnet.ndarray.Activation` for available choices.
        Alternatively, other activation blocks such as nn.LeakyReLU can be used.
    prefix : str, default 'conv_gru_'
        Prefix for name of layers (and name of weight if params is None).
    params : RNNParams, default None
        Container for weight sharing between cells. Created if None.
    """
    def __init__(self, input_shape, hidden_channels,
                 i2h_kernel, h2h_kernel, i2h_pad=(0, 0, 0),
                 i2h_dilate=(1, 1, 1), h2h_dilate=(1, 1, 1),
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 conv_layout='NCDHW', activation='tanh',
                 prefix=None, params=None):
        super(Conv3DGRUCell, self).__init__(input_shape=input_shape,
                                            hidden_channels=hidden_channels,
                                            i2h_kernel=i2h_kernel,
                                            h2h_kernel=h2h_kernel,
                                            i2h_pad=i2h_pad,
                                            i2h_dilate=i2h_dilate,
                                            h2h_dilate=h2h_dilate,
                                            i2h_weight_initializer=i2h_weight_initializer,
                                            h2h_weight_initializer=h2h_weight_initializer,
                                            i2h_bias_initializer=i2h_bias_initializer,
                                            h2h_bias_initializer=h2h_bias_initializer,
                                            dims=3,
                                            conv_layout=conv_layout,
                                            activation=activation,
                                            prefix=prefix, params=params)