Convolutional Neural Network -- Code Implementation

Source: Internet
Author: User
import numpy as np

class ReluActivator(object):
    def forward(self, weighted_input):
        # ReLU: pass positive values through, clamp negatives to zero
        return max(0, weighted_input)

    def backward(self, output):
        # derivative of ReLU, expressed in terms of the output
        return 1 if output > 0 else 0

class IdentityActivator(object):
    def forward(self, weighted_input):
        return weighted_input

    def backward(self, output):
        return 1

def get_patch(input_array, i, j, filter_width, filter_height, stride):
    # Extract the input window for output position (i, j); supports
    # both 2D (single-channel) and 3D (multi-channel) input arrays.
    start_i = i * stride
    start_j = j * stride
    if input_array.ndim == 2:
        return input_array[start_i:start_i + filter_height,
                           start_j:start_j + filter_width]
    elif input_array.ndim == 3:
        return input_array[:, start_i:start_i + filter_height,
                           start_j:start_j + filter_width]

def get_max_index(array):
    # Return the (row, col) index of the maximum of a 2D array.
    max_i = 0
    max_j = 0
    max_value = array[0, 0]
    for i in range(array.shape[0]):
        for j in range(array.shape[1]):
            if array[i, j] > max_value:
                max_value = array[i, j]
                max_i, max_j = i, j
    return max_i, max_j
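# Example (for orientation): with a 5x5 input, a 3x3 window and stride 2,
# get_patch(x, 1, 1, 3, 3, 2) returns x[2:5, 2:5].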
def conv(input_array, kernel_array, output_array, stride, bias):
    # Cross-correlate input_array with kernel_array and write the result
    # (plus bias) into the pre-allocated output_array; handles 2D and 3D input.
    output_width = output_array.shape[1]
    output_height = output_array.shape[0]
    kernel_width = kernel_array.shape[-1]
    kernel_height = kernel_array.shape[-2]
    for i in range(output_height):
        for j in range(output_width):
            output_array[i][j] = (
                get_patch(input_array, i, j, kernel_width,
                          kernel_height, stride) * kernel_array
            ).sum() + bias

def padding(input_array, zp):
    # Zero-pad the input by zp on each side; handles 2D and 3D input.
    if zp == 0:
        return input_array
    else:
        if input_array.ndim == 3:
            input_width = input_array.shape[2]
            input_height = input_array.shape[1]
            input_depth = input_array.shape[0]
            padded_array = np.zeros((
                input_depth,
                input_height + 2 * zp,
                input_width + 2 * zp))
            padded_array[:,
                         zp:zp + input_height,
                         zp:zp + input_width] = input_array
            return padded_array
        elif input_array.ndim == 2:
            input_width = input_array.shape[1]
            input_height = input_array.shape[0]
            padded_array = np.zeros((
                input_height + 2 * zp,
                input_width + 2 * zp))
            padded_array[zp:zp + input_height,
                         zp:zp + input_width] = input_array
            return padded_array
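# Output size arithmetic: for input size n, kernel size k, zero padding p
# and stride s, each spatial dimension of the output is (n - k + 2p)//s + 1.
# For example, a 5x5 input with a 3x3 kernel, p = 1 and s = 2 gives
# (5 - 3 + 2)//2 + 1 = 3, i.e. a 3x3 feature map.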
# Apply op to every element of a numpy array, in place.
def element_wise_op(array, op):
    for i in np.nditer(array, op_flags=['readwrite']):
        i[...] = op(i)

class Filter(object):
    def __init__(self, width, height, depth):
        # small random initial weights, zero bias
        self.weights = np.random.uniform(-1e-4, 1e-4,
                                         (depth, height, width))
        self.bias = 0
        self.weights_grad = np.zeros(self.weights.shape)
        self.bias_grad = 0

    def __repr__(self):
        return 'filter weights:\n%s\nbias:\n%s' % (
            repr(self.weights), repr(self.bias))

    def get_weights(self):
        return self.weights

    def get_bias(self):
        return self.bias

    def update(self, learning_rate):
        # vanilla gradient descent step
        self.weights -= learning_rate * self.weights_grad
        self.bias -= learning_rate * self.bias_grad
class ConvLayer(object):
    def __init__(self, input_width, input_height,
                 channel_number, filter_width, filter_height,
                 filter_number, zero_padding, stride,
                 activator, learning_rate):
        self.input_width = input_width
        self.input_height = input_height
        self.channel_number = channel_number
        self.filter_width = filter_width
        self.filter_height = filter_height
        self.filter_number = filter_number
        self.zero_padding = zero_padding
        self.stride = stride
        self.output_width = \
            ConvLayer.calculate_output_size(
                self.input_width, filter_width, zero_padding, stride)
        self.output_height = \
            ConvLayer.calculate_output_size(
                self.input_height, filter_height, zero_padding, stride)
        self.output_array = np.zeros((self.filter_number,
                                      self.output_height,
                                      self.output_width))
        self.filters = []
        for i in range(filter_number):
            self.filters.append(Filter(filter_width,
                                       filter_height,
                                       self.channel_number))
        self.activator = activator
        self.learning_rate = learning_rate

    def forward(self, input_array):
        # Compute the layer's output; the result is stored in self.output_array.
        self.input_array = input_array
        self.padded_input_array = padding(input_array,
                                          self.zero_padding)
        for f in range(self.filter_number):
            filter = self.filters[f]
            conv(self.padded_input_array,
                 filter.get_weights(), self.output_array[f],
                 self.stride, filter.get_bias())
        element_wise_op(self.output_array,
                        self.activator.forward)

    def backward(self, input_array, sensitivity_array, activator):
        # Compute the weight gradients (stored in each filter's weights_grad)
        # and the delta passed to the previous layer (self.delta_array).
        self.forward(input_array)
        self.bp_sensitivity_map(sensitivity_array, activator)
        self.bp_gradient(sensitivity_array)

    def update(self):
        # Gradient-descent update of every filter.
        for filter in self.filters:
            filter.update(self.learning_rate)
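    # How the backward pass below works: the sensitivity map is first
    # expanded back to the stride-1 layout, then zero-padded so that a
    # "full" convolution with each filter's kernels rotated by 180 degrees
    # yields a delta of the input's shape; the deltas of all filters are
    # summed and finally multiplied element-wise by the derivative of the
    # previous layer's activator.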
    def bp_sensitivity_map(self, sensitivity_array, activator):
        # Propagate the sensitivity map to the previous layer.
        # sensitivity_array: this layer's sensitivity map
        # activator: the previous layer's activator
        expanded_array = self.expand_sensitivity_map(sensitivity_array)
        # full convolution requires zero-padding the sensitivity map
        expanded_width = expanded_array.shape[2]
        zp = (self.input_width +
              self.filter_width - 1 - expanded_width) // 2
        padded_array = padding(expanded_array, zp)
        # initialize delta_array to accumulate the previous layer's deltas
        self.delta_array = self.create_delta_array()
        # with multiple filters, the delta passed back is the sum of the
        # deltas contributed by every filter
        for f in range(self.filter_number):
            filter = self.filters[f]
            # rotate each kernel by 180 degrees
            flipped_weights = np.array(
                [np.rot90(i, 2) for i in filter.get_weights()])
            # compute the delta contributed by this filter
            delta_array = self.create_delta_array()
            for d in range(delta_array.shape[0]):
                conv(padded_array[f], flipped_weights[d],
                     delta_array[d], 1, 0)
            self.delta_array += delta_array
        # multiply by the derivative of the activation function
        derivative_array = np.array(self.input_array)
        element_wise_op(derivative_array, activator.backward)
        self.delta_array *= derivative_array

    def bp_gradient(self, sensitivity_array):
        expanded_array = self.expand_sensitivity_map(sensitivity_array)
        for f in range(self.filter_number):
            # the weight gradient is the convolution of the padded input
            # with the expanded sensitivity map
            filter = self.filters[f]
            for d in range(filter.weights.shape[0]):
                conv(self.padded_input_array[d],
                     expanded_array[f],
                     filter.weights_grad[d], 1, 0)
            # the bias gradient is the sum of the sensitivity map
            filter.bias_grad = expanded_array[f].sum()

    def expand_sensitivity_map(self, sensitivity_array):
        depth = sensitivity_array.shape[0]
        # size of the sensitivity map as if stride were 1
        expanded_width = (self.input_width -
                          self.filter_width + 2 * self.zero_padding + 1)
        expanded_height = (self.input_height -
                           self.filter_height + 2 * self.zero_padding + 1)
        expand_array = np.zeros((depth, expanded_height, expanded_width))
        # copy each sensitivity value to its stride-1 position
        for i in range(self.output_height):
            for j in range(self.output_width):
                i_pos = i * self.stride
                j_pos = j * self.stride
                expand_array[:, i_pos, j_pos] = \
                    sensitivity_array[:, i, j]
        return expand_array

    def create_delta_array(self):
        return np.zeros((self.channel_number,
                         self.input_height, self.input_width))

    @staticmethod
    def calculate_output_size(input_size, filter_size,
                              zero_padding, stride):
        return (input_size - filter_size +
                2 * zero_padding) // stride + 1
class MaxPoolingLayer(object):
    def __init__(self, input_width, input_height,
                 channel_number, filter_width,
                 filter_height, stride):
        self.input_width = input_width
        self.input_height = input_height
        self.channel_number = channel_number
        self.filter_width = filter_width
        self.filter_height = filter_height
        self.stride = stride
        self.output_width = (input_width -
                             filter_width) // self.stride + 1
        self.output_height = (input_height -
                              filter_height) // self.stride + 1
        self.output_array = np.zeros((self.channel_number,
                                      self.output_height,
                                      self.output_width))

    def forward(self, input_array):
        # take the maximum of each pooling window, per channel
        for d in range(self.channel_number):
            for i in range(self.output_height):
                for j in range(self.output_width):
                    self.output_array[d, i, j] = (
                        get_patch(input_array[d], i, j,
                                  self.filter_width,
                                  self.filter_height,
                                  self.stride).max())

    def backward(self, input_array, sensitivity_array):
        self.delta_array = np.zeros(input_array.shape)
        for d in range(self.channel_number):
            for i in range(self.output_height):
                for j in range(self.output_width):
                    patch_array = get_patch(
                        input_array[d], i, j,
                        self.filter_width,
                        self.filter_height,
                        self.stride)
                    k, l = get_max_index(patch_array)
                    self.delta_array[d,
                                     i * self.stride + k,
                                     j * self.stride + l] = \
                        sensitivity_array[d, i, j]
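# Note on the pooling backward pass above: only the position that produced
# the window's maximum receives the sensitivity; every other position in
# the window gets zero gradient, since it did not influence the output.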
def init_test():
    a = np.array(
        [[[0, 1, 1, 0, 2],
          [2, 2, 2, 2, 1],
          [1, 0, 0, 2, 0],
          [0, 1, 1, 0, 0],
          [1, 2, 0, 0, 2]],
         [[1, 0, 2, 2, 0],
          [0, 0, 0, 2, 0],
          [1, 2, 1, 2, 1],
          [1, 0, 0, 0, 0],
          [1, 2, 1, 1, 1]],
         [[2, 1, 2, 0, 0],
          [1, 0, 0, 1, 0],
          [0, 2, 1, 0, 1],
          [0, 1, 2, 2, 2],
          [2, 1, 0, 0, 1]]])
    b = np.array(
        [[[0, 1, 1],
          [2, 2, 2],
          [1, 0, 0]],
         [[1, 0, 2],
          [0, 0, 0],
          [1, 2, 1]]])
    cl = ConvLayer(5, 5, 3, 3, 3, 2, 1, 2,
                   IdentityActivator(), 0.001)
    cl.filters[0].weights = np.array(
        [[[-1, 1, 0],
          [0, 1, 0],
          [0, 1, 1]],
         [[-1, -1, 0],
          [0, 0, 0],
          [0, -1, 0]],
         [[0, 0, -1],
          [0, 1, 0],
          [1, -1, -1]]], dtype=np.float64)
    cl.filters[0].bias = 1
    cl.filters[1].weights = np.array(
        [[[1, 1, -1],
          [-1, -1, 1],
          [0, -1, 1]],
         [[0, 1, 0],
          [-1, 0, -1],
          [-1, 1, 0]],
         [[-1, 0, 0],
          [-1, 0, 1],
          [-1, 0, 0]]], dtype=np.float64)
    return a, b, cl

def test():
    a, b, cl = init_test()
    cl.forward(a)
    print(cl.output_array)

def test_bp():
    a, b, cl = init_test()
    cl.backward(a, b, IdentityActivator())
    cl.update()
    print(cl.filters[0])
    print(cl.filters[1])
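# gradient_check() below verifies bp_gradient numerically: each weight w is
# perturbed by +/-epsilon, the forward pass is re-run, and the central
# difference (err1 - err2) / (2 * epsilon) is compared against the analytic
# gradient computed by backpropagation.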


def gradient_check():
    # the error function simply sums all outputs, so its sensitivity map
    # is all ones
    error_function = lambda o: o.sum()
    a, b, cl = init_test()
    cl.forward(a)
    sensitivity_array = np.ones(cl.output_array.shape,
                                dtype=np.float64)
    cl.backward(a, sensitivity_array, IdentityActivator())
    # compare the analytic gradient with a central-difference estimate
    epsilon = 10e-4
    for d in range(cl.filters[0].weights_grad.shape[0]):
        for i in range(cl.filters[0].weights_grad.shape[1]):
            for j in range(cl.filters[0].weights_grad.shape[2]):
                cl.filters[0].weights[d, i, j] += epsilon
                cl.forward(a)
                err1 = error_function(cl.output_array)
                cl.filters[0].weights[d, i, j] -= 2 * epsilon
                cl.forward(a)
                err2 = error_function(cl.output_array)
                expect_grad = (err1 - err2) / (2 * epsilon)
                cl.filters[0].weights[d, i, j] += epsilon
                print('weights(%d,%d,%d): expected - actual %f - %f' % (
                    d, i, j, expect_grad,
                    cl.filters[0].weights_grad[d, i, j]))

def init_pool_test():
    a = np.array(
        [[[1, 1, 2, 4],
          [5, 6, 7, 8],
          [3, 2, 1, 0],
          [1, 2, 3, 4]],
         [[0, 1, 2, 3],
          [4, 5, 6, 7],
          [8, 9, 0, 1],
          [3, 4, 5, 6]]], dtype=np.float64)
    b = np.array(
        [[[1, 2],
          [2, 4]],
         [[3, 5],
          [8, 2]]], dtype=np.float64)
    mpl = MaxPoolingLayer(4, 4, 2, 2, 2, 2)
    return a, b, mpl

def test_pool():
    a, b, mpl = init_pool_test()
    mpl.forward(a)
    print('input array:\n%s\noutput array:\n%s' % (
        a, mpl.output_array))

def test_pool_bp():
    a, b, mpl = init_pool_test()
    mpl.backward(a, b)
    print('input array:\n%s\nsensitivity array:\n%s\ndelta array:\n%s' % (
        a, b, mpl.delta_array))

if __name__ == '__main__':
    test()
    test_bp()
    test_pool()
    test_pool_bp()
    gradient_check()
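As a minimal usage sketch, a conv layer can be chained into a pooling layer as below. The class and activator names come from the listing above; the module name cnn.py is hypothetical, assuming the listing is saved as a standalone file.

    import numpy as np
    # from cnn import ConvLayer, MaxPoolingLayer, ReluActivator  # hypothetical module name

    # random 3-channel 5x5 input
    x = np.random.uniform(-1, 1, (3, 5, 5))

    # conv: 5x5 input, 3 channels, two 3x3 filters, zero padding 1, stride 2
    conv = ConvLayer(5, 5, 3, 3, 3, 2, 1, 2, ReluActivator(), 0.001)
    conv.forward(x)
    print(conv.output_array.shape)   # (2, 3, 3)

    # pool: 3x3 input per channel, 2 channels, 2x2 window, stride 1
    pool = MaxPoolingLayer(3, 3, 2, 2, 2, 1)
    pool.forward(conv.output_array)
    print(pool.output_array.shape)   # (2, 2, 2)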
