The first step is to define the convolution filter (kernel) class:
class Filter(object):
    """One convolution filter: a (depth, height, width) weight tensor plus a
    scalar bias, together with their accumulated gradients."""

    def __init__(self, width, height, depth):
        # Small random init in [-1e-4, 1e-4); gradients start at zero.
        self.weights = np.random.uniform(-1e-4, 1e-4, (depth, height, width))
        self.bias = 0
        self.weights_grad = np.zeros(self.weights.shape)
        self.bias_grad = 0

    def get_weights(self):
        """Return the (depth, height, width) weight tensor."""
        return self.weights

    def get_bias(self):
        """Return the scalar bias."""
        return self.bias

    def update_weight(self, learning_rate):
        """Apply one step of plain gradient descent to weights and bias."""
        self.weights -= self.weights_grad * learning_rate
        self.bias -= self.bias_grad * learning_rate
Next, define the convolution operation itself:
def conv(input_array, kernel_array, output_array, stride, bias):
    """Convolve `kernel_array` over `input_array`, writing into `output_array`
    in place.

    output_array's shape (height, width) determines how many positions are
    visited; kernel height/width are taken from the kernel's trailing axes, so
    both 2-D (H, W) and 3-D (D, H, W) kernels work.
    """
    output_height = output_array.shape[0]
    output_width = output_array.shape[1]
    kernel_width = kernel_array.shape[-1]
    kernel_height = kernel_array.shape[-2]
    for i in range(output_height):
        for j in range(output_width):
            # get_patch (defined elsewhere in this file) extracts the input
            # window corresponding to output position (i, j) — presumably a
            # stride-offset slice of kernel_height x kernel_width; confirm
            # against its definition.
            patch = get_patch(input_array, i, j,
                              kernel_width, kernel_height, stride)
            output_array[i][j] = (patch * kernel_array).sum() + bias
Define the padding function: zero-pad the input array by the requested amount on each side.
def padding(input_array, zero_padding):
    """Return `input_array` zero-padded by `zero_padding` cells on every
    spatial edge.

    Supports 2-D (H, W) and 3-D (D, H, W) inputs; the depth axis of a 3-D
    input is not padded. With zero_padding == 0 the input is returned as-is.
    """
    if zero_padding == 0:
        return input_array
    if input_array.ndim == 3:
        input_depth = input_array.shape[0]
        input_height = input_array.shape[1]
        input_width = input_array.shape[2]
        padded_array = np.zeros((input_depth,
                                 input_height + 2 * zero_padding,
                                 input_width + 2 * zero_padding))
        padded_array[:,
                     zero_padding:zero_padding + input_height,
                     zero_padding:zero_padding + input_width] = input_array
        return padded_array
    elif input_array.ndim == 2:
        input_height = input_array.shape[0]
        input_width = input_array.shape[1]
        padded_array = np.zeros((input_height + 2 * zero_padding,
                                 input_width + 2 * zero_padding))
        # Rows are indexed by height, columns by width (the original code had
        # these two extents swapped, which breaks non-square inputs).
        padded_array[zero_padding:zero_padding + input_height,
                     zero_padding:zero_padding + input_width] = input_array
        return padded_array
Define the output-size helper and the convolutional layer class:
def calculate_output_size(input_size, filter_size, zero_padding, stride):
    """Return the spatial output size of a convolution:
    (W - F + 2P) / S + 1.

    Uses floor division so the result is an int usable as an array dimension
    (plain `/` would yield a float under Python 3).
    """
    return (input_size - filter_size + 2 * zero_padding) // stride + 1
class ConvLayer(object):
    """A convolutional layer: `filter_number` filters convolved over a
    (channel_number, input_height, input_width) input, with zero padding,
    stride, a nonlinearity (`activator`), and gradient-descent updates."""

    def __init__(self, input_width, input_height, channel_number,
                 filter_width, filter_height, filter_number,
                 zero_padding, stride, activator, learning_rate):
        self.input_width = input_width
        self.input_height = input_height
        self.channel_number = channel_number
        self.filter_width = filter_width
        self.filter_height = filter_height
        self.filter_number = filter_number
        self.zero_padding = zero_padding
        self.stride = stride
        # Output spatial size per (W - F + 2P) / S + 1.
        self.output_width = ConvLayer.calculate_output_size(
            input_width, filter_width, zero_padding, stride)
        self.output_height = ConvLayer.calculate_output_size(
            input_height, filter_height, zero_padding, stride)
        # One output map per filter; axes ordered (depth, height, width) to
        # match conv()'s row-major indexing.
        self.output_array = np.zeros((self.filter_number,
                                      self.output_height,
                                      self.output_width))
        # Filter depth equals the number of input channels.
        self.filters = [Filter(filter_width, filter_height, channel_number)
                        for _ in range(filter_number)]
        self.activator = activator
        self.learning_rate = learning_rate

    @staticmethod
    def calculate_output_size(input_size, filter_size, zero_padding, stride):
        """Spatial output size: (W - F + 2P) // S + 1 (int result)."""
        return (input_size - filter_size + 2 * zero_padding) // stride + 1

    def expand_sensitivity_map(self, sensitivity_array):
        """Expand a strided sensitivity map back to stride-1 layout by
        scattering each (i, j) entry to (i*stride, j*stride), zeros between."""
        depth = sensitivity_array.shape[0]
        expanded_width = (self.input_width - self.filter_width
                          + 2 * self.zero_padding + 1)
        expanded_height = (self.input_height - self.filter_height
                           + 2 * self.zero_padding + 1)
        expand_array = np.zeros((depth, expanded_height, expanded_width))
        for i in range(self.output_height):
            for j in range(self.output_width):
                i_pos = i * self.stride
                j_pos = j * self.stride
                expand_array[:, i_pos, j_pos] = sensitivity_array[:, i, j]
        return expand_array

    def create_delta_array(self):
        """Allocate a zeroed error tensor shaped like this layer's input."""
        return np.zeros((self.channel_number,
                         self.input_height,
                         self.input_width))

    def forward(self, input_array):
        """Forward pass: pad the input, convolve each filter into its output
        map, then apply the activation element-wise in place."""
        self.input_array = input_array
        self.padded_input_array = padding(input_array, self.zero_padding)
        for f in range(self.filter_number):
            flt = self.filters[f]
            conv(self.padded_input_array, flt.get_weights(),
                 self.output_array[f], self.stride, flt.get_bias())
        # element_wise_op (defined elsewhere in this file) mutates the array
        # in place with activator.forward.
        element_wise_op(self.output_array, self.activator.forward)

    def bp_sensitivity_map(self, sensitivity_array, activator):
        """Backward pass for the input error: convolve the (expanded, padded)
        sensitivity map with each filter's 180°-rotated weights, sum over
        filters, then multiply by the activation derivative."""
        expanded_array = self.expand_sensitivity_map(sensitivity_array)
        expanded_width = expanded_array.shape[2]
        # Padding needed so a stride-1 "full" convolution reproduces the
        # input size; integer division keeps it usable as a pad amount.
        zp = (self.input_width + self.filter_width - 1 - expanded_width) // 2
        padded_array = padding(expanded_array, zp)
        self.delta_array = self.create_delta_array()
        for f in range(self.filter_number):
            flt = self.filters[f]
            # Rotate each channel's kernel by 180° (np.rot90 twice).
            flipped_weights = np.array([np.rot90(w, 2)
                                        for w in flt.get_weights()])
            delta_array = self.create_delta_array()
            for d in range(delta_array.shape[0]):
                conv(padded_array[f], flipped_weights[d],
                     delta_array[d], 1, 0)
            self.delta_array += delta_array
        derivative_array = np.array(self.input_array)
        element_wise_op(derivative_array, activator.backward)
        self.delta_array *= derivative_array

    def bp_gradient(self, sensitivity_array):
        """Accumulate weight/bias gradients: each filter's gradient is the
        padded input convolved with the expanded sensitivity map."""
        expanded_array = self.expand_sensitivity_map(sensitivity_array)
        for f in range(self.filter_number):
            flt = self.filters[f]
            for d in range(flt.weights.shape[0]):
                conv(self.padded_input_array[d], expanded_array[f],
                     flt.weights_grad[d], 1, 0)
            flt.bias_grad = expanded_array[f].sum()

    def update(self):
        """Gradient-descent step on every filter in this layer."""
        for flt in self.filters:
            flt.update_weight(self.learning_rate)
Together, these pieces form a simple convolutional network framework implemented in pure Python with NumPy.