The VGG model used here is a 19-layer network (VGG-19) whose parameters have already been trained by someone else; we simply load the pre-trained weights and use them to extract feature maps.
Step one: Define the convolution and helper operation functions
import scipy.io
import numpy as np
import os
import scipy.misc
import matplotlib.pyplot as plt
import tensorflow as tf

# Perform a convolution operation
def _conv_layer(input, weights, bias):
    conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1), padding='SAME')
    return tf.nn.bias_add(conv, bias)

# Perform a max-pooling operation
def _pool_layer(input):
    return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')

# Subtract / add back the mean pixel value
def preprocess(image, mean_pixel):
    return image - mean_pixel

def unprocess(image, mean_pixel):
    return image + mean_pixel

# Image I/O helpers
def imread(path):
    return scipy.misc.imread(path).astype(np.float)

def imsave(path, img):
    img = np.clip(img, 0, 255).astype(np.uint8)
    scipy.misc.imsave(path, img)
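As a quick sanity check (not part of the original article), the two layer helpers can be exercised on a dummy placeholder; the shapes and kernel sizes below are made up purely for illustration.

# Hypothetical sanity check for the helper ops (assumes TensorFlow 1.x graph mode)
dummy = tf.placeholder('float', shape=(1, 224, 224, 3))
dummy_w = np.zeros((3, 3, 3, 64), dtype=np.float32)  # made-up 3x3 kernels, 3 -> 64 channels
dummy_b = np.zeros(64, dtype=np.float32)
conv_out = _conv_layer(dummy, dummy_w, dummy_b)       # 'SAME' padding keeps 224 x 224
pool_out = _pool_layer(conv_out)                      # 2x2 max-pooling halves it to 112 x 112
print(conv_out.get_shape())  # (1, 224, 224, 64)
print(pool_out.get_shape())  # (1, 112, 112, 64)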
Step two: Define the VGG network construction function
def net(data_path, input_image):
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    # Load the pre-trained weights
    data = scipy.io.loadmat(data_path)
    mean = data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = data['layers'][0]
    net = {}
    current = input_image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # The .mat file stores kernels as (width, height, in_channels, out_channels);
            # TensorFlow expects (height, width, in_channels, out_channels)
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            # Reshape the bias into a vector
            bias = bias.reshape(-1)
            current = _conv_layer(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = _pool_layer(current)
        # Store the output of each layer
        net[name] = current
    assert len(net) == len(layers)
    return net, mean_pixel, layers
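To get a feel for what net returns, a small inspection snippet (my own addition, assuming the .mat file has already been downloaded to the path used in the next step) can print the mean pixel and the tensor shape stored for every layer.

# Hypothetical inspection of the network dictionary (assumes the weight file exists locally)
probe = tf.placeholder('float', shape=(1, 224, 224, 3))
vgg, mean_px, names = net(os.getcwd() + '/data/imagenet-vgg-verydeep-19.mat', probe)
print('mean pixel:', mean_px)           # roughly the ImageNet mean, about [123.7, 116.8, 103.9]
for name in names:
    print(name, vgg[name].get_shape())  # the spatial size halves after every pool layer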
Step three: Construct the file paths and read the input image
# Return the current working directory
cwd = os.getcwd()
# Path to the model trained by others
vgg_path = cwd + '/data/imagenet-vgg-verydeep-19.mat'
# Path to the input image
img_path = cwd + '/data/cat.jpg'
input_image = imread(img_path)
shape = (1, input_image.shape[0], input_image.shape[1], input_image.shape[2])
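One caveat not mentioned in the original article: scipy.misc.imread and scipy.misc.imsave were removed from newer SciPy releases, so on a recent environment the I/O helpers from step one will fail. A drop-in replacement based on imageio is one possible workaround; the package and functions below are my assumption, not part of the original code.

# Optional replacement for the removed scipy.misc image I/O (assumes imageio is installed)
import imageio

def imread(path):
    return np.asarray(imageio.imread(path), dtype=np.float32)

def imsave(path, img):
    imageio.imwrite(path, np.clip(img, 0, 255).astype(np.uint8))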
Step four: Run the model and output the feature maps
with tf.Session() as sess:
    image = tf.placeholder('float', shape=shape)
    # Build the network from the pre-trained model
    nets, mean_pixel, all_layers = net(vgg_path, image)
    # Subtract the mean value from the input image
    input_image_pre = np.array([preprocess(input_image, mean_pixel)])
    layers = all_layers
    for i, layer in enumerate(layers):
        # Output the feature maps of each layer of the model
        features = nets[layer].eval(feed_dict={image: input_image_pre})
        print("Type of 'features' is", type(features))
        print("Shape of 'features' is %s" % (features.shape,))
        # Plot the first channel of the feature map
        if 1:
            plt.figure(i + 1, figsize=(10, 5))
            plt.matshow(features[0, :, :, 0], cmap=plt.cm.gray, fignum=i + 1)
            plt.title(layer)
            plt.colorbar()
            plt.show()
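Finally, note that the placeholder and session API above is TensorFlow 1.x code. If you only have TensorFlow 2.x installed, one possible way to run it unchanged (an assumption on my part, not covered by the original article) is to switch to the v1 compatibility layer at the top of the script.

# Possible TensorFlow 2.x workaround (assumption): use the v1 compatibility module
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # restores graph mode so tf.placeholder and tf.Session work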