Deep Learning (73): PyTorch study notes

Source: Internet
Author: User
Tags: split, volatile, pytorch, dataloader

Let me vent first: deep learning develops really fast, and the frameworks keep iterating one after another, which makes life hard for those of us who write deep learning code for a living. Since I started learning deep learning three years ago, the frameworks have changed again and again: Keras, Theano, Caffe, Darknet, TensorFlow, and now I am finally starting to use PyTorch.

I. Variables and differentiation: the torch.autograd module

When a Variable is created, requires_grad defaults to False, meaning the variable is treated as non-differentiable and no gradient is computed for it; setting it to True marks the variable as requiring gradients.

# coding=utf-8
# requires_grad defaults to False.
# If none of the variables require gradients when backward() is called,
# you will get an error saying there are no variables that require grad.
import torch
from torch import autograd

input = torch.FloatTensor([1, 2, 3])  # example values; any tensor works here
input_v = autograd.Variable(input, requires_grad=True)
loss = torch.mean(input_v)

print loss.requires_grad
loss.backward()
print input_v
print input_v.grad
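
One of this post's tags is volatile, and it reappears in the training code in section IV. In this pre-0.4 Variable API, volatile=True is the inference-time counterpart of requires_grad=False: anything computed from a volatile input builds no graph at all, which saves memory when only forward passes are needed. A minimal sketch of how volatility propagates, assuming the same old API as above (newer PyTorch replaces this flag with torch.no_grad()):

import torch
from torch import autograd

x = autograd.Variable(torch.FloatTensor([1, 2, 3]), volatile=True)
y = torch.mean(x * 2)
print y.volatile        # True: volatility propagates to every result
print y.requires_grad   # False: no graph was built, so backward() is impossible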

II. Data layer and transforms

# coding=utf-8
from PIL import Image
import os
import torch
import torchvision
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import DataLoader, Dataset

# Data transforms
data_transform_train = torchvision.transforms.Compose([
    torchvision.transforms.RandomRotation(32),
    torchvision.transforms.RandomCrop((32, 32)),  # crop size assumed to match the eval-time CenterCrop below
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_transform_eval = torchvision.transforms.Compose([
    torchvision.transforms.CenterCrop((32, 32)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# For a custom dataset, override these three methods: __init__, __getitem__, __len__.
class MyData(Dataset):
    def __init__(self, label_file, image_root, is_train=True):
        self.imagepaths = []
        self.labels = []
        self.is_train = is_train
        if is_train:
            self.transforms = data_transform_train
        else:
            self.transforms = data_transform_eval
        with open(label_file, 'r') as f:
            for line in f.readlines():  # read the label file: "<image path> <label>" per line
                self.imagepaths.append(os.path.join(image_root, line.split()[0]))
                self.labels.append(int(line.split()[1]))

    def __getitem__(self, item):
        x = Image.open(self.imagepaths[item]).resize((35, 35))
        y = self.labels[item]
        if self.is_train:
            # two independently augmented views of the same image
            # (used by the student/teacher training in section IV)
            return [self.transforms(x), self.transforms(x)], y
        else:
            return self.transforms(x), y

    def __len__(self):
        return len(self.imagepaths)

def make_weights_for_balanced_classes(labels, nclasses):
    count = {}
    for item in labels:
        if count.has_key(item):
            count[item] += 1
        else:
            count[item] = 1
    weight_per_class = {}
    N = len(labels)
    for key, value in count.items():
        weight_per_class[key] = N / float(value)
    weight = [0] * len(labels)
    for idx, val in enumerate(labels):
        weight[idx] = weight_per_class[val]
    return weight

train_data = MyData('data/train.txt', './', is_train=True)
weights = make_weights_for_balanced_classes(train_data.labels, 3)
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
train_dataloader_student = DataLoader(train_data, batch_size=6, sampler=sampler)
for x, y in train_dataloader_student:
    for xi in x:
        print y
        npimg = torchvision.utils.make_grid(xi).numpy()  # visualize the batch
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
        plt.show()
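
In the snippet above, make_weights_for_balanced_classes gives every sample a weight inversely proportional to its class frequency, and WeightedRandomSampler then draws dataset indices with replacement according to those weights, so rare classes are sampled about as often as common ones. A small sanity check on a synthetic, imbalanced label list (the labels are made up for illustration; no image files are needed because only the sampler is iterated):

import torch
from torch.utils.data.sampler import WeightedRandomSampler

# 80 samples of class 0, 10 each of classes 1 and 2
labels = [0] * 80 + [1] * 10 + [2] * 10
weights = torch.DoubleTensor(make_weights_for_balanced_classes(labels, 3))
sampler = WeightedRandomSampler(weights, len(weights))

counts = {0: 0, 1: 0, 2: 0}
for idx in sampler:  # the sampler yields dataset indices
    counts[labels[idx]] += 1
print counts  # all three classes come out roughly equally often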

III. Network architecture

# coding=utf-8
from moving_avarage_layer import conv2d_moving
import torch
from torch import autograd, nn
from torch.utils.data import DataLoader, Dataset
from data_layer import MyData, make_weights_for_balanced_classes
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn.functional as function
import os
import time

class MobileNet(nn.Module):
    def __init__(self):
        super(MobileNet, self).__init__()

        # standard 3x3 convolution + batch norm + ReLU
        def conv_bn(inp, oup, stride):
            return nn.Sequential(
                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True)
            )

        # depthwise separable convolution: depthwise 3x3 followed by pointwise 1x1
        def conv_dw(inp, oup, stride):
            return nn.Sequential(
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.ReLU(inplace=True),
                nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
                nn.ReLU(inplace=True),
            )

        # Several channel widths are unreadable in the source; apart from the
        # ones it preserves (64->96, 128->256, final width 512), the progression
        # below is an assumption that keeps the shapes consistent for 32x32 inputs.
        self.model = nn.Sequential(
            conv_bn(3, 32, 2),      # 32x32 -> 16x16
            conv_dw(32, 64, 1),
            conv_dw(64, 96, 2),     # 16x16 -> 8x8
            conv_dw(96, 128, 1),
            conv_dw(128, 128, 2),   # 8x8 -> 4x4
            conv_dw(128, 128, 1),
            conv_dw(128, 256, 2),   # 4x4 -> 2x2
            conv_dw(256, 512, 1),
            conv_dw(512, 512, 1),
            nn.AvgPool2d(2),        # 2x2 -> 1x1
        )
        self.fc = nn.Linear(512, 4)  # 4 output classes per the source; input width matches the 512-d feature

    def forward(self, x):
        x = self.model(x)
        # print x.shape
        x = x.view(-1, 512)
        x = self.fc(x)
        return x
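
Before wiring the network into a training loop, a quick smoke test helps confirm the geometry: feed a random batch shaped like the 32x32 crops from section II and check that one logit vector per image comes out (just a sketch; the batch size of 6 is arbitrary):

import torch
from torch import autograd

net = MobileNet()
x = autograd.Variable(torch.randn(6, 3, 32, 32))  # random batch of 6 RGB 32x32 images
out = net(x)
print out.size()  # expected: (6, 4), one logit per class per image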

IV. Optimization and training

def update_ema_variables(model, ema_model, alpha):
    # teacher = alpha * teacher + (1 - alpha) * student, parameter by parameter
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        ema_param.data.mul_(alpha).add_(1 - alpha, param.data)

def softmax_mse_loss(input_logits, target_logits):
    assert input_logits.size() == target_logits.size()
    input_softmax = function.softmax(input_logits, dim=1)
    target_softmax = function.softmax(target_logits, dim=1)
    num_classes = input_logits.size()[1]
    return function.mse_loss(input_softmax, target_softmax, size_average=False) / num_classes

torch.backends.cudnn.enabled = False
torch.manual_seed(7)
net_student = MobileNet().cuda()
net_teacher = MobileNet().cuda()
for param in net_teacher.parameters():
    param.detach_()  # the teacher is never updated by the optimizer
if os.path.isfile('teacher.pt'):
    net_student.load_state_dict(torch.load('teacher.pt'))
train_data = MyData('data/race/train.txt', './', is_train=True)
min_batch_size = 32
weights = make_weights_for_balanced_classes(train_data.labels, 5)
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
train_dataloader = DataLoader(train_data, batch_size=min_batch_size, sampler=sampler, num_workers=8)
valid_data = MyData('data/race/val.txt', './', is_train=False)
valid_dataloader = DataLoader(valid_data, batch_size=min_batch_size, shuffle=True, num_workers=8)
classify_loss_function = torch.nn.CrossEntropyLoss(size_average=False, ignore_index=-1).cuda()
optimizer = torch.optim.SGD(net_student.parameters(), lr=0.001, momentum=0.9)
globals_step = 0
for epoch in range(10000):
    globals_classify_loss = 0
    globals_consistency_loss = 0
    net_student.train()
    start = time.time()
    for index, (x, y) in enumerate(train_dataloader):
        optimizer.zero_grad()
        # first augmented view goes to the student
        x_student = autograd.Variable(x[0]).cuda()
        y = autograd.Variable(y).cuda()
        predict_student = net_student(x_student)
        classify_loss = classify_loss_function(predict_student, y) / min_batch_size
        sum_loss = classify_loss
        # second augmented view goes to the teacher, without building a graph
        x_teacher = autograd.Variable(x[1], volatile=True).cuda()
        predict_teacher = net_teacher(x_teacher)
        ema_logit = autograd.Variable(predict_teacher.detach().data, requires_grad=False)
        consistency_loss = softmax_mse_loss(predict_student, ema_logit) / min_batch_size
        consistency_weight = 1
        sum_loss += consistency_weight * consistency_loss
        globals_consistency_loss += consistency_loss.data[0]
        sum_loss.backward()
        optimizer.step()
        # the EMA decay ramps up from 0 toward 0.99 over the first steps
        alpha = min(1 - 1.0 / (globals_step + 1), 0.99)
        update_ema_variables(net_student, net_teacher, alpha)
        globals_classify_loss += classify_loss.data[0]
        globals_step += 1
    if epoch % 5 != 0:
        continue
    # validate every 5 epochs
    net_student.eval()
    correct = 0
    total = 0
    for images, labels in valid_dataloader:
        valid_input = autograd.Variable(images, volatile=True).cuda()
        outputs = net_student(valid_input)
        # print outputs.shape
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted.cpu() == labels).sum()
    print "epoch: %d" % epoch, "time: %d" % (time.time() - start), \
        "accuracy: %f" % (float(correct) / total), \
        "consistency loss: %f" % globals_consistency_loss, \
        "classify loss: %f" % globals_classify_loss
    torch.save(net_student.state_dict(), 'teacher.pt')
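
This training loop is the mean-teacher pattern: the student is trained on the classification loss plus a consistency loss that pulls its softmax toward the teacher's, while update_ema_variables keeps the teacher as an exponential moving average of the student, teacher = alpha * teacher + (1 - alpha) * student; no gradients ever flow into the teacher. A tiny check of that update rule on one-parameter modules (the values are chosen only for illustration):

import torch
from torch import nn

student = nn.Linear(1, 1, bias=False)
teacher = nn.Linear(1, 1, bias=False)
student.weight.data.fill_(1.0)
teacher.weight.data.fill_(0.0)

update_ema_variables(student, teacher, 0.99)
print teacher.weight.data[0][0]  # 0.99 * 0.0 + (1 - 0.99) * 1.0 = 0.01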
