Several segmentation losses

1 2D cross entropy (multi-class segmentation)

Definition

import torch
import torch.nn.functional as F
from distutils.version import LooseVersion


def cross_entropy2d(input, target, weight=None, size_average=True):
    # input: (n, c, h, w), target: (n, h, w)
    n, c, h, w = input.size()
    # log_p: (n, c, h, w)
    if LooseVersion(torch.__version__) < LooseVersion('0.3'):
        # ==0.2.x
        log_p = F.log_softmax(input)
    else:
        # >=0.3
        log_p = F.log_softmax(input, dim=1)
    # log_p: (n*h*w, c)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
    log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
    log_p = log_p.view(-1, c)
    # target: (n*h*w,)
    mask = target >= 0
    target = target[mask]
    loss = F.nll_loss(log_p, target, weight=weight, size_average=False)
    if size_average:
        loss /= mask.data.sum()
    return loss
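
As a quick sanity check, here is a minimal sketch (assuming the PyTorch 0.3-era API and arbitrary shapes): when the target contains no ignored (< 0) pixels, cross_entropy2d should agree with the built-in spatial cross entropy.

    import torch
    import torch.nn.functional as F
    from torch.autograd import Variable

    # random scores for 2 images, 5 classes, 8x8 pixels
    input = Variable(torch.randn(2, 5, 8, 8))
    # random class ids in [0, 5), so no ignored pixels
    target = Variable(torch.LongTensor(2, 8, 8).random_(0, 5))

    loss_a = cross_entropy2d(input, target)  # averaged over valid pixels
    loss_b = F.cross_entropy(input, target)  # built-in spatial version (>= 0.3)
    print(loss_a.data[0], loss_b.data[0])    # should match closely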

Usage

    data, target = Variable(data, volatile=True), Variable(target)
    score = self.model(data)

    loss = cross_entropy2d(score, target,
                           size_average=self.size_average)
    if np.isnan(float(loss.data[0])):
        raise ValueError('loss is nan while validating')
    val_loss += float(loss.data[0]) / len(data)

    imgs = data.data.cpu()
    lbl_pred = score.data.max(1)[1].cpu().numpy()[:, :, :]
    lbl_true = target.data.cpu()

Other metrics

metrics = label_accuracy_score(label_trues, label_preds, n_class)

import numpy as np


def _fast_hist(label_true, label_pred, n_class):
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2).reshape(n_class, n_class)
    return hist


def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.
      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc
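
To make the four returned numbers concrete, a toy example (the label maps are made up for illustration):

    import numpy as np

    label_true = np.array([[0, 0, 1],
                           [0, 1, 1],
                           [2, 2, 2]])
    label_pred = np.array([[0, 0, 1],
                           [0, 1, 0],
                           [2, 2, 1]])
    acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(
        [label_true], [label_pred], n_class=3)
    print(acc)  # 7 of 9 pixels correct -> 0.777...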

https://github.com/wkentaro/pytorch-fcn

2 BCE loss

    criterion = nn.BCELoss()
    y_pred = net(X)
    probs = F.sigmoid(y_pred)
    probs_flat = probs.view(-1)

    y_flat = y.view(-1)

    loss = criterion(probs_flat, y_flat.float())
    epoch_loss += loss.data[0]

    print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,
                                             loss.data[0]))

    optimizer.zero_grad()

    loss.backward()

    optimizer.step()
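
Note that nn.BCELoss applied to F.sigmoid outputs can be replaced by nn.BCEWithLogitsLoss on the raw scores, which is numerically more stable. A sketch, reusing the net, X and y assumed above:

    criterion = nn.BCEWithLogitsLoss()
    y_pred = net(X)   # raw logits, no explicit sigmoid
    loss = criterion(y_pred.view(-1), y.view(-1).float())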

At evaluation time the dice coefficient is used instead of BCE. The dice coefficient between a predicted mask A and a ground-truth mask B is 2|A ∩ B| / (|A| + |B|), which is what the code below implements (with a small smoothing constant):

import torch
from torch.autograd import Function, Variable


class DiceCoeff(Function):
    """Dice coeff for individual examples"""

    def forward(self, input, target):
        self.save_for_backward(input, target)
        self.inter = torch.dot(input, target) + 0.0001
        self.union = torch.sum(input) + torch.sum(target) + 0.0001
        t = 2 * self.inter.float() / self.union.float()
        return t

    # This function has only a single output, so it gets only one gradient
    def backward(self, grad_output):
        input, target = self.saved_variables
        grad_input = grad_target = None
        if self.needs_input_grad[0]:
            grad_input = grad_output * 2 * (target * self.union + self.inter) \
                         / self.union * self.union
        if self.needs_input_grad[1]:
            grad_target = None
        return grad_input, grad_target


def eval_net():
    ...
    y_pred = net(X)
    y_pred = (F.sigmoid(y_pred) > 0.6).float()
    # y_pred = F.sigmoid(y_pred).float()
    dice = dice_coeff(y_pred, y.float()).data[0]
    ...
 

Prediction

    X = torch.FloatTensor(X).unsqueeze(0)
    y = torch.ByteTensor(y).unsqueeze(0)
    if gpu:
        X = Variable(X, volatile=True).cuda()
        y = Variable(y, volatile=True).cuda()
    else:
        X = Variable(X, volatile=True)
        y = Variable(y, volatile=True)

    y_pred = net(X)
    y_pred = (F.sigmoid(y_pred) > 0.6).float()
    # y_pred = F.sigmoid(y_pred).float()

    dice = dice_coeff(y_pred, y.float()).data[0]
    tot += dice

    if 0:
        X = X.data.squeeze(0).cpu().numpy()
        X = np.transpose(X, axes=[1, 2, 0])
        y = y.data.squeeze(0).cpu().numpy()
        y_pred = y_pred.data.squeeze(0).squeeze(0).cpu().numpy()
        print(y_pred.shape)

        fig = plt.figure()
        ax1 = fig.add_subplot(1, 4, 1)
        ax1.imshow(X)
        ax2 = fig.add_subplot(1, 4, 2)
        ax2.imshow(y)
        ax3 = fig.add_subplot(1, 4, 3)
        ax3.imshow((y_pred > 0.5))

https://github.com/milesial/Pytorch-UNet/blob/master/eval.py

3 Dice loss stacked with BCE

    outputs = model(images)
    loss, bce_loss, soft_dice_loss = criterion(outputs, labels)
    loss_val = loss.data[0]
    sum_epoch_loss += loss_val
    if i == 0:
        optimizer.zero_grad()
    loss.backward()

loss.py

import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np


class BCELoss2d(nn.Module):
    """Binary cross entropy loss function"""

    def __init__(self):
        super(BCELoss2d, self).__init__()
        self.bce_loss = nn.BCEWithLogitsLoss()

    def forward(self, logits, labels):
        logits_flat = logits.view(-1)
        labels_flat = labels.view(-1)
        return self.bce_loss(logits_flat, labels_flat)


class WeightedBCELoss2d(nn.Module):
    def __init__(self):
        super(WeightedBCELoss2d, self).__init__()

    def forward(self, logits, labels, weights):
        w = weights.view(-1)
        logits = logits.view(-1)
        gt = labels.view(-1)
        # http://geek.csdn.net/news/detail/126833
        loss = logits.clamp(min=0) - logits * gt + torch.log(1 + torch.exp(-logits.abs()))
        loss = loss * w
        loss = loss.sum() / w.sum()
        return loss


class WeightedSoftDiceLoss(nn.Module):
    def __init__(self):
        super(WeightedSoftDiceLoss, self).__init__()

    def forward(self, logits, labels, weights):
        probs = F.sigmoid(logits)
        num = labels.size(0)
        w = weights.view(num, -1)
        w2 = w * w
        m1 = probs.view(num, -1)
        m2 = labels.view(num, -1)
        intersection = (m1 * m2)
        score = 2. * ((w2 * intersection).sum(1) + 1) / ((w2 * m1).sum(1) + (w2 * m2).sum(1) + 1)
        score = 1 - score.sum() / num
        return score


class SoftDiceLoss(nn.Module):
    def __init__(self):
        super(SoftDiceLoss, self).__init__()

    def forward(self, logits, labels):
        probs = F.sigmoid(logits)
        num = labels.size(0)
        m1 = probs.view(num, -1)
        m2 = labels.view(num, -1)
        intersection = (m1 * m2)
        score = 2. * (intersection.sum(1) + 1) / (m1.sum(1) + m2.sum(1) + 1)
        score = 1 - score.sum() / num
        return score


class DiceScore(nn.Module):
    def __init__(self, threshold=0.5):
        super(DiceScore, self).__init__()
        self.threshold = threshold

    def forward(self, logits, labels):
        probs = F.sigmoid(logits)
        num = labels.size(0)
        predicts = (probs.view(num, -1) > self.threshold).float()
        labels = labels.view(num, -1)
        intersection = (predicts * labels)
        score = 2. * (intersection.sum(1)) / (predicts.sum(1) + labels.sum(1))
        return score.mean()


def dice_score_np(predicted_masks, labels):
    assert len(predicted_masks.shape) >= 3
    num = predicted_masks.shape[0]
    predicted_masks = predicted_masks.reshape(num, -1).astype(float)
    assert predicted_masks.min() == 0 and predicted_masks.max() == 1.0
    labels = labels.reshape(num, -1)
    assert labels.min() == 0 and labels.max() == 1.0
    intersection = (predicted_masks * labels)
    assert not np.any((predicted_masks.sum(1) + labels.sum(1)) == 0)
    score = 2. * (intersection.sum(1)) / (predicted_masks.sum(1) + labels.sum(1))
    assert len(score.shape) == 1
    return score.mean()


class CombinedLoss(nn.Module):
    def __init__(self, is_weight=True, is_log_dice=False):
        super(CombinedLoss, self).__init__()
        self.is_weight = is_weight
        self.is_log_dice = is_log_dice
        if self.is_weight:
            self.weighted_bce = WeightedBCELoss2d()
            self.soft_weighted_dice = WeightedSoftDiceLoss()
        else:
            self.bce = BCELoss2d()
            self.soft_dice = SoftDiceLoss()

    def forward(self, logits, labels):
        size = logits.size()
        assert size[1] == 1, size
        logits = logits.view(size[0], size[2], size[3])
        labels = labels.view(size[0], size[2], size[3])
        if self.is_weight:
            batch_size, h, w = labels.size()
            # pooling window scales with the input resolution
            if h == 128:
                kernel_size = 11
            elif h == 256:
                kernel_size = 21
            elif h == 512:
                kernel_size = 21
            elif h == 1024:
                kernel_size = 41
            elif h == 1280:
                kernel_size = 51
            else:
                raise ValueError('Unknown height')
            a = F.avg_pool2d(labels, kernel_size=kernel_size,
                             padding=kernel_size // 2, stride=1)
            ind = a.ge(0.01) * a.le(0.99)
            ind = ind.float()
            weights = Variable(torch.ones(a.size())).cuda()
            w0 = weights.sum()
            weights += ind * 2
            w1 = weights.sum()
            weights = weights / w1 * w0
            bce_loss = self.weighted_bce(logits, labels, weights)
            dice_loss = self.soft_weighted_dice(logits, labels, weights)
        else:
            bce_loss = self.bce(logits, labels)
            dice_loss = self.soft_dice(logits, labels)
        if self.is_log_dice:
            l = bce_loss - (1 - dice_loss).log()
        else:
            l = bce_loss + dice_loss
        return l, bce_loss, dice_loss


def combined_loss(logits, labels, is_weight=True, is_log_dice=False):
    size = logits.size()
    assert size[1] == 1, size
    logits = logits.view(size[0], size[2], size[3])
    labels = labels.view(size[0], size[2], size[3])
    if is_weight:
        batch_size, h, w = labels.size()
        if h == 128:
            kernel_size = 11
        elif h == 256:
            kernel_size = 21
        elif h == 512:
            kernel_size = 21
        elif h == 1024:
            kernel_size = 41
        elif h == 1280:
            kernel_size = 51
        else:
            raise ValueError('Unknown height')
        a = F.avg_pool2d(labels, kernel_size=kernel_size,
                         padding=kernel_size // 2, stride=1)
        ind = a.ge(0.01) * a.le(0.99)
        ind = ind.float()
        weights = Variable(torch.ones(a.size())).cuda()
        w0 = weights.sum()
        weights += ind * 2
        w1 = weights.sum()
        weights = weights / w1 * w0
        bce_loss = WeightedBCELoss2d().cuda()(logits, labels, weights)
        dice_loss = WeightedSoftDiceLoss().cuda()(logits, labels, weights)
    else:
        bce_loss = BCELoss2d().cuda()(logits, labels)
        dice_loss = SoftDiceLoss().cuda()(logits, labels)
    if is_log_dice:
        l = bce_loss - (1 - dice_loss).log()
    else:
        l = bce_loss + dice_loss
    return l, bce_loss, dice_loss
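
The is_weight branch deserves a comment: average-pooling a binary mask yields values strictly between 0 and 1 only where the pooling window mixes 0s and 1s, so `a.ge(0.01) * a.le(0.99)` selects pixels near a mask boundary, which the loss then up-weights. A toy illustration of that step (kernel_size=3 and the 8x8 mask are chosen just for the demo):

    import torch
    import torch.nn.functional as F
    from torch.autograd import Variable

    labels = torch.zeros(1, 1, 8, 8)
    labels[:, :, 2:6, 2:6] = 1                     # a 4x4 square in the middle
    a = F.avg_pool2d(Variable(labels), kernel_size=3, padding=1, stride=1)
    ind = (a.ge(0.01) * a.le(0.99)).float()        # 1 on the ring around the square
    weights = 1 + ind * 2                          # boundary pixels get 3x weight
    print(weights.data.squeeze())                  # before the w0/w1 renormalization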

Predicting masks

    images = Variable(images.cuda(), volatile=True)

    outputs = model(images)
    outputs = F.upsample(outputs, size=original_shape, mode='bilinear')
    output_probs = F.sigmoid(outputs)
    if save_probs:
        probs_np = np.squeeze(output_probs.data.cpu().numpy())
        if len(probs_np.shape) == 2:
            probs_np = probs_np[np.newaxis, ...]
        assert len(probs_np.shape) == 3, probs_np.shape
        prob_images = np.asarray(np.round(probs_np * 255), dtype=np.uint8)
        for probs_img, sample_name in izip(prob_images, names):
            cv2.imwrite(str(output_dir.joinpath(sample_name + '.png')), probs_img)
4 Combining BCE and dice loss

The Albu solution also combines BCE and dice loss:

def dice_loss(preds, trues, weight=None, is_average=True):
    num = preds.size(0)
    preds = preds.view(num, -1)
    trues = trues.view(num, -1)
    if weight is not None:
        w = torch.autograd.Variable(weight).view(num, -1)
        preds = preds * w
        trues = trues * w
    intersection = (preds * trues).sum(1)
    scores = 2. * (intersection + 1) / (preds.sum(1) + trues.sum(1) + 1)

    if is_average:
        score = scores.sum() / num
        return torch.clamp(score, 0., 1.)
    else:
        return scores


def dice_clamp(preds, trues, is_average=True):
    preds = torch.round(preds)
    return dice_loss(preds, trues, is_average=is_average)


class DiceLoss(nn.Module):
    def __init__(self, size_average=True):
        super().__init__()
        self.size_average = size_average

    def forward(self, input, target, weight=None):
        return 1 - dice_loss(F.sigmoid(input), target,
                             weight=weight, is_average=self.size_average)


class BCEDiceLoss(nn.Module):
    def __init__(self, size_average=True):
        super().__init__()
        self.size_average = size_average
        self.dice = DiceLoss(size_average=size_average)

    def forward(self, input, target, weight=None):
        return nn.modules.loss.BCEWithLogitsLoss(size_average=self.size_average,
                                                 weight=weight)(input, target) + \
               self.dice(input, target, weight=weight)


ypreds = self.model(images)
loss = self.criterion(ypreds, ytrues)
ret['loss'] = ...
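
A minimal usage sketch of BCEDiceLoss (the shapes and the random binary mask are made up for illustration):

    import torch
    from torch.autograd import Variable

    criterion = BCEDiceLoss()
    logits = Variable(torch.randn(4, 1, 64, 64))                      # raw network output
    target = Variable(torch.FloatTensor(4, 1, 64, 64).random_(0, 2))  # 0/1 mask
    loss = criterion(logits, target)   # BCE-with-logits plus (1 - soft dice)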
