TensorFlow Code Snippets

Tensor Creation and Modification

Placeholder

https://www.tensorflow.org/api_docs/python/tf/placeholder

tf.placeholder(
    dtype,
    shape=None,
    name=None
)
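
A minimal usage sketch (the names x and y are illustrative): a placeholder has no value of its own and is fed a concrete array at run time via feed_dict.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
y = tf.reduce_mean(x)
with tf.Session() as sess:
    # Any batch size is accepted because the first dimension is None.
    print(sess.run(y, feed_dict={x: np.ones((2, 3))}))  # 1.0
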
Element Access

Access specific elements by mask
import numpy as np
import tensorflow as tf

a = tf.placeholder(dtype=tf.float32)
mask = a > 1
mask.set_shape([None])  # The rank of the mask must be known (not None).
b = tf.placeholder(dtype=tf.float32)
c = tf.boolean_mask(b, mask)
with tf.Session() as sess:
    a_val = np.ones((3,))
    a_val[1] = 3
    b_val = a_val
    c_val = sess.run(c, feed_dict={a: a_val, b: b_val})
    print(c_val)
Shape

Adding a Dimension

input = tf.expand_dims(
    input,
    axis=None,
    name=None
)
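
For instance (a minimal sketch with illustrative names), inserting a new dimension at the front or at the back of a rank-1 tensor:

import tensorflow as tf

t = tf.placeholder(tf.float32, shape=[3])
t_row = tf.expand_dims(t, axis=0)   # static shape becomes (1, 3)
t_col = tf.expand_dims(t, axis=-1)  # static shape becomes (3, 1)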
Set Shape

A common scenario: after some operation, the static shape of a tensor becomes None, but the operator (or we) know that certain dimensions are fixed. For the image below, for example, the third dimension is known to remain 3. Recording this information with set_shape saves a lot of trouble in later programming.

# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
# Restore the static shape, since the dynamic slice loses the 3rd dimension.
cropped_image.set_shape([None, None, 3])
Variable Names

tf.identity
import tensorflow as tf

a = tf.Variable(1, name='a')
b = tf.Variable(2, name='b')
with tf.name_scope('ss'):
    c = a + b
    c = tf.identity(c, 'c')
    # c = tf.add(a, b, name='c')

print(c.name)

ss/c:0

Type Conversions

fnmask = tf.cast(nmask, dtype)
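
For example (a minimal sketch; nmask here is an illustrative boolean mask), casting booleans to floats:

import tensorflow as tf

nmask = tf.constant([True, False, True])
fnmask = tf.cast(nmask, tf.float32)
with tf.Session() as sess:
    print(sess.run(fnmask))  # [1. 0. 1.]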
Basic Operations

Sum: adding two variables
import tensorflow as tf

a = tf.Variable(1, name='a')
b = tf.Variable(2, name='b')

c = tf.add(a, b, name='c')
c = a + b
Sum of all elements

scalar = tf.reduce_sum(tensor)
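
A quick sketch (the constant is illustrative):

import tensorflow as tf

tensor = tf.constant([[1., 2.], [3., 4.]])
scalar = tf.reduce_sum(tensor)
with tf.Session() as sess:
    print(sess.run(scalar))  # 10.0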
Max/Min

Largest/smallest element of a tensor
import numpy as np
import tensorflow as tf

a = tf.placeholder(dtype=tf.float32)
b = tf.reduce_max(a)  # tf.reduce_min for the minimum

with tf.Session() as sess:
    a_val = np.ones((3, 3))
    a_val[0, 2] = 100
    b_val = sess.run(b, feed_dict={a: a_val})
    print(b_val)

100.0

Element-wise max/min of two tensors

import numpy as np
import tensorflow as tf

a = tf.placeholder(dtype=tf.float32)
b = tf.placeholder(dtype=tf.float32)
c = tf.maximum(a, b)  # tf.minimum for the element-wise minimum
with tf.Session() as sess:
    a_val = np.ones((3, 3))
    a_val[0, 2] = 100
    b_val = a_val
    c_val = sess.run(c, feed_dict={a: a_val, b: b_val})
    print(c_val)

[[  1.   1. 100.]
 [  1.   1.   1.]
 [  1.   1.   1.]]

Boolean And

mask = tf.logical_and(mask1, mask2)
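
A minimal sketch (mask1 and mask2 are illustrative constants):

import tensorflow as tf

mask1 = tf.constant([True, True, False])
mask2 = tf.constant([True, False, False])
mask = tf.logical_and(mask1, mask2)
with tf.Session() as sess:
    print(sess.run(mask))  # [ True False False]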
Loss

Weighted Loss
loss_pos = tf.losses.compute_weighted_loss(
    losses,
    weights=1.0,
    scope=None,
    loss_collection=tf.GraphKeys.LOSSES
)

The average is computed over the entries with non-zero weights: the weighted sum of the losses is divided by the number of non-zero weights.
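
A minimal sketch of this non-zero averaging (losses and weights are illustrative constants):

import tensorflow as tf

losses = tf.constant([1.0, 3.0, 5.0])
weights = tf.constant([1.0, 1.0, 0.0])  # the third loss is masked out
loss_pos = tf.losses.compute_weighted_loss(losses, weights=weights)
with tf.Session() as sess:
    print(sess.run(loss_pos))  # (1.0 + 3.0) / 2 = 2.0

Optimization

Gradient Clipping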

https://stackoverflow.com/questions/36498127/how-to-effectively-apply-gradient-clipping-in-tensor-flow

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
gvs = optimizer.compute_gradients(cost)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
train_op = optimizer.apply_gradients(capped_gvs)

However, this clip cannot handle NaN:

import numpy as np
import tensorflow as tf

val = float('nan')
a = np.ones((3, 3)) * 0.5
a[0, 0] = 2
a[0, 1] = val
tf_a = tf.Variable(a)
tf_b = tf.clip_by_value(tf_a, -1, 1)
with tf.device('/cpu:0'):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run([tf_a, tf_b]))
[array([[ 2. ,  nan,  0.5],
        [ 0.5,  0.5,  0.5],
        [ 0.5,  0.5,  0.5]]),
 array([[ 1. ,  nan,  0.5],
        [ 0.5,  0.5,  0.5],
        [ 0.5,  0.5,  0.5]])]

Use tf.is_finite to handle both NaN and infinity:

import numpy as np
import tensorflow as tf

val = float('nan')
a = np.ones((3, 3)) * 0.5
a[0, 0] = np.infty
a[0, 1] = val
tf_a = tf.Variable(a)
# tf_b = tf.clip_by_value(tf_a, -1, 1)
mask = tf.is_finite(tf_a)
tf_c = tf.where(mask, tf_a, tf.zeros_like(tf_a))
with tf.device('/cpu:0'):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run([tf_a, tf_c]))
[array([[ inf,  nan,  0.5],
        [ 0.5,  0.5,  0.5],
        [ 0.5,  0.5,  0.5]]),
 array([[ 0. ,  0. ,  0.5],
        [ 0.5,  0.5,  0.5],
        [ 0.5,  0.5,  0.5]])]
py_func

https://www.tensorflow.org/api_docs/python/tf/py_func

import numpy as np
import tensorflow as tf


def add(a, b):
    return a + b


def tf_add(a, b):
    return tf.py_func(add, [a, b], tf.float32)


ta = tf.placeholder(dtype=tf.float32)
tb = tf.placeholder(dtype=tf.float32)
tc = tf_add(ta, tb)
with tf.Session() as sess:
    a_val = np.ones((2, 2))
    b_val = a_val * 2
    print(sess.run([tc], feed_dict={ta: a_val, tb: b_val}))

tf.py_func takes three arguments: the Python function object, the list of input tensors to pass in, and the TF dtype(s) of the returned value. If the dtype argument is a list, e.g. [tf.float32], the return value is also a list.
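
A minimal sketch of the list form (the stats function and names are illustrative): with the dtypes given as a list, py_func returns a list of tensors.

import numpy as np
import tensorflow as tf


def stats(x):
    # Runs as ordinary Python/NumPy code inside the graph.
    return x.min(), x.max()


t = tf.placeholder(tf.float32)
t_min, t_max = tf.py_func(stats, [t], [tf.float32, tf.float32])
with tf.Session() as sess:
    print(sess.run([t_min, t_max],
                   feed_dict={t: np.arange(4, dtype=np.float32)}))  # [0.0, 3.0]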
