This time, working from the Python API for math functions [1], let's check what kinds of operations are available.
Note: based on TensorFlow Python API 0.6.0.
1. Arithmetic Operations

| API | Description |
| --- | --- |
| tf.add(x, y, name=None) | Addition: x + y |
| tf.sub(x, y, name=None) | Subtraction: x - y |
| tf.mul(x, y, name=None) | Multiplication: x * y |
| tf.div(x, y, name=None) | Division: x / y |
| tf.truediv(x, y, name=None) | Division (floating point): x / y |
| tf.floordiv(x, y, name=None) | Division (rounded down): x / y |
| tf.mod(x, y, name=None) | Modulo (remainder): x % y |

Sample code:
```python
import tensorflow as tf

sess = tf.InteractiveSession()

################
# tf.add
################
x = tf.constant([1., 2.])
y = tf.constant([0.5, 0.5])
tf_add = tf.add(x, y)
print "tf.add"
print sess.run(tf_add)
# output:
# tf.add
# [ 1.5  2.5]

################
# tf.sub
################
x = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([[0.5, 0.5], [1., 1.]])
tf_sub = tf.sub(x, y)
print "tf.sub"
print sess.run(tf_sub)
# output:
# tf.sub
# [[ 0.5  1.5]
#  [ 2.   3. ]]

################
# tf.mul
################
x = tf.constant([1., 2.])
y = tf.constant([[2.], [3.]])
tf_mul = tf.mul(x, y)
print "tf.mul"
print sess.run(tf_mul)
# output:
# tf.mul
# [[ 2.  4.]
#  [ 3.  6.]]

################
# tf.div
################
x = tf.constant([10, 15])
y = tf.constant([3])
tf_div = tf.div(x, y)
print "tf.div"
print sess.run(tf_div)
# output:
# tf.div
# [3 5]

################
# tf.truediv
################
x = tf.constant([10, 15])
y = tf.constant([3])
tf_truediv = tf.truediv(x, y)
print "tf.truediv"
print sess.run(tf_truediv)
# output:
# tf.truediv
# [ 3.33333333  5.        ]

################
# tf.floordiv
################
x = tf.constant([10., 15.])
y = tf.constant([3.])
tf_floordiv = tf.floordiv(x, y)
print "tf.floordiv"
print sess.run(tf_floordiv)
# output:
# tf.floordiv
# [ 3.  5.]

################
# tf.mod
################
x = tf.constant([10., 15.])
y = tf.constant([3.])
tf_mod = tf.mod(x, y)
print "tf.mod"
print sess.run(tf_mod)
# output:
# tf.mod
# [ 1.  0.]

sess.close()
```
2. Basic Math Functions

| API | Description |
| --- | --- |
| tf.add_n(inputs, name=None) | Adds all input tensors element-wise |
| tf.abs(x, name=None) | Computes the absolute value (\|x\|) of a tensor |
| tf.neg(x, name=None) | Computes the negation (-x) element-wise |
| tf.sign(x, name=None) | Returns the sign of each element |
| tf.inv(x, name=None) | Computes the reciprocal (1/x) element-wise |
| tf.square(x, name=None) | Computes the square (x^2) element-wise |
| tf.round(x, name=None) | Rounds each element to the nearest integer |
| tf.sqrt(x, name=None) | Computes the square root (√x) element-wise |
| tf.rsqrt(x, name=None) | Computes the reciprocal square root (1/√x) element-wise |
| tf.pow(x, y, name=None) | Computes the power (x^y) element-wise |
| tf.exp(x, name=None) | Computes the exponential (e^x) element-wise |
| tf.log(x, name=None) | Computes the natural logarithm (log x) element-wise |
| tf.ceil(x, name=None) | Returns, element-wise, the smallest integer not less than x (ceiling) |
| tf.floor(x, name=None) | Returns, element-wise, the largest integer not greater than x (floor) |
| tf.maximum(x, y, name=None) | Returns the element-wise maximum of x and y |
| tf.minimum(x, y, name=None) | Returns the element-wise minimum of x and y |
| tf.cos(x, name=None) | Computes cosine (cos x) element-wise |
| tf.sin(x, name=None) | Computes sine (sin x) element-wise |

Sample code:
```python
import tensorflow as tf

sess = tf.InteractiveSession()

################
# tf.add_n
################
a = tf.constant([1., 2.])
b = tf.constant([3., 4.])
c = tf.constant([5., 6.])
tf_addn = tf.add_n([a, b, c])
print "tf.add_n"
print sess.run(tf_addn)
# output:
# tf.add_n
# [  9.  12.]

################
# tf.abs
################
x = tf.constant([[-1., 2.], [3., -4.]])
tf_abs = tf.abs(x)
print "tf.abs"
print sess.run(tf_abs)
# output:
# tf.abs
# [[ 1.  2.]
#  [ 3.  4.]]

################
# tf.neg
################
x = tf.constant([[-1., 2.], [3., -4.]])
tf_neg = tf.neg(x)
print "tf.neg"
print sess.run(tf_neg)
# output:
# tf.neg
# [[ 1. -2.]
#  [-3.  4.]]

################
# tf.sign
################
x = tf.constant([[-1., 2.], [3., -4.]])
tf_sign = tf.sign(x)
print "tf.sign"
print sess.run(tf_sign)
# output:
# tf.sign
# [[-1.  1.]
#  [ 1. -1.]]

################
# tf.inv
################
x = tf.constant([[-1., 2.], [3., -4.]])
tf_inv = tf.inv(x)
print "tf.inv"
print sess.run(tf_inv)
# output:
# tf.inv
# [[-1.          0.5       ]
#  [ 0.33333334 -0.25      ]]

################
# tf.square
################
x = tf.constant([[-1., 2.], [3., -4.]])
tf_square = tf.square(x)
print "tf.square"
print sess.run(tf_square)
# output:
# tf.square
# [[  1.   4.]
#  [  9.  16.]]

################
# tf.round
################
x = tf.constant([0.9, 2.5, 2.3, -4.4])
tf_round = tf.round(x)
print "tf.round"
print sess.run(tf_round)
# output:
# tf.round
# [ 1.  3.  2. -4.]

################
# tf.sqrt
################
x = tf.constant([[1., 2.], [3., 4.]])
tf_sqrt = tf.sqrt(x)
print "tf.sqrt"
print sess.run(tf_sqrt)
# output:
# tf.sqrt
# [[ 0.99999994  1.41421342]
#  [ 1.73205078  1.99999988]]

################
# tf.rsqrt
################
x = tf.constant([[1., 2.], [3., 4.]])
tf_rsqrt = tf.rsqrt(x)
print "tf.rsqrt"
print sess.run(tf_rsqrt)
# output:
# tf.rsqrt
# [[ 0.99999994  0.70710671]
#  [ 0.57735026  0.49999997]]

################
# tf.pow
################
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf_pow = tf.pow(x, y)
print "tf.pow"
print sess.run(tf_pow)
# output:
# tf.pow
# [[  256 65536]
#  [    9    27]]

################
# tf.exp
################
x = tf.constant([[1., 2.], [3., 4.]])
tf_exp = tf.exp(x)
print "tf.exp"
print sess.run(tf_exp)
# output:
# tf.exp
# [[  2.71828175   7.38905621]
#  [ 20.08553696  54.59815216]]

################
# tf.log
################
x = tf.constant([[1., 2.], [3., 4.]])
tf_log = tf.log(x)
print "tf.log"
print sess.run(tf_log)
# output:
# tf.log
# [[ 0.          0.69314718]
#  [ 1.09861231  1.38629436]]

################
# tf.ceil
################
x = tf.constant([[1.1, 2.2], [3.3, 4.4]])
tf_ceil = tf.ceil(x)
print "tf.ceil"
print sess.run(tf_ceil)
# output:
# tf.ceil
# [[ 2.  3.]
#  [ 4.  5.]]

################
# tf.floor
################
x = tf.constant([[1.1, 2.2], [3.3, 4.4]])
tf_floor = tf.floor(x)
print "tf.floor"
print sess.run(tf_floor)
# output:
# tf.floor
# [[ 1.  2.]
#  [ 3.  4.]]

################
# tf.maximum
################
x = tf.constant([[2, 8], [3, 12]])
y = tf.constant([[4, 10], [1, 9]])
tf_maximum = tf.maximum(x, y)
print "tf.maximum"
print sess.run(tf_maximum)
# output:
# tf.maximum
# [[ 4 10]
#  [ 3 12]]

################
# tf.minimum
################
x = tf.constant([[2, 8], [3, 12]])
y = tf.constant([[4, 10], [1, 9]])
tf_minimum = tf.minimum(x, y)
print "tf.minimum"
print sess.run(tf_minimum)
# output:
# tf.minimum
# [[2 8]
#  [1 9]]

################
# tf.cos
################
x = tf.constant([[2., 8.], [3., 12.]])
tf_cos = tf.cos(x)
print "tf.cos"
print sess.run(tf_cos)
# output:
# tf.cos
# [[-0.41614681 -0.14550003]
#  [-0.9899925   0.84385395]]

################
# tf.sin
################
x = tf.constant([[2., 8.], [3., 12.]])
tf_sin = tf.sin(x)
print "tf.sin"
print sess.run(tf_sin)
# output:
# tf.sin
# [[ 0.90929741  0.98935825]
#  [ 0.14112    -0.53657293]]

sess.close()
```
3. Matrix Math Functions

| API | Description |
| --- | --- |
| tf.diag(diagonal, name=None) | Returns a diagonal tensor with the given diagonal values |
| tf.transpose(a, perm=None, name='transpose') | Transposes a tensor (swaps rows and columns; permutes dimensions according to perm) |
| tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None) | Computes the matrix product |
| tf.batch_matmul(x, y, adj_x=None, adj_y=None, name=None) | Multiplies the matrix slices of two tensors |
| tf.matrix_determinant(input, name=None) | Computes the determinant of a square matrix |
| tf.batch_matrix_determinant(input, name=None) | Computes the determinants of the square matrix slices of a tensor |
| tf.matrix_inverse(input, name=None) | Computes the inverse of a square matrix |
| tf.batch_matrix_inverse(input, name=None) | Computes the inverses of the square matrix slices of a tensor |
| tf.cholesky(input, name=None) | Computes the Cholesky decomposition of a square matrix |
| tf.batch_cholesky(input, name=None) | Computes the Cholesky decompositions of the square matrix slices of a tensor |
| tf.self_adjoint_eig(input, name=None) | Computes the eigendecomposition of a self-adjoint (Hermitian) matrix |
| tf.batch_self_adjoint_eig(input, name=None) | Computes the eigendecompositions of the self-adjoint matrix slices of a tensor |

Sample code:
```python
# -*- coding: utf-8 -*-
import tensorflow as tf

sess = tf.InteractiveSession()

################################
# tf.diag
################################
# 'diagonal' is [1, 2, 3, 4]
tf_diag = tf.diag([1, 2, 3, 4])
print "tf.diag"
print sess.run(tf_diag)
# output:
# tf.diag
# [[1 0 0 0]
#  [0 2 0 0]
#  [0 0 3 0]
#  [0 0 0 4]]

################################
# tf.transpose
################################
# 'x' is [[1 2 3]
#         [4 5 6]]
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf_trans = tf.transpose(x)
print "tf.transpose"
print sess.run(tf_trans)
# output:
# [[1 4]
#  [2 5]
#  [3 6]]

# Equivalently
tf_trans = tf.transpose(x, perm=[1, 0])
print sess.run(tf_trans)
# output:
# [[1 4]
#  [2 5]
#  [3 6]]

# 'perm' is more useful for n-dimensional tensors, for n > 2
# 'x' is [[[ 1  2  3]
#          [ 4  5  6]]
#         [[ 7  8  9]
#          [10 11 12]]]
# Take the transpose of the matrices in dimension-0
x = tf.constant([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
tf_trans = tf.transpose(x, perm=[0, 2, 1])
print sess.run(tf_trans)
# output:
# [[[ 1  4]
#   [ 2  5]
#   [ 3  6]]
#
#  [[ 7 10]
#   [ 8 11]
#   [ 9 12]]]

################################
# tf.matmul
################################
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
tf_matmul = tf.matmul(a, b)
print "tf.matmul"
print sess.run(tf_matmul)
# output:
# tf.matmul
# [[ 58  64]
#  [139 154]]

################################
# tf.matrix_determinant
################################
a = tf.constant([1., 2., 3., 4.], shape=[2, 2])
tf_determinant = tf.matrix_determinant(a)
print "tf.matrix_determinant"
print sess.run(tf_determinant)
# output:
# tf.matrix_determinant
# -2.0

################################
# tf.matrix_inverse
################################
a = tf.constant([1., 2., 3., 4.], shape=[2, 2])
tf_inverse = tf.matrix_inverse(a)
print "tf.matrix_inverse"
print sess.run(tf_inverse)
# output:
# tf.matrix_inverse
# [[-2.00000024  1.00000012]
#  [ 1.50000012 -0.50000006]]

################################
# tf.batch_matmul
# tf.batch_matrix_determinant
# tf.batch_matrix_inverse
################################
# Still investigating matrix slices (see the sketch after this block)

################################
# tf.cholesky
# tf.batch_cholesky
################################
# Still investigating the Cholesky decomposition

################################
# tf.self_adjoint_eig
# tf.batch_self_adjoint_eig
################################
# Still investigating Hermitian matrices

sess.close()
```
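The batch and decomposition routines above are only marked as "still investigating." As a rough starting point, here is a minimal, untested sketch of tf.batch_matmul, tf.batch_matrix_determinant, tf.batch_matrix_inverse, and tf.cholesky, written against the 0.6.0 signatures listed in the table. The input values are my own assumptions and the expected outputs are not shown because I have not verified them against that release; tf.self_adjoint_eig and tf.batch_self_adjoint_eig are left out here.

```python
import tensorflow as tf

sess = tf.InteractiveSession()

# tf.batch_matmul multiplies the matrix slices of two 3-D (or higher) tensors:
# here x and y both have shape [2, 2, 2], so the result should hold two
# independent 2x2 matrix products.
x = tf.constant([[[1., 2.], [3., 4.]],
                 [[1., 0.], [0., 1.]]])
y = tf.constant([[[5., 6.], [7., 8.]],
                 [[2., 3.], [4., 5.]]])
tf_batch_matmul = tf.batch_matmul(x, y)
print "tf.batch_matmul"
print sess.run(tf_batch_matmul)

# tf.batch_matrix_determinant / tf.batch_matrix_inverse apply the corresponding
# single-matrix op to every square matrix slice of the input tensor.
print "tf.batch_matrix_determinant"
print sess.run(tf.batch_matrix_determinant(x))
print "tf.batch_matrix_inverse"
print sess.run(tf.batch_matrix_inverse(x))

# tf.cholesky factorizes a symmetric positive-definite matrix into a
# triangular factor; [[4, 2], [2, 3]] is a small SPD example.
a = tf.constant([[4., 2.], [2., 3.]])
tf_chol = tf.cholesky(a)
print "tf.cholesky"
print sess.run(tf_chol)

sess.close()
```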
4. Complex Number Functions

| API | Description |
| --- | --- |
| tf.complex(real, imag, name=None) | Converts real and imaginary parts into a complex tensor |
| tf.complex_abs(x, name=None) | Computes the absolute value of a complex number (a+bj → √(a^2+b^2)) |
| tf.conj(in_, name=None) | Returns the complex conjugate (a+bj → a-bj) |
| tf.imag(in_, name=None) | Returns the imaginary part (a+bj → b) |
| tf.real(in_, name=None) | Returns the real part (a+bj → a) |

Sample code:
```python
import tensorflow as tf

sess = tf.InteractiveSession()

################
# tf.complex
################
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf_complex = tf.complex([2.25, 3.25], [4.75, 5.75])
print "tf.complex"
print sess.run(tf_complex)
# output:
# tf.complex
# [ 2.25+4.75j  3.25+5.75j]

################
# tf.complex_abs
################
# tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
x = tf.complex([-2.25, -3.25], [4.75, 5.75])
tf_abs = tf.complex_abs(x)
print "tf.complex_abs"
print sess.run(tf_abs)
# output:
# tf.complex_abs
# [ 5.25594902  6.60492229]

################
# tf.conj
################
# tensor 'in' is [-2.25 + 4.75j, 3.25 + 5.75j]
x = tf.complex([-2.25, 3.25], [4.75, 5.75])
tf_conj = tf.conj(x)
print "tf.conj"
print sess.run(tf_conj)
# output:
# tf.conj
# [-2.25-4.75j  3.25-5.75j]

################
# tf.imag
################
# tensor 'in' is [-2.25 + 4.75j, 3.25 + 5.75j]
x = tf.complex([-2.25, 3.25], [4.75, 5.75])
tf_imag = tf.imag(x)
print "tf.imag"
print sess.run(tf_imag)
# output:
# tf.imag
# [ 4.75  5.75]

################
# tf.real
################
# tensor 'in' is [-2.25 + 4.75j, 3.25 + 5.75j]
x = tf.complex([-2.25, 3.25], [4.75, 5.75])
tf_real = tf.real(x)
print "tf.real"
print sess.run(tf_real)
# output:
# tf.real
# [-2.25  3.25]

sess.close()
```
5. Tensor Reduction

| API | Description |
| --- | --- |
| tf.reduce_sum(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the sum of tensor elements |
| tf.reduce_prod(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the product of tensor elements |
| tf.reduce_min(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the minimum of tensor elements |
| tf.reduce_max(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the maximum of tensor elements |
| tf.reduce_mean(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the mean of tensor elements |
| tf.reduce_all(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the logical AND of tensor elements |
| tf.reduce_any(input_tensor, reduction_indices=None, keep_dims=False, name=None) | Computes the logical OR of tensor elements |
| tf.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None) | Computes the element-wise sum of a list of tensors |

Sample code:
```python
import tensorflow as tf

sess = tf.InteractiveSession()

##################
# tf.reduce_sum
##################
# 'x' is [[1, 1, 1]
#         [1, 1, 1]]
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf_rsum1 = tf.reduce_sum(x)
tf_rsum2 = tf.reduce_sum(x, 0)
tf_rsum3 = tf.reduce_sum(x, 1)
tf_rsum4 = tf.reduce_sum(x, 1, keep_dims=True)
tf_rsum5 = tf.reduce_sum(x, [0, 1])
print "tf.reduce_sum"
print sess.run(tf_rsum1)
print sess.run(tf_rsum2)
print sess.run(tf_rsum3)
print sess.run(tf_rsum4)
print sess.run(tf_rsum5)
# output:
# tf.reduce_sum
# 6
# [2 2 2]
# [3 3]
# [[3]
#  [3]]
# 6

##################
# tf.reduce_prod
##################
x = tf.constant([[1, 2, 3], [1, 2, 3]])
tf_rprod1 = tf.reduce_prod(x)
tf_rprod2 = tf.reduce_prod(x, 0)
tf_rprod3 = tf.reduce_prod(x, 1)
tf_rprod4 = tf.reduce_prod(x, 1, keep_dims=True)
tf_rprod5 = tf.reduce_prod(x, [0, 1])
print "tf.reduce_prod"
print sess.run(tf_rprod1)
print sess.run(tf_rprod2)
print sess.run(tf_rprod3)
print sess.run(tf_rprod4)
print sess.run(tf_rprod5)
# output:
# tf.reduce_prod
# 36
# [1 4 9]
# [6 6]
# [[6]
#  [6]]
# 36

##################
# tf.reduce_min
##################
x = tf.constant([[1, 2, 3], [1, 2, 3]])
tf_rmin1 = tf.reduce_min(x)
tf_rmin2 = tf.reduce_min(x, 0)
tf_rmin3 = tf.reduce_min(x, 1)
tf_rmin4 = tf.reduce_min(x, 1, keep_dims=True)
tf_rmin5 = tf.reduce_min(x, [0, 1])
print "tf.reduce_min"
print sess.run(tf_rmin1)
print sess.run(tf_rmin2)
print sess.run(tf_rmin3)
print sess.run(tf_rmin4)
print sess.run(tf_rmin5)
# output:
# tf.reduce_min
# 1
# [1 2 3]
# [1 1]
# [[1]
#  [1]]
# 1

##################
# tf.reduce_max
##################
x = tf.constant([[1, 2, 3], [1, 2, 3]])
tf_rmax1 = tf.reduce_max(x)
tf_rmax2 = tf.reduce_max(x, 0)
tf_rmax3 = tf.reduce_max(x, 1)
tf_rmax4 = tf.reduce_max(x, 1, keep_dims=True)
tf_rmax5 = tf.reduce_max(x, [0, 1])
print "tf.reduce_max"
print sess.run(tf_rmax1)
print sess.run(tf_rmax2)
print sess.run(tf_rmax3)
print sess.run(tf_rmax4)
print sess.run(tf_rmax5)
# output:
# tf.reduce_max
# 3
# [1 2 3]
# [3 3]
# [[3]
#  [3]]
# 3

##################
# tf.reduce_mean
##################
# 'x' is [[1., 1.]
#         [2., 2.]]
x = tf.constant([[1., 1.], [2., 2.]])
tf_rmean1 = tf.reduce_mean(x)
tf_rmean2 = tf.reduce_mean(x, 0)
tf_rmean3 = tf.reduce_mean(x, 1)
print "tf.reduce_mean"
print sess.run(tf_rmean1)
print sess.run(tf_rmean2)
print sess.run(tf_rmean3)
# output:
# tf.reduce_mean
# 1.5
# [ 1.5  1.5]
# [ 1.  2.]

##################
# tf.reduce_all
##################
# 'x' is [[True, True]
#         [False, False]]
x = tf.constant([[True, True], [False, False]])
tf_rall1 = tf.reduce_all(x)
tf_rall2 = tf.reduce_all(x, 0)
tf_rall3 = tf.reduce_all(x, 1)
print "tf.reduce_all"
print sess.run(tf_rall1)
print sess.run(tf_rall2)
print sess.run(tf_rall3)
# output:
# tf.reduce_all
# False
# [False False]
# [ True False]

##################
# tf.reduce_any
##################
# 'x' is [[True, True]
#         [False, False]]
x = tf.constant([[True, True], [False, False]])
tf_rany1 = tf.reduce_any(x)
tf_rany2 = tf.reduce_any(x, 0)
tf_rany3 = tf.reduce_any(x, 1)
print "tf.reduce_any"
print sess.run(tf_rany1)
print sess.run(tf_rany2)
print sess.run(tf_rany3)
# output:
# tf.reduce_any
# True
# [ True  True]
# [ True False]

##################
# tf.accumulate_n
##################
# tensor 'a' is [[1, 2], [3, 4]]
# tensor `b` is [[5, 0], [0, 6]]
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf_accum1 = tf.accumulate_n([a, b, a])
# Explicitly pass shape and type
tf_accum2 = tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
print "tf.accumulate_n"
print sess.run(tf_accum1)
print sess.run(tf_accum2)
# output:
# tf.accumulate_n
# [[ 7  4]
#  [ 6 14]]
# [[ 7  4]
#  [ 6 14]]

sess.close()
```
6. Segmentation

| API | Description |
| --- | --- |
| tf.segment_sum(data, segment_ids, name=None) | Computes the sum within each segment of a tensor |
| tf.segment_prod(data, segment_ids, name=None) | Computes the product within each segment of a tensor |
| tf.segment_min(data, segment_ids, name=None) | Computes the minimum within each segment of a tensor |
| tf.segment_max(data, segment_ids, name=None) | Computes the maximum within each segment of a tensor |
| tf.segment_mean(data, segment_ids, name=None) | Computes the mean within each segment of a tensor |
| tf.unsorted_segment_sum(data, segment_ids, num_segments, name=None) | Computes the sum within each segment of a tensor (segment IDs need not be sorted) |
| tf.sparse_segment_sum(data, indices, segment_ids, name=None) | Computes the sum over selected rows of a tensor, per segment |
| tf.sparse_segment_mean(data, indices, segment_ids, name=None) | Computes the mean over selected rows of a tensor, per segment |
Sample code:

```python
import tensorflow as tf

sess = tf.InteractiveSession()

##########################
# tf.segment_sum
##########################
c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
tf_sum = tf.segment_sum(c, tf.constant([0, 0, 1]))
print "tf.segment_sum"
print sess.run(tf_sum)
# output:
# tf.segment_sum
# [[0 0 0 0]
#  [5 6 7 8]]

##########################
# tf.segment_prod
##########################
c = tf.constant([[1, 2, 3, 4], [1, 2, 3, 4], [5, 6, 7, 8]])
tf_prod = tf.segment_prod(c, tf.constant([0, 0, 1]))
print "tf.segment_prod"
print sess.run(tf_prod)
# output:
# tf.segment_prod
# [[ 1  4  9 16]
#  [ 5  6  7  8]]

##########################
# tf.segment_min
##########################
c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
tf_min = tf.segment_min(c, tf.constant([0, 0, 1]))
print "tf.segment_min"
print sess.run(tf_min)
# output:
# tf.segment_min
# [[-1 -2 -3 -4]
#  [ 5  6  7  8]]

##########################
# tf.segment_max
##########################
c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
tf_max = tf.segment_max(c, tf.constant([0, 0, 1]))
print "tf.segment_max"
print sess.run(tf_max)
# output:
# tf.segment_max
# [[1 2 3 4]
#  [5 6 7 8]]

##########################
# tf.segment_mean
##########################
c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
tf_mean = tf.segment_mean(c, tf.constant([0, 0, 1]))
print "tf.segment_mean"
print sess.run(tf_mean)
# output:
# tf.segment_mean
# [[0 0 0 0]
#  [5 6 7 8]]

##########################
# tf.unsorted_segment_sum
##########################
c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
tf_unsorted = tf.unsorted_segment_sum(c, tf.constant([1, 0, 0]), 2)
print "tf.unsorted_segment_sum"
print sess.run(tf_unsorted)
# output:
# tf.unsorted_segment_sum
# [[4 4 4 4]
#  [1 2 3 4]]

##########################
# tf.sparse_segment_sum
##########################
c = tf.constant([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]])
# Select two rows, one segment.
tf_ssum1 = tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# Select two rows, two segments.
tf_ssum2 = tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# Select all rows, two segments.
tf_ssum3 = tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
print "tf.sparse_segment_sum"
print sess.run(tf_ssum1)
print sess.run(tf_ssum2)
print sess.run(tf_ssum3)
# output:
# tf.sparse_segment_sum
# [[0 0 0 0]]
# [[ 1  2  3  4]
#  [-1 -2 -3 -4]]
# [[0 0 0 0]
#  [5 6 7 8]]

##########################
# tf.sparse_segment_mean
##########################
c = tf.constant([[1., 2., 3., 4.], [-1., -2., -3., -4.], [5., 6., 7., 8.]])
tf_smean1 = tf.sparse_segment_mean(c, tf.constant([0, 1]), tf.constant([0, 0]))
tf_smean2 = tf.sparse_segment_mean(c, tf.constant([0, 1]), tf.constant([0, 1]))
tf_smean3 = tf.sparse_segment_mean(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
print "tf.sparse_segment_mean"
print sess.run(tf_smean1)
print sess.run(tf_smean2)
print sess.run(tf_smean3)
# output:
# tf.sparse_segment_mean
# [[ 0.  0.  0.  0.]]
# [[ 1.  2.  3.  4.]
#  [-1. -2. -3. -4.]]
# [[ 0.  0.  0.  0.]
#  [ 5.  6.  7.  8.]]

sess.close()
```
7. Sequence Comparison and Indexing

| API | Description |
| --- | --- |
| tf.argmin(input, dimension, name=None) | Returns the index of the smallest value along the given dimension of a tensor |
| tf.argmax(input, dimension, name=None) | Returns the index of the largest value along the given dimension of a tensor |
| tf.listdiff(x, y, name=None) | Computes the difference between two lists |
| tf.where(input, name=None) | Returns the coordinates of true values in a boolean tensor |
| tf.unique(x, name=None) | Finds the unique elements of a 1-D tensor |
| tf.edit_distance(hypothesis, truth, normalize=True, name='edit_distance') | Computes the Levenshtein distance between sequences |
| tf.invert_permutation(x, name=None) | Computes the inverse permutation of a tensor |

Sample code:
```python
import tensorflow as tf

sess = tf.InteractiveSession()

########################
# tf.argmin
########################
x = tf.constant([[1, 2, 3], [3, 2, 1]])
tf_argmin1 = tf.argmin(x, 0)
tf_argmin2 = tf.argmin(x, 1)
print "tf.argmin"
print sess.run(tf_argmin1)
print sess.run(tf_argmin2)
# output:
# tf.argmin
# [0 0 1]
# [0 2]

########################
# tf.argmax
########################
x = tf.constant([[1, 2, 3], [3, 2, 1]])
tf_argmax1 = tf.argmax(x, 0)
tf_argmax2 = tf.argmax(x, 1)
print "tf.argmax"
print sess.run(tf_argmax1)
print sess.run(tf_argmax2)
# output:
# tf.argmax
# [1 0 0]
# [2 0]

########################
# tf.listdiff
########################
x = tf.constant([1, 2, 3, 4, 5, 6])
y = tf.constant([1, 3, 5])
out, idx = tf.listdiff(x, y)
print "tf.listdiff"
print sess.run(out)
print sess.run(idx)
# output:
# tf.listdiff
# [2 4 6]
# [1 3 5]

########################
# tf.where
########################
# 'input' tensor is [[True, False]
#                    [True, False]]
# 'input' has two true values, so output has two coordinates.
# 'input' has rank of 2, so coordinates have two indices.
input = tf.constant([[True, False], [True, False]])
tf_where1 = tf.where(input)

# `input` tensor is [[[True, False]
#                     [True, False]]
#                    [[False, True]
#                     [False, True]]
#                    [[False, False]
#                     [False, True]]]
# 'input' has 5 true values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
input = tf.constant([[[True, False], [True, False]],
                     [[False, True], [False, True]],
                     [[False, False], [False, True]]])
tf_where2 = tf.where(input)
print "tf.where"
print sess.run(tf_where1)
print sess.run(tf_where2)
# output:
# tf.where
# [[0 0]
#  [1 0]]
# [[0 0 0]
#  [0 1 0]
#  [1 0 1]
#  [1 1 1]
#  [2 1 1]]

########################
# tf.unique
########################
x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
y, idx = tf.unique(x)
print "tf.unique"
print sess.run(y)
print sess.run(idx)
# output:
# tf.unique
# [1 2 4 7 8]
# [0 0 1 2 2 2 3 4 4]

########################
# tf.edit_distance
########################
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
#   (0,0) = ["a"]
#   (1,0) = ["b"]
hypothesis = tf.SparseTensor(
    indices=tf.constant([[0, 0, 0], [1, 0, 0]], "int64"),
    values=["a", "b"],
    shape=tf.constant([2, 1, 1], "int64"))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
#   (0,0) = []
#   (0,1) = ["a"]
#   (1,0) = ["b", "c"]
#   (1,1) = ["a"]
truth = tf.SparseTensor(
    indices=tf.constant([[0, 1, 0], [1, 0, 0], [1, 0, 1], [1, 1, 0]], "int64"),
    values=["a", "b", "c", "a"],
    shape=tf.constant([2, 2, 2], "int64"))
tf_edit_dist = tf.edit_distance(hypothesis, truth, True)
print "tf.edit_distance"
print sess.run(tf_edit_dist)
# output:
# tf.edit_distance
# [[ inf  1. ]
#  [ 0.5  1. ]]

########################
# tf.invert_permutation
########################
# tensor `x` is [3, 4, 0, 2, 1]
x = tf.constant([3, 4, 0, 2, 1])
tf_invert = tf.invert_permutation(x)
print "tf.invert_permutation"
print sess.run(tf_invert)
# output:
# tf.invert_permutation
# [2 4 3 0 1]

sess.close()
```
Uh oh, this is bad.
I haven't used linear algebra in a long time, so I've forgotten quite a lot.
----
Reference URL:
[1] Math | TensorFlow