12.8. Mathematical Operations
from pprint import pprint
import tensorflow as tf
Linux 5.4.0-74-generic
Python 3.9.5 @ GCC 7.3.0
Latest build date 2021.06.21
tensorflow version: 2.5.0
Arithmetic Operations
tf.add(x, y, name=None)
tf.add_n(inputs, name=None)
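A minimal sketch of the difference between the two (values chosen only for illustration): tf.add sums two tensors element-wise, while tf.add_n sums a whole list of same-shape tensors.
x = tf.constant([1, 2, 3])
y = tf.constant([4, 5, 6])
print(tf.add(x, y))         # element-wise sum of two tensors: [5 7 9]
print(tf.add_n([x, y, y]))  # sum of a list of same-shape tensors: [9 12 15]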
tf.divide(x, y, name=None)
x = tf.constant([16, 12, 11])
y = tf.constant([4, 6, 2])
tf.divide(x, y)
<tf.Tensor: shape=(3,), dtype=float64, numpy=array([4. , 2. , 5.5])>
tf.multiply(x, y, name=None)
Element-wise multiplication.
x = tf.constant(([1, 2, 3, 4]))
print(tf.multiply(x, x))
x = tf.ones([1, 2])
y = tf.ones([2, 1])
print(x * y) # Taking advantage of operator overloading
tf.Tensor([ 1 4 9 16], shape=(4,), dtype=int32)
tf.Tensor(
[[1. 1.]
[1. 1.]], shape=(2, 2), dtype=float32)
tf.realdiv(x, y, name=None)
tf.truncatediv(x, y, name=None)
tf.truncatemod(x, y, name=None)
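A small illustrative sketch (my own values): truncatediv truncates the quotient toward zero, and truncatemod returns the matching C-style remainder, which takes the sign of x.
x = tf.constant([7, -7])
y = tf.constant([2, 2])
print(tf.truncatediv(x, y))  # quotient truncated toward zero: [ 3 -3]
print(tf.truncatemod(x, y))  # remainder with the sign of x: [ 1 -1]
print(tf.realdiv(tf.constant([7.0, -7.0]), tf.constant([2.0, 2.0])))  # real-valued division: [ 3.5 -3.5]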
tf.subtract(x, y, name=None)
Returns $x - y$.
tf.truediv(x, y, name=None)
Returns $x / y$.
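For contrast, a quick sketch (illustrative values): tf.subtract works element-wise, and tf.truediv always produces a floating-point result, even for integer inputs.
x = tf.constant([10, 9, 8])
y = tf.constant([4, 3, 2])
print(tf.subtract(x, y))  # [6 6 6]
print(tf.truediv(x, y))   # integer inputs are cast to float: [2.5 3.  4. ]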
Other Common Operations
tf.abs(x, name=None)
tf.cumsum(x, axis=0, exclusive=False, reverse=False, name=None)
# tf.cumsum([a, b, c]) # [a, a + b, a + b + c]
x = tf.constant([2, 4, 6, 8])
print(tf.cumsum(x))
print(tf.cumsum(x, exclusive=True))
# This is more efficient than using separate tf.reverse ops
print(tf.cumsum(x, reverse=True))
tf.Tensor([ 2 6 12 20], shape=(4,), dtype=int32)
tf.Tensor([ 0 2 6 12], shape=(4,), dtype=int32)
tf.Tensor([20 18 14 8], shape=(4,), dtype=int32)
tf.exp(x, name=None)
$y=e^x$
tf.floor(x, name=None)
x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
tf.floor(x).numpy()
array([ 1., -2., 5., -3., 0., inf], dtype=float32)
tf.maximum(x, y, name=None)
tf.minimum(x, y, name=None)
tf.negative(x, name=None)
tf.pow(x, y, name=None)
Returns $x^y$.
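A brief sketch of these element-wise operations (values are illustrative):
x = tf.constant([2.0, -3.0, 5.0])
y = tf.constant([1.0, 4.0, 5.0])
print(tf.maximum(x, y))  # element-wise maximum: [2. 4. 5.]
print(tf.minimum(x, y))  # element-wise minimum: [ 1. -3.  5.]
print(tf.negative(x))    # [-2.  3. -5.]
print(tf.pow(x, 2.0))    # x^y element-wise: [ 4.  9. 25.]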
tf.round(x, name=None)
tf.sqrt(x, name=None)
Returns $\sqrt{x}$.
tf.square(x, name=None)
Returns $x^2$.
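A short illustrative sketch; note that tf.round rounds ties to the nearest even value.
x = tf.constant([1.5, 2.5, -4.0])
print(tf.round(x))   # ties round to even: [ 2.  2. -4.]
print(tf.square(x))  # [ 2.25  6.25 16.  ]
print(tf.sqrt(tf.constant([4.0, 2.0])))  # [2.        1.4142135]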
tf.sigmoid(x, name=None)
tf.sign(x, name=None)
tf.scalar_mul(scalar, x, name=None)
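A small sketch of the remaining three (values chosen for illustration):
x = tf.constant([-1.0, 0.0, 2.0])
print(tf.sigmoid(x))          # 1 / (1 + exp(-x)), roughly [0.269 0.5 0.881]
print(tf.sign(x))             # [-1.  0.  1.]
print(tf.scalar_mul(3.0, x))  # every element multiplied by the scalar: [-3.  0.  6.]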
Reduction Functions
tf.reduce_all(input_tensor, axis=None, keepdims=False, name=None)
x = tf.constant([[True, False], [True, True]])
print(tf.reduce_all(x))
print(tf.reduce_all(x, 0))
print(tf.reduce_all(x, 1))
print(tf.reduce_all(x, 1, True))
tf.Tensor(False, shape=(), dtype=bool)
tf.Tensor([ True False], shape=(2,), dtype=bool)
tf.Tensor([False True], shape=(2,), dtype=bool)
tf.Tensor(
[[False]
[ True]], shape=(2, 1), dtype=bool)
tf.reduce_any(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_max(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_mean(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_min(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_prod(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_sum(input_tensor, axis=None, keepdims=False, name=None)
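The numeric reductions follow the same axis/keepdims pattern as tf.reduce_all above; a brief sketch with illustrative values:
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
print(tf.reduce_sum(x))           # sum over all elements: 10.0
print(tf.reduce_sum(x, axis=0))   # column sums: [4. 6.]
print(tf.reduce_mean(x, axis=1))  # row means: [1.5 3.5]
print(tf.reduce_max(x))           # 4.0
print(tf.reduce_prod(x, axis=1))  # row products: [ 2. 12.]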
Trigonometric Functions
tf.acos(x, name=None)
tf.acosh(x, name=None)
tf.asin(x, name=None)
tf.asinh(x, name=None)
tf.atan(x, name=None)
tf.atan2(y, x, name=None)
tf.atanh(x, name=None)
tf.cos(x, name=None)
tf.cosh(x, name=None)
tf.sin(x, name=None)
tf.sinh(x, name=None)
tf.tan(x, name=None)
tf.tanh(x, name=None)
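All of these operate element-wise on radians; a quick sketch (illustrative values, outputs rounded in the comments):
import math
x = tf.constant([0.0, math.pi / 2, math.pi])
print(tf.sin(x))  # roughly [0. 1. 0.] (tiny floating-point error at pi)
print(tf.cos(x))  # roughly [ 1.  0. -1.]
print(tf.atan2(tf.constant(1.0), tf.constant(1.0)))  # pi / 4, roughly 0.785
print(tf.tanh(tf.constant([0.0, 1.0])))              # roughly [0.    0.762]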
Comparison and Logical Functions
tf.equal(x, y, name=None)
tf.not_equal(x, y, name=None)
tf.greater(x, y, name=None)
tf.greater_equal(x, y, name=None)
tf.less(x, y, name=None)
tf.less_equal(x, y, name=None)
tf.logical_not(x, name=None)
tf.logical_or(x, y, name=None)
tf.logical_and(x, y, name=None)
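The comparison functions return boolean tensors element-wise, which the logical functions then combine; a short illustrative sketch:
x = tf.constant([1, 2, 3])
y = tf.constant([3, 2, 1])
print(tf.equal(x, y))        # [False  True False]
print(tf.greater(x, y))      # [False False  True]
print(tf.less_equal(x, y))   # [ True  True False]
a = tf.constant([True, False])
b = tf.constant([False, False])
print(tf.logical_and(a, b))  # [False False]
print(tf.logical_or(a, b))   # [ True False]
print(tf.logical_not(a))     # [False  True]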
Linear Algebra
tf.matmul(a, b, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False, a_is_sparse=False, b_is_sparse=False, name=None)
Matrix multiplication.
transpose_a / transpose_b: if True, a / b is transposed before multiplication.
adjoint_a / adjoint_b: if True, a / b is conjugated and transposed before multiplication.
a_is_sparse / b_is_sparse: if True, a / b is treated as a sparse matrix. Note that this does not support tf.sparse.SparseTensor.
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
print(a)
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
print(b)
c = tf.matmul(a, b)
print(c)
tf.Tensor(
[[1 2 3]
[4 5 6]], shape=(2, 3), dtype=int32)
tf.Tensor(
[[ 7 8]
[ 9 10]
[11 12]], shape=(3, 2), dtype=int32)
tf.Tensor(
[[ 58 64]
[139 154]], shape=(2, 2), dtype=int32)
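A small follow-up sketch (reusing a from above, with an illustrative bt) showing transpose_b: passing the second operand already stored in transposed layout yields the same product.
bt = tf.constant([7, 9, 11, 8, 10, 12], shape=[2, 3])  # b stored in transposed layout
print(tf.matmul(a, bt, transpose_b=True))              # same result as c: [[ 58  64] [139 154]]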
tf.tensordot(a, b, axes, name=None)
Tensor contraction of a and b along the specified axes. For two vectors with axes=1 it reduces to the inner product $\mathbf{x}^\top \mathbf{y} = \sum_{i=1}^{d} x_i y_i$.
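A minimal sketch of that case (illustrative vectors):
x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([4.0, 5.0, 6.0])
print(tf.tensordot(x, y, axes=1))  # inner product: 1*4 + 2*5 + 3*6 = 32.0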