TensorFlow 2.0 Minimal Logic

1. What tf.function does

tf.function is essentially decorator syntax that traces a Python function and compiles it into a TensorFlow graph.
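A minimal sketch of typical usage (the function and inputs below are illustrative, not from the original):

import tensorflow as tf

@tf.function  # the first call traces the Python function into a graph
def scaled_sum(x, y):
    return tf.reduce_sum(x * y)

# Calls with the same input signature reuse the traced graph
print(scaled_sum(tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0])))  # tf.Tensor(11.0, ...)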

2. Running barebones TF

import tensorflow as tf

# Placeholder 1-D data (not in the original snippet) for fitting y = a * X + b
X = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])
y = tf.constant([3.0, 5.0, 7.0, 9.0, 11.0])

# The model parameters are trainable variables
a = tf.Variable(0.0)
b = tf.Variable(0.0)
variables = [a, b]

num_epoch = 10000
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)

for e in range(num_epoch):
    # 1. Use tf.GradientTape() to record the operations needed for gradients
    with tf.GradientTape() as tape:
        # 2. Forward pass and loss
        y_pred = a * X + b
        loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))

    # 3. TensorFlow computes the gradient of the loss w.r.t. the parameters
    grads = tape.gradient(loss, variables)

    # 4. The optimizer updates the parameters from the gradients
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
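
Section 1's tf.function slots straight into this loop. A sketch (my own refactoring, not in the original) that wraps one update step so it runs as a traced graph:

@tf.function  # traced into a graph on the first call, reused afterwards
def train_step():
    with tf.GradientTape() as tape:
        y_pred = a * X + b
        loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
    return loss

for e in range(num_epoch):
    loss = train_step()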
The same training loop can be driven by a tf.keras.Model subclass instead of hand-written variables:

X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])


class Linear(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(
            units=1,
            activation=None,
            kernel_initializer=tf.zeros_initializer(),
            bias_initializer=tf.zeros_initializer()
        )

    def call(self, inputs):
        output = self.dense(inputs)
        return output


# The following code is structurally similar to the previous section
model = Linear()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
for i in range(100):
    with tf.GradientTape() as tape:
        # Call the model: y_pred = model(X) instead of writing y_pred = a * X + b explicitly
        y_pred = model(X)
        loss = tf.reduce_mean(tf.square(y_pred - y))
    # The model.variables property returns all variables in the model at once
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
print(model.variables)
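
With units=1 and a 3-feature input, model.variables should hold the Dense layer's kernel of shape (3, 1) and its bias of shape (1,), both zero-initialized before training.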

3. Listing all CPUs and GPUs

# tf.config.experimental.list_physical_devices was the TF 2.0 path;
# TF 2.1+ also exposes the stable alias tf.config.list_physical_devices
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
cpus = tf.config.experimental.list_physical_devices(device_type='CPU')
print(gpus, cpus)