def encoder(X_in, keep_prob):
    """VAE encoder: map input images to a latent Gaussian distribution.

    Args:
        X_in: batch of flattened 28x28 grayscale images
            (reshaped internally to [-1, 28, 28, 1]).
        keep_prob: dropout keep probability (scalar or placeholder).

    Returns:
        z:  latent sample drawn via the reparameterization trick.
        mn: mean of the latent Gaussian, shape [batch, n_latent].
        sd: 0.5 * (dense output) used as log-std in exp(sd) below.

    NOTE(review): relies on `lrelu` and `n_latent` defined elsewhere
    in the file — confirm they are in scope.
    """
    # Leaky ReLU for the conv stack (defined elsewhere in the file).
    activation = lrelu
    with tf.variable_scope("encoder", reuse=None):
        X = tf.reshape(X_in, shape=[-1, 28, 28, 1])
        # Two stride-2 convolutions downsample 28x28 -> 7x7,
        # followed by one stride-1 convolution; dropout after each.
        x = tf.layers.conv2d(X, filters=64, kernel_size=4, strides=2,
                             padding='same', activation=activation)
        x = tf.nn.dropout(x, keep_prob)
        x = tf.layers.conv2d(x, filters=64, kernel_size=4, strides=2,
                             padding='same', activation=activation)
        x = tf.nn.dropout(x, keep_prob)
        x = tf.layers.conv2d(x, filters=64, kernel_size=4, strides=1,
                             padding='same', activation=activation)
        x = tf.nn.dropout(x, keep_prob)
        x = tf.contrib.layers.flatten(x)
        # Two parallel dense heads: mean and (half) log-std of q(z|x).
        mn = tf.layers.dense(x, units=n_latent)
        sd = 0.5 * tf.layers.dense(x, units=n_latent)
        # Reparameterization trick: z = mu + eps * sigma, eps ~ N(0, I),
        # so gradients flow through mn and sd.
        epsilon = tf.random_normal(tf.stack([tf.shape(x)[0], n_latent]))
        z = mn + tf.multiply(epsilon, tf.exp(sd))
        return z, mn, sd
定义解码器
解码器不会关心输入值是不是从我们定义的某个特定分布中采样得到的。它仅仅会尝试重建输入图像。最后,我们使用了一系列的转置卷积(transpose convolution)。
def decoder(sampled_z, keep_prob):
    """VAE decoder: reconstruct an image from a latent sample.

    Args:
        sampled_z: latent vector produced by the encoder.
        keep_prob: dropout keep probability.

    NOTE(review): the source article is paginated and this function is
    cut off here — the dense layers expanding to 28*28, the final
    reshape, and the `return` statement are on the missing page.
    TODO: restore the truncated tail from the full source.

    Relies on `lrelu`, `inputs_decoder`, and `reshaped_dim` defined
    elsewhere in the file — confirm they are in scope.
    """
    with tf.variable_scope("decoder", reuse=None):
        x = tf.layers.dense(sampled_z, units=inputs_decoder, activation=lrelu)
        # inputs_decoder * 2 + 1 is presumably chosen so the unit count
        # matches prod(reshaped_dim[1:]) — verify against the definitions.
        x = tf.layers.dense(x, units=inputs_decoder * 2 + 1, activation=lrelu)
        x = tf.reshape(x, reshaped_dim)
        # Transposed convolutions upsample back toward image resolution;
        # dropout between the first two.
        x = tf.layers.conv2d_transpose(x, filters=64, kernel_size=4, strides=2,
                                       padding='same', activation=tf.nn.relu)
        x = tf.nn.dropout(x, keep_prob)
        x = tf.layers.conv2d_transpose(x, filters=64, kernel_size=4, strides=1,
                                       padding='same', activation=tf.nn.relu)
        x = tf.nn.dropout(x, keep_prob)
        x = tf.layers.conv2d_transpose(x, filters=64, kernel_size=4, strides=1,
                                       padding='same', activation=tf.nn.relu)
        x = tf.contrib.layers.flatten(x)
推荐阅读
Tech Neo技术沙龙 | 11月25号,九州云/ZStack与您一起探讨云时代网络边界管理实践 有多少程序员,就有多少定义。所以我只询问了一些非常有名且经验丰富的程序员。 优秀程序员>>>详细阅读
本文标题:如何使用TensorFlow和自编码器模型生成手写数字
地址:http://www.17bianji.com/lsqh/38737.html
1/2 1