ops.py
import numpy as np
import tensorflow as tf


def batch_norm(opts, _input, is_train, reuse, scope, scale=True):
    """Batch normalization based on tf.contrib.layers (TensorFlow 1.x).

    Epsilon and the moving-average decay are read from the opts dict
    ('batch_norm_eps' and 'batch_norm_decay').
    """
    return tf.contrib.layers.batch_norm(
        _input, center=True, scale=scale,
        epsilon=opts['batch_norm_eps'], decay=opts['batch_norm_decay'],
        is_training=is_train, reuse=reuse, updates_collections=None,
        scope=scope, fused=False)
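
# Usage sketch (illustrative, not from the original file): the typical
# train/eval pattern, assuming opts carries the two keys read above.
#
#   opts = {'batch_norm_eps': 1e-5, 'batch_norm_decay': 0.9}
#   h_train = batch_norm(opts, h, is_train=True, reuse=None, scope='bn1')
#   h_eval = batch_norm(opts, h, is_train=False, reuse=True, scope='bn1')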

def linear(opts, input_, output_dim, scope=None, init='normal', reuse=None):
    """Fully connected layer; inputs with more than 2 dimensions are flattened.

    init='normal' draws weights from N(0, init_std^2); any other value uses
    an identity initialization, which requires in_shape == output_dim.
    """
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    assert len(shape) > 1, 'Linear expects at least a 2d [batch, dim] tensor.'
    in_shape = shape[1]
    if len(shape) > 2:
        # Flatten [batch, d1, d2, ...] into [batch, d1 * d2 * ...].
        input_ = tf.reshape(input_, [-1, np.prod(shape[1:])])
        in_shape = np.prod(shape[1:])
    with tf.variable_scope(scope or "lin", reuse=reuse):
        if init == 'normal':
            matrix = tf.get_variable(
                "W", [in_shape, output_dim], tf.float32,
                tf.random_normal_initializer(stddev=stddev))
        else:
            matrix = tf.get_variable(
                "W", [in_shape, output_dim], tf.float32,
                tf.constant_initializer(np.identity(in_shape)))
        bias = tf.get_variable(
            "b", [output_dim],
            initializer=tf.constant_initializer(bias_start))
    return tf.matmul(input_, matrix) + bias
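
# Usage sketch (illustrative): a 4d activation is flattened automatically,
# so the layer can sit directly on top of a convolutional stack.
#
#   logits = linear(opts, conv_out, output_dim=10, scope='fc_out')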

def conv2d(opts, input_, output_dim, d_h=2, d_w=2, scope=None,
           conv_filters_dim=None, padding='SAME', l2_norm=False):
    """2d convolution with optionally L2-normalized filters.

    The filter size defaults to opts['conv_filters_dim']; strides are
    (d_h, d_w) over the spatial dimensions.
    """
    stddev = opts['init_std']
    bias_start = opts['init_bias']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        conv_filters_dim = opts['conv_filters_dim']
    k_h = conv_filters_dim
    k_w = k_h
    assert len(shape) == 4, 'Conv2d works only with 4d tensors.'
    with tf.variable_scope(scope or 'conv2d'):
        w = tf.get_variable(
            'filter', [k_h, k_w, shape[-1], output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        if l2_norm:
            # Normalize the filters along the input-channel axis.
            w = tf.nn.l2_normalize(w, 2)
        conv = tf.nn.conv2d(
            input_, w, strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_dim],
            initializer=tf.constant_initializer(bias_start))
        conv = tf.nn.bias_add(conv, biases)
    return conv
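
# Usage sketch (illustrative): with the default strides of 2 and 'SAME'
# padding, each call halves the spatial resolution.
#
#   h = conv2d(opts, x, output_dim=64, scope='c1')    # 32x32 -> 16x16
#   h = conv2d(opts, h, output_dim=128, scope='c2')   # 16x16 -> 8x8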

def deconv2d(opts, input_, output_shape, d_h=2, d_w=2, scope=None,
             conv_filters_dim=None, padding='SAME'):
    """Transposed 2d convolution ("deconvolution").

    output_shape must be a 4d [batch, height, width, channels] shape.
    """
    stddev = opts['init_std']
    shape = input_.get_shape().as_list()
    if conv_filters_dim is None:
        conv_filters_dim = opts['conv_filters_dim']
    k_h = conv_filters_dim
    k_w = k_h
    assert len(shape) == 4, 'Conv2d_transpose works only with 4d tensors.'
    assert len(output_shape) == 4, 'output_shape should be 4-dimensional.'
    with tf.variable_scope(scope or "deconv2d"):
        # Note the transposed filter layout: [k_h, k_w, out_channels, in_channels].
        w = tf.get_variable(
            'filter', [k_h, k_w, output_shape[-1], shape[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(
            input_, w, output_shape=output_shape,
            strides=[1, d_h, d_w, 1], padding=padding)
        biases = tf.get_variable(
            'b', [output_shape[-1]],
            initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
    return deconv
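

# A minimal smoke test (not part of the original project): wires one op of
# each kind into a small graph and runs it on random data. The opts values
# below are illustrative assumptions, not the project's configuration.
if __name__ == '__main__':
    opts = {'init_std': 0.02, 'init_bias': 0.0, 'conv_filters_dim': 4,
            'batch_norm_eps': 1e-5, 'batch_norm_decay': 0.9}
    batch = 5
    x = tf.placeholder(tf.float32, [batch, 8, 8, 3])
    h = conv2d(opts, x, output_dim=16, scope='c1')           # -> [5, 4, 4, 16]
    h = batch_norm(opts, h, is_train=True, reuse=None, scope='bn1')
    h = tf.nn.relu(h)
    y = linear(opts, h, output_dim=10, scope='fc1')          # -> [5, 10]
    z = deconv2d(opts, h, [batch, 8, 8, 8], scope='d1')      # -> [5, 8, 8, 8]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {x: np.random.rand(batch, 8, 8, 3).astype(np.float32)}
        y_val, z_val = sess.run([y, z], feed_dict=feed)
        print(y_val.shape, z_val.shape)  # (5, 10) (5, 8, 8, 8)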