-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfake_test.py
66 lines (52 loc) · 1.85 KB
/
fake_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import tensorflow as tf
import numpy as np
import estimator
# Run TF 1.x ops imperatively; must be called once, before any other TF op.
tf.enable_eager_execution()
# Synthetic data: 1024 train / 128 dev rows of 50 features drawn uniformly
# from [-10, 10); cast to float32 to match the Keras layers' default dtype.
fake_train = np.random.uniform(-10, 10, size=[1024, 50]).astype(np.float32)
fake_dev = np.random.uniform(-10, 10, size=[128, 50]).astype(np.float32)
def _map(x):
    """Turn one feature vector into a {"input", "label"} example dict.

    The label is 1.0 when the elements of ``x`` sum to a positive value,
    else 0.0. ``keepdims`` keeps the label shaped ``[1]`` so it lines up
    with the model's ``Dense(1)`` output.
    """
    # NOTE: the argument is `keepdims` — the old `keep_dims` spelling is
    # deprecated in TF 1.x and was removed in TF 2.x.
    y = tf.reduce_sum(x, keepdims=True) > 0
    y = tf.cast(y, tf.float32)
    return {"input": x, "label": y}
def data_fn():
    """Build the train/dev input pipelines and wrap them in a DataSpec.

    Both pipelines repeat forever, map raw rows to {"input", "label"}
    dicts, and emit fixed-size batches of 128 (remainders dropped).
    """
    train_pipeline = (
        tf.data.Dataset.from_tensor_slices(fake_train)
        .repeat()
        .map(_map)
        .batch(128, drop_remainder=True)
    )
    dev_pipeline = (
        tf.data.Dataset.from_tensor_slices(fake_dev)
        .repeat()
        .map(_map)
        .batch(128, drop_remainder=True)
    )
    return estimator.DataSpec(train=train_pipeline, dev=dev_pipeline)
def model_fn(data, training):
    """Run a tiny 2-layer binary classifier and package loss/metrics.

    Args:
        data: dict with "input" ([batch, 50] floats) and "label"
            ([batch, 1] 0/1 floats) — as produced by `_map`.
        training: bool-like flag; dropout is only active (rate 0.1)
            when it is true.

    Returns:
        estimator.ModelSpec carrying the loss, an SGD optimizer, the
        trainable variables, and an "accuracy" metric.
    """
    # Layer creation order is kept (hidden first) so weight
    # initialization draws from the RNG in the same sequence.
    hidden = tf.keras.layers.Dense(50, "tanh")
    output = tf.keras.layers.Dense(1, "sigmoid")
    # Rate is 0.1 during training, 0.0 otherwise.
    drop_rate = 0.1 * tf.cast(training, tf.float32)

    h = hidden(data["input"])
    h = tf.nn.dropout(h, rate=drop_rate)
    predictions = output(h)

    labels = data["label"]
    loss = tf.reduce_mean(
        tf.keras.losses.binary_crossentropy(labels, predictions))
    accuracy = tf.reduce_mean(
        tf.keras.metrics.binary_accuracy(labels, predictions))

    return estimator.ModelSpec(
        loss=loss,
        optimizer=tf.train.GradientDescentOptimizer(1e-3),
        trainable_variables=hidden.weights + output.weights,
        metric={"accuracy": accuracy},
    )
# Training driver: 200 rounds of 1000 train steps each, evaluating for
# 20 steps per round and checkpointing to "model_dir" every 40 rounds.
cfg = estimator.RunConfig(
    train_steps_per_round=1000,
    eval_steps_per_round=20,
    model_dir="model_dir",
    save_every_rounds=40,
)
estimator.Estimator(model_fn, data_fn, cfg).run(200)