def get_loss(prediction, label):
    """Return the mean absolute (L1) training loss between prediction and label.

    `label` is given a trailing axis via `expand_dims` so it matches
    `prediction`'s shape (presumably prediction has a final dim of 1 —
    TODO confirm against the Model's output shape).
    """
    return tf.losses.absolute_difference(tf.expand_dims(label, -1), prediction)
def make_train_op(opt, loss):
    """Build the op that computes gradients of `loss` and applies them via `opt`.

    NOTE(review): the global step created at module level is never passed to
    `minimize`, so it is not incremented by training — confirm whether that
    is intended; if not, call `opt.minimize(loss, global_step=...)`.
    """
    apply_gradient_op = opt.minimize(loss)
    return apply_gradient_op
def evaluate_mae(prediction, label):
    """Return the streaming mean-absolute-error metric for validation.

    `label` is cast to float32 to match `prediction`'s dtype. Per the
    `tf.metrics` contract this returns a `(value_tensor, update_op)` pair and
    creates *local* variables, which must be initialized with
    `tf.local_variables_initializer()` before the metric is run.
    """
    return tf.metrics.mean_absolute_error(tf.cast(label, tf.float32), prediction)
# --- Graph construction ------------------------------------------------------
optimizer = tf.train.AdamOptimizer()
global_step = tf.train.get_or_create_global_step()

# NOTE(review): reuse=True on the first entry into scope 'input' fails in TF1
# unless these variables already exist elsewhere in the graph — confirm;
# tf.AUTO_REUSE may be what was intended.
with tf.variable_scope('input', reuse=True):
    training_inp = InputPipe()
    data_size = training_inp.data_size
    validate_inp = InputPipe(is_train=False)

model = Model(training_inp.feature_num)
scope = tf.get_variable_scope()
training_prediction, train_true_y = model(training_inp)
train_loss = get_loss(training_prediction, train_true_y)
train_op = make_train_op(optimizer, train_loss)
validate_prediction, validate_true_y = model(validate_inp)
validate_loss = evaluate_mae(validate_prediction, validate_true_y)

# Bug fix: tf.metrics.mean_absolute_error creates LOCAL variables; running
# validate_loss with only the global initializer raises
# FailedPreconditionError, so initialize locals as well.
init = tf.group(tf.global_variables_initializer(),
                tf.local_variables_initializer())

# --- Training loop -----------------------------------------------------------
with tf.Session() as sess:
    sess.run(init)
    training_inp.init_sess(sess)
    # NOTE(review): validate_inp.init_sess(sess) is never called — confirm the
    # validation pipe needs no session-side initialization.
    list_grads = []
    for epoch in range(n_repeats):
        # tqdm range bar labeled with the 1-based epoch number.
        tqr = trange(steps_per_epoch, desc="%2d" % (epoch + 1), leave=False)
        for _ in tqr:
            # Run one optimization step and fetch both losses in a single
            # sess.run so they come from the same batch of graph execution.
            op, train_loss_value, validate_loss_value = sess.run(
                [train_op, train_loss, validate_loss])
……而不进行合并或任何复杂的操作。

答案 0（得分：0）

您看不到其他分支上的更改。如果要查看另一个分支的提交，必须先签出该分支，才能看到这些提交。
如果您想查看某个分支上所做的更改，可以运行 git diff，这样就能看到它与之前的内容实际有何不同。如果希望所有分支都包含同一个提交，则必须将该提交合并过去。