I am trying to use TensorFlow.js to compute the gradients of a loss with respect to a network's trainable weights, in order to apply those gradients to the network's weights. In Python this is easily done with the tf.gradients() function, which takes a minimum of two inputs representing dx and dy. However, I cannot reproduce that behavior in TensorFlow.js. I am not sure whether my understanding of the gradient of the loss with respect to the weights is wrong, or whether my code contains a bug.
I have spent some time analyzing the core code of the tfjs-node package to understand how this is done when we call tf.model.fit(), but with little success so far.
let model = build_model(); //Two stacked dense layers followed by two parallel dense layers for the output
let loss = compute_loss(...); //This function returns a tf.Tensor of shape [1] containing the mean loss for the batch.
const f = () => loss;
const grad = tf.variableGrads(f);
grad(model.getWeights());
The model.getWeights() function returns an array of tf.variable(), so I assumed this would compute dL/dW for each layer, which I could later apply to the network's weights. Instead, I get this error:
Error: Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.
I don't quite understand what this error means. How should I compute the gradients of the loss (analogous to tf.gradients() in Python) using TensorFlow.js?
Edit: Here is the function that computes the loss:
function compute_loss(done, new_state, memory, agent, gamma=0.99) {
    // Bootstrap value: zero if the episode is done, otherwise the critic's
    // value estimate for the new state.
    let reward_sum = 0.;
    if (done) {
        reward_sum = 0.;
    } else {
        reward_sum = agent.call(tf.oneHot(new_state, 12).reshape([1, 9, 12]))
            .values.flatten().get(0);
    }

    // Discounted returns, accumulated by walking the rewards backwards.
    // Note: Array.prototype.reverse() mutates memory.rewards in place.
    let discounted_rewards = [];
    let memory_reward_rev = memory.rewards;
    for (let reward of memory_reward_rev.reverse()) {
        reward_sum = reward + gamma * reward_sum;
        discounted_rewards.push(reward_sum);
    }
    discounted_rewards.reverse();

    // One-hot encode the stored states and stack them into a single batch.
    let onehot_states = [];
    for (let state of memory.states) {
        onehot_states.push(tf.oneHot(state, 12));
    }
    let init_onehot = onehot_states[0];
    for (let i = 1; i < onehot_states.length; i++) {
        init_onehot = init_onehot.concat(onehot_states[i]);
    }

    // Forward pass: `logits` for the policy head, `values` for the critic head.
    let log_val = agent.call(
        init_onehot.reshape([memory.states.length, 9, 12])
    );

    // Value loss: squared advantage.
    let disc_reward_tensor = tf.tensor(discounted_rewards);
    let advantage = disc_reward_tensor.reshapeAs(log_val.values).sub(log_val.values);
    let value_loss = advantage.square();
    log_val.values.print();

    // Entropy-style regularizer: -sum(policy * logits).
    let policy = tf.softmax(log_val.logits);
    let logits_cpy = log_val.logits.clone();
    let entropy = policy.mul(logits_cpy.mul(tf.scalar(-1)));
    entropy = entropy.sum();

    // Policy loss: cross-entropy between the one-hot actions and the logits.
    let memory_actions = [];
    for (let i = 0; i < memory.actions.length; i++) {
        memory_actions.push(new Array(2000).fill(0));
        memory_actions[i][memory.actions[i]] = 1;
    }
    memory_actions = tf.tensor(memory_actions);
    let policy_loss = tf.losses.softmaxCrossEntropy(
        memory_actions.reshape([memory.actions.length, 2000]), log_val.logits);

    // total_loss = 0.5 * value_loss + policy_loss - 0.01 * entropy
    let value_loss_copy = value_loss.clone();
    let entropy_mul = (entropy.mul(tf.scalar(0.01))).mul(tf.scalar(-1));
    let total_loss_1 = value_loss_copy.mul(tf.scalar(0.5, 'float32'));
    let total_loss_2 = total_loss_1.add(policy_loss);
    let total_loss = total_loss_2.add(entropy_mul);
    total_loss.print();

    return total_loss.mean();
}
Edit 2:
I managed to use compute_loss as the loss function specified in model.compile(). But then it only accepts two inputs (predictions and labels), so it does not work for me, since I want to pass in several parameters; the two-argument contract is sketched below.
I am completely lost on this.
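(For reference, a custom loss passed to model.compile() in tfjs is limited to the two-argument form below; the optimizer and the mean-squared-error body are purely illustrative, not my actual code:)

// A minimal sketch of the two-argument custom-loss contract in tfjs:
// the function receives only the labels and the predictions, so extra
// state such as `memory` or `agent` cannot be passed as parameters.
model.compile({
    optimizer: tf.train.adam(1e-3), // illustrative choice
    loss: (yTrue, yPred) => yTrue.sub(yPred).square().mean()
});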
Answer 0 (score: 1):
The error says it all.
Your issue is related to tf.variableGrads. loss should be a scalar computed using all the available tf tensor operators; loss should not return a tensor, as it does in your question.
Here is an example of a loss:
const a = tf.variable(tf.tensor1d([3, 4]));
const b = tf.variable(tf.tensor1d([5, 6]));
const x = tf.tensor1d([1, 2]);
const f = () => a.mul(x.square()).add(b.mul(x)).sum(); // f is a function
// df/da = x ^ 2, df/db = x
const {value, grads} = tf.variableGrads(f); // gradients of f with respect to each variable
Object.keys(grads).forEach(varName => grads[varName].print());
/!\ Note that gradients are only computed with respect to variables created with tf.variable.
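(If you instead want gradients with respect to plain input tensors rather than variables, which is the closer analogue of Python's tf.gradients(), tfjs provides tf.grad / tf.grads; a minimal sketch with made-up inputs:)

// A minimal sketch: tf.grads differentiates f with respect to the explicit
// inputs passed in, rather than with respect to tf.variable objects.
const g = tf.grads((a, b) => a.mul(b).sum()); // f must enclose all ops from inputs to output
const [da, db] = g([tf.tensor1d([1, 2]), tf.tensor1d([3, 4])]);
da.print(); // df/da = b => [3, 4]
db.print(); // df/db = a => [1, 2]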
Update:
You were not computing the gradient the way you should. Here is the fix:
function compute_loss(done, new_state, memory, agent, gamma=0.99) {
    // Every operation leading from the variables to the loss now happens
    // inside the closure f, so tf.variableGrads can trace all of it.
    const f = () => {
        let reward_sum = 0.;
        if (done) {
            reward_sum = 0.;
        } else {
            reward_sum = agent.call(tf.oneHot(new_state, 12).reshape([1, 9, 12]))
                .values.flatten().get(0);
        }
        let discounted_rewards = [];
        let memory_reward_rev = memory.rewards;
        for (let reward of memory_reward_rev.reverse()) {
            reward_sum = reward + gamma * reward_sum;
            discounted_rewards.push(reward_sum);
        }
        discounted_rewards.reverse();
        let onehot_states = [];
        for (let state of memory.states) {
            onehot_states.push(tf.oneHot(state, 12));
        }
        let init_onehot = onehot_states[0];
        for (let i = 1; i < onehot_states.length; i++) {
            init_onehot = init_onehot.concat(onehot_states[i]);
        }
        let log_val = agent.call(
            init_onehot.reshape([memory.states.length, 9, 12])
        );
        let disc_reward_tensor = tf.tensor(discounted_rewards);
        let advantage = disc_reward_tensor.reshapeAs(log_val.values).sub(log_val.values);
        let value_loss = advantage.square();
        log_val.values.print();
        let policy = tf.softmax(log_val.logits);
        let logits_cpy = log_val.logits.clone();
        let entropy = policy.mul(logits_cpy.mul(tf.scalar(-1)));
        entropy = entropy.sum();
        let memory_actions = [];
        for (let i = 0; i < memory.actions.length; i++) {
            memory_actions.push(new Array(2000).fill(0));
            memory_actions[i][memory.actions[i]] = 1;
        }
        memory_actions = tf.tensor(memory_actions);
        let policy_loss = tf.losses.softmaxCrossEntropy(
            memory_actions.reshape([memory.actions.length, 2000]), log_val.logits);
        let value_loss_copy = value_loss.clone();
        let entropy_mul = (entropy.mul(tf.scalar(0.01))).mul(tf.scalar(-1));
        let total_loss_1 = value_loss_copy.mul(tf.scalar(0.5, 'float32'));
        let total_loss_2 = total_loss_1.add(policy_loss);
        let total_loss = total_loss_2.add(entropy_mul);
        total_loss.print();
        // The value returned to tf.variableGrads must be a scalar.
        return total_loss.mean().asScalar();
    };
    return tf.variableGrads(f); // {value, grads}: scalar loss and dL/dW per tf.variable
}
Note that you may quickly run into memory consumption issues. It is advisable to wrap the differentiated function in tf.tidy to dispose of the tensors, as in the sketch below.
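(A minimal sketch of one training step following that advice; the Adam optimizer and learning rate are illustrative assumptions, not part of the original code:)

// tf.tidy disposes of every intermediate tensor created inside the closure;
// the returned {value, grads} container is kept alive.
const optimizer = tf.train.adam(1e-3); // illustrative optimizer choice
const {value, grads} = tf.tidy(() => compute_loss(done, new_state, memory, agent));
optimizer.applyGradients(grads); // applies dL/dW to each tf.variable by name
value.print();                   // the scalar loss
tf.dispose(grads);               // release the gradient tensors once applied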