I am new to this forum, so please let me know if I break any conventions. I am trying to implement an ANN to solve a PDE, but I get the error message quoted in the title.
Here is my code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
tf.__version__
'2.2.0'
Below are the function I use to sample the domain, and the loss function (which seems to be where the problem lies):
# Sampling
def sampler(N1, N2, N3):
    np.random.seed(1)
    # Sampler #1: PDE domain
    t1 = np.random.uniform(low=T0 - 0.5*(T - T0),
                           high=T,
                           size=[N1, 1])
    s1 = np.random.uniform(low=S1 - (S2 - S1)*0.5,
                           high=S2 + (S2 - S1)*0.5,
                           size=[N1, 1])
    # Sampler #2: boundary condition
    t2 = np.zeros(shape=(1, 1))
    s2 = np.zeros(shape=(1, 1))
    # Sampler #3: initial/terminal condition
    t3 = T * np.ones((N3, 1))  # Terminal condition
    s3 = np.random.uniform(low=S1 - (S2 - S1)*0.5,
                           high=S2 + (S2 - S1)*0.5,
                           size=[N3, 1])
    return (t1, s1, t2, s2, t3, s3)
# Loss function
def loss(model, t1, x1, t2, x2, t3, x3):
    # Loss term #1: PDE
    V = model(t1, x1)
    V_t = tf.gradients(V, t1)[0]
    V_x = tf.gradients(V, x1)[0]
    V_xx = tf.gradients(V_x, x1)[0]
    f = V_t + r*x1*V_x + 0.5*sigma**2*x1**2*V_xx - r*V
    L1 = tf.reduce_mean(tf.square(f))
    # Loss term #2: boundary condition
    L2 = tf.reduce_mean(tf.square(V))
    # Loss term #3: initial/terminal condition
    L3 = tf.reduce_mean(tf.square(model(t3, x3) - x3))
    return (L1, L2, L3)
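For reference, the residual f in the first loss term corresponds to the PDE being enforced on the interior points, and the third term imposes the terminal condition on the points sampled at t = T:

$$V_t + r\,x\,V_x + \tfrac{1}{2}\sigma^{2} x^{2}\,V_{xx} - r\,V = 0, \qquad V(T, x) = x.$$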
The model:
# Set random seeds
np.random.seed(42)
tf.random.set_seed(42)
# PDE parameters
r = 0.05 # Interest rate
sigma = 0.25 # Volatility
# Time limits
T0 = 0.0 + 1e-10 # Initial time
T = 1.0 # Terminal time
# Space limits
S1 = 0.0 + 1e-10 # Low boundary
S2 = 1 # High boundary
#Model specification
num_layers = 3
nodes_per_layer = 6
ann = tf.keras.models.Sequential()
#Adding the Input and hidden layers
i = 0
while i != num_layers:
    ann.add(tf.keras.layers.Dense(units=nodes_per_layer, activation='relu'))
    i += 1
#Adding the output layer
ann.add(tf.keras.layers.Dense(units=1, activation='relu'))
# Training parameters
steps_per_sample = 10
sampling_stages = 800
# Number of samples
NS_1 = 1000
NS_2 = 0
NS_3 = 100
t1, s1, t2, s2, t3, s3 = sampler(NS_1, NS_2, NS_3)
t_total = np.concatenate((t1,t2,t3))
s_total = np.concatenate((s1,s2,s3))
tf.compat.v1.disable_eager_execution()
t1_t = tf.compat.v1.placeholder(tf.float32, [None,1])
x1_t = tf.compat.v1.placeholder(tf.float32, [None,1])
t2_t = tf.compat.v1.placeholder(tf.float32, [None,1])
x2_t = tf.compat.v1.placeholder(tf.float32, [None,1])
t3_t = tf.compat.v1.placeholder(tf.float32, [None,1])
x3_t = tf.compat.v1.placeholder(tf.float32, [None,1])
L1_t, L2_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
loss_t = L1_t + L2_t + L3_t
The last line is where I get this error message:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-3acdd7516b24> in <module>()
7 x3_t = tf.compat.v1.placeholder(tf.float32, [None,1])
8
----> 9 L1_t, L2_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
10 loss_t = L1_t + L2_t + L3_t
3 frames
<ipython-input-4-2ab32c0f7a88> in loss(model, t1, x1, t2, x2, t3, x3)
5 V_t = tf.gradients(V, t1)[0]
6 V_x = tf.gradients(V, x1)[0]
----> 7 V_xx = tf.gradients(V_x, x1)[0]
8 f = V_t + r*x1*V_x + 0.5*sigma**2*x1**2*V_xx - r*V
9
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_impl.py in gradients_v2(ys, xs, grad_ys, name, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients)
300 ys, xs, grad_ys, name, True, gate_gradients,
301 aggregation_method, stop_gradients,
--> 302 unconnected_gradients)
303 # pylint: enable=protected-access
304
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_util.py in _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients, aggregation_method, stop_gradients, unconnected_gradients, src_graph)
534 xs_set = object_identity.ObjectIdentitySet(xs)
535 grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,
--> 536 gradient_uid)
537
538 # The approach we take here is as follows: Create a list of all ops in the
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gradients_util.py in _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid)
165 new_grad_ys = []
166 for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
--> 167 with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
168 if grad_y is None:
169 if y.dtype.is_complex:
AttributeError: 'NoneType' object has no attribute 'op'
Thank you for your help!
Answer 0 (score: 0)
The error is caused by the output of the previous layer not being fed into the subsequent layer. The code below needs to be modified so that each layer takes the output of the preceding one:
#Adding the Input and hidden layers
i = 0
while i != num_layers:
    ann.add(tf.keras.layers.Dense(units=nodes_per_layer, activation='relu'))
    i += 1
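As a rough sketch, one way to wire each layer's output explicitly into the next is the Keras functional API (the single concatenated (t, x) input below is an assumption on my part, adjust it to your setup):

# Sketch only: explicit layer wiring with the functional API.
# Assumes the network takes one input holding (t, x) pairs;
# the input shape and call sites are assumptions, not from the question.
inputs = tf.keras.layers.Input(shape=(2,))
h = inputs
for _ in range(num_layers):
    # each Dense layer is applied to the previous layer's output
    h = tf.keras.layers.Dense(units=nodes_per_layer, activation='relu')(h)
outputs = tf.keras.layers.Dense(units=1)(h)
ann = tf.keras.Model(inputs=inputs, outputs=outputs)

With this version the model would be called on concatenated inputs, e.g. ann(tf.concat([t1, x1], axis=1)).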