帮助!我使用cloudml_train("model.R", master_type = "complex_model_m_p100")
在CloudML上训练了一个模型。现在我需要取回训练好的模型。我没有在脚本中指定任何保存模型的操作……原本以为job_collect()会在最后一个epoch之后
返回权重。
job_collect()
确实返回了训练作业目录 jobDir：gs://project/r-cloudml/staging
有什么方法可以获取模型权重?还是使用适用于Google的回调设置脚本?这是脚本
library(keras)

# Load the pre-built input/target tensors from .RData files.
# The sspr.* tensors are used for training and the yspr.* tensors
# for validation in the fit() call below.
tensor_files <- c(
  "sspr.ndvi.tensor.RData",
  "sspr.highdem.tensor.RData",
  "sspr.lowdem.tensor.RData",
  "yspr.ndvi.tensor.RData",
  "yspr.highdem.tensor.RData",
  "yspr.lowdem.tensor.RData"
)
for (tensor_file in tensor_files) {
  load(tensor_file)
}
# Model definition ----

# Two 51x51 single-channel inputs: a high-resolution crop image and a
# low-resolution DEM.
highres.crop.input <- layer_input(shape = c(51, 51, 1), name = "highres.crop_input")
lowdem.input <- layer_input(shape = c(51, 51, 1), name = "lowdem.input")

# Low-resolution DEM branch: conv features, pooled down to 1x1
# (51 -> 17 via max pool, 17 -> 1 via average pool), then upsampled back
# to 51x51 so it can be added to the high-resolution branch below.
# `filters` is now spelled out in full; the original wrote `filter =`,
# which only worked through R's partial argument matching.
lowdem_output <- lowdem.input %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 14,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_batch_normalization() %>%
  layer_average_pooling_2d(pool_size = c(17, 17)) %>%
  layer_upsampling_2d(size = c(51, 51), name = "lowdem_output")
# Inception-style block 0 ----
# `filters` is spelled out in full throughout; the original's `filter =`
# relied on R's partial argument matching.
inception_input0 <- highres.crop.input %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(2, 2), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input0")
# Bottleneck to 1 channel, pool 51 -> 17, factorized 1x7/7x1 convs,
# then upsample 17 -> 51 to restore spatial size.
inception_output0 <- inception_input0 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 16,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output0")
# Merge the skip path, the inception path, and the upsampled DEM branch.
cnn_inter_output0 <- layer_add(c(inception_input0, inception_output0, lowdem_output)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output0")
# Residual-style add of the raw high-res input back onto the block output.
added_inception_highres0 <- layer_add(c(highres.crop.input, cnn_inter_output0)) %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 4,
                activation = "relu", padding = "same",
                name = "added_inception_highres0")
# Inception-style block 1 ----
# `filters` is spelled out in full; the original's `filter =` relied on
# R's partial argument matching.
inception_input1 <- added_inception_highres0 %>%
  layer_gaussian_dropout(rate = 0.35) %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(3, 3), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input1")
inception_output1 <- inception_input1 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output1")
cnn_inter_output1 <- layer_add(c(inception_input1, inception_output1)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output1")
# NOTE(review): unlike block 0, this "added" stage performs no layer_add —
# it is a plain 1x1 conv on cnn_inter_output1, so the name is misleading.
# Left as-is to preserve behavior.
added_inception_highres1 <- cnn_inter_output1 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 2,
                activation = "relu", padding = "same",
                name = "added_inception_highres1")
# Inception-style block 2 ----
# Same structure as block 1, but without the input dropout and ending in a
# single-channel output (this becomes the model output below).
# `filters` is spelled out in full; the original's `filter =` relied on
# R's partial argument matching.
inception_input2 <- added_inception_highres1 %>%
  layer_conv_2d(kernel_size = c(3, 3), strides = 1, filters = 16,
                activation = "relu", padding = "same",
                data_format = "channels_last") %>%
  layer_conv_2d(kernel_size = c(3, 3), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_batch_normalization(name = "inception_input2")
inception_output2 <- inception_input2 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same") %>%
  layer_max_pooling_2d(pool_size = c(3, 3)) %>%
  layer_conv_2d(kernel_size = c(1, 7), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(7, 1), filters = 8,
                activation = "relu", padding = "same") %>%
  layer_upsampling_2d(size = c(3, 3), interpolation = "nearest",
                      name = "inception_output2")
cnn_inter_output2 <- layer_add(c(inception_input2, inception_output2)) %>%
  layer_conv_2d(kernel_size = c(1, 5), filters = 6,
                activation = "relu", padding = "same") %>%
  layer_conv_2d(kernel_size = c(5, 1), filters = 6,
                activation = "relu", padding = "same",
                name = "cnn_inter_output2")
# Final 1x1 conv to a single channel; this layer is the model output.
added_inception_highres2 <- cnn_inter_output2 %>%
  layer_conv_2d(kernel_size = c(1, 1), filters = 1,
                activation = "relu", padding = "same",
                name = "added_inception_highres2")
# Assemble the two-input, single-output functional model.
incept_dual <- keras_model(
  inputs = c(highres.crop.input, lowdem.input),
  outputs = added_inception_highres2
)
summary(incept_dual)

# `metrics` is spelled out in full; the original wrote `metric =`, which
# only worked through R's partial argument matching.
incept_dual %>% compile(loss = 'mse',
                        optimizer = 'Nadam',
                        metrics = "mse")
# Train on the sspr.* tensors, validating on the held-out yspr.* tensors.
# Input/target list names must match the corresponding layer names in the
# model ("highres.crop_input", "lowdem.input", "added_inception_highres2").
training_inputs <- list(
  highres.crop_input = sspr.highdem.tensor,
  lowdem.input = sspr.lowdem.tensor
)
training_targets <- list(added_inception_highres2 = sspr.ndvi.tensor)
validation_set <- list(
  list(yspr.highdem.tensor, yspr.lowdem.tensor),
  yspr.ndvi.tensor
)
fit(
  incept_dual,
  x = training_inputs,
  y = training_targets,
  epochs = 1000,
  batch_size = 32,
  validation_data = validation_set,
  shuffle = TRUE
)
答案 0（得分：0）
您似乎想使用R代码从 gs://project/r-cloudml/staging 加载模型以分析权重。
cloudml R库具有gs_copy
功能(https://cran.r-project.org/web/packages/cloudml/cloudml.pdf的第6页),该功能可能会有所帮助。
您可能需要使用gcloud auth
授权对GCS的访问。然后，您应该可以使用 gs_copy("gs://project/r-cloudml/staging", "/local/directory")
将保存的模型移至R环境(如Jupyter或RStudio)
从那里您应该能够使用普通的Keras R库命令来加载/分析权重模型。 https://keras.rstudio.com/articles/tutorial_save_and_restore.html
答案 1（得分：0）
答案是在脚本中定义没有父路径的目录
# Checkpoint weights every 5 epochs under a relative path (no parent
# directory), so CloudML copies the files into the job's output bucket.
checkpoint_path <- "five_epoch_checkpoint.ckpt"
lastditch_callback <- callback_model_checkpoint(
  filepath = checkpoint_path,
  save_weights_only = TRUE,
  save_best_only = FALSE,
  # The original also passed `save_freq = 5`, which keras interprets as
  # "every 5 batches" and which overrides `period`. Dropping it restores
  # the per-epoch schedule so `period = 5` saves every 5th epoch, matching
  # the "five_epoch_checkpoint" filename.
  period = 5,
  verbose = 0
)
# Separately track the best weights seen so far (save_best_only = TRUE),
# also under a relative path so CloudML collects it with the job output.
# `<-` replaces `=` for top-level assignment per R style convention.
best_path <- "best.ckpt"
bestmod_callback <- callback_model_checkpoint(
  filepath = best_path,
  save_weights_only = TRUE,
  save_best_only = TRUE,
  mode = "auto",
  verbose = 0
)
# Train with both checkpoint callbacks attached, then save the full model.
# NOTE(review): the target list here is named "prediction", but the model
# defined earlier in this page names its output layer
# "added_inception_highres2" — confirm this matches your model's actual
# output layer name, otherwise keras cannot map the target to the output.
incept_dual %>% fit(
x = list(highres.crop_input = sspr.highdem.tensor, lowdem.input = sspr.lowdem.tensor),
y = list(prediction = sspr.ndvi.tensor),
epochs = 50,
batch_size = 32,
validation_data=list(list(yspr.highdem.tensor,yspr.lowdem.tensor),yspr.ndvi.tensor),
callbacks = list(lastditch_callback,bestmod_callback),
shuffle = TRUE
)
# Save the whole trained model (architecture + weights) as HDF5; with a
# relative path this also ends up in the job's output bucket.
save_model_hdf5(incept_dual,"incept_dual.h5")
five_epoch_checkpoint.ckpt
,best.ckpt
和incept_dual.h5
都将出现在模型结果自动保存到的Google存储桶中。不幸的是，这次我没能取回最终模型，但检查点已经保存了下来，今后最终模型的保存也可以正常工作。