So, I have this kind of data:
startTime: Fri Dec 28 2018 01:15:00 GMT+0200 (Eastern European Standard Time)
and this:
endTime: Mon Dec 31 2018 02:15:00 GMT+0200 (Eastern European Standard Time)
The data always comes in this format. I need to parse it so that I end up with an array of objects, one for every date between startTime and endTime, each holding the start and end times (in this case 1:15-2:15) for that day.
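For example, for the dates above the desired result would be something like this (the exact property names here are just an illustration):

[
  { day: 'Friday',   start: '1:15', end: '2:15' },
  { day: 'Saturday', start: '1:15', end: '2:15' },
  { day: 'Sunday',   start: '1:15', end: '2:15' },
  { day: 'Monday',   start: '1:15', end: '2:15' }
]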
Any ideas? The answer I got was:
function toDays(startDateString, endDateString) {
  const startDate = moment(startDateString, 'dddd MMM DD YYYY');
  const endDate = moment(endDateString, 'dddd MMM DD YYYY');
  const dates = [];
  while (startDate.isSameOrBefore(endDate, 'day')) {
    let currentDay = startDate.format('dddd');
    dates[currentDay] = [];
    dates[currentDay].push({ start: '9:00', end: '18:00' });
    startDate.add(1, 'days');
  }
  return dates;
}

const result = toDays('Mon Dec 24 2018', 'Fri Dec 28 2018');
console.log(result);
But I can't really use that as it is, and I'm not sure how to fix it, since I've never used moment.js before.
Answer 0 (score: 1)
Add the day name to the new object you create as a property such as day, and push the whole object into the array.
Change
dates[currentDay] = [];
dates[currentDay].push({start:'9:00', end:'18:00'});
to
dates.push({ day: currentDay, start:'9:00', end:'18:00'})
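Put together, a minimal sketch of the corrected function might look like this (using 'ddd' so that abbreviated day names such as 'Mon' parse cleanly, and keeping the hard-coded 9:00-18:00 times from the snippet above):

function toDays(startDateString, endDateString) {
  const startDate = moment(startDateString, 'ddd MMM DD YYYY');
  const endDate = moment(endDateString, 'ddd MMM DD YYYY');
  const dates = [];
  while (startDate.isSameOrBefore(endDate, 'day')) {
    // Push one plain object per day instead of keying the array by day name.
    dates.push({ day: startDate.format('dddd'), start: '9:00', end: '18:00' });
    startDate.add(1, 'days');
  }
  return dates;
}

// toDays('Mon Dec 24 2018', 'Fri Dec 28 2018') should now yield five objects,
// one each for Monday through Friday.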
Answer 1 (score: 1)
If I understand the question correctly, you may be having trouble figuring out how to parse this format:
startTime: Fri Dec 28 2018 01:15:00 GMT+0200 (Eastern European Standard Time)
If that is indeed the case, I think the following code might help:
<script src='https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.23.0/moment.js'></script>
<script>
function toDays(startDateString, endDateString) {
  const formatString = 'ddd MMM DD YYYY HH:mm:ss [GMT]ZZ [(Eastern European Standard Time)]';
  const startDate = moment(startDateString, formatString).utcOffset("+02:00");
  const endDate = moment(endDateString, formatString).utcOffset("+02:00");
  const start = startDate.format('H:mm');
  const end = endDate.format('H:mm');
  const dates = [];
  while (startDate.isSameOrBefore(endDate, 'day')) {
    let currentDay = startDate.format('dddd');
    dates.push({ day: currentDay, start: start, end: end });
    startDate.add(1, 'days');
  }
  return dates;
}

const result = toDays('Fri Dec 28 2018 01:15:00 GMT+0200 (Eastern European Standard Time)', 'Mon Dec 31 2018 02:15:00 GMT+0200 (Eastern European Standard Time)');
console.log(result);
</script>
The key point is the const formatString = ... line, which correctly parses the date format you provided.
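A couple of design notes on the snippet, as I understand moment's behavior: the square brackets in formatString ([GMT] and [(Eastern European Standard Time)]) mark text that should be matched literally rather than interpreted as format tokens, and .utcOffset("+02:00") keeps the parsed moments displayed in the original GMT+0200 offset instead of the local timezone, which is why start and end come out as '1:15' and '2:15'. Running the snippet should therefore log one object per day from Friday through Monday, each with those start and end times.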