I get this error when trying to train a model after splitting my data with sklearn's train_test_split. I appended data points to a list, built a list of lists from those, and then converted that into a NumPy array. Any idea what causes this error and how it can be fixed?
if start_time < end_time:
    if value[9:] == "A":
        data.append(80)
    else:
        data.append(int(value[9:]))
else:
    allData.append(data)
    data = []
    if value[9:] == "A":
        data.append(80)
    else:
        data.append(int(value[9:]))
    endTime_obj = endTime_obj + datetime.timedelta(0, 30)
    end_time = endTime_obj.time()
    # print("else: ", end_time)
if dataPoints.index(value) == len(dataPoints) - 1:
    allData.append(data)
else:
    pass
import numpy as np

allData = np.array(allData)
labels = np.array(labels)
from sklearn.model_selection import train_test_split
train_data, test_data, train_labels, test_labels = train_test_split(allData, labels, test_size=0.1, random_state=1)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Embedding, Dropout, Conv1D, MaxPooling1D,
                                     LSTM, Dense, Activation)

# Embedding
max_features = 30000
maxlen = (1,)
embedding_size = 128
# Convolution
kernel_size = 16
filters = 128
pool_size = 4
# LSTM
lstm_output_size = 30
# Training
batch_size = 40
epochs = 15
model = Sequential()
# model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout(0.25))
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=2))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(LSTM(lstm_output_size))
model.add(Dense(6, activation='relu'))
model.add(Activation('softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(train_data, train_labels,
batch_size=batch_size,
epochs=epochs,
validation_data=(validation_data, validation_labels))
---------- Error --------------------------
ValueError Traceback (most recent call last)
<ipython-input-15-381a00c7e200> in <module>
2 batch_size=batch_size,
3 epochs=epochs,
----> 4 validation_data=(validation_data, validation_labels))
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
707 steps=steps_per_epoch,
708 validation_split=validation_split,
--> 709 shuffle=shuffle)
710
711 # Prepare validation data.
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
2556 else:
2557 cast_inputs = x_input
-> 2558 self._set_inputs(cast_inputs)
2559 else:
2560 y_input = y
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py in _set_inputs(self, inputs, outputs, training)
2774 kwargs = {'training': training} if self._expects_training_arg else {}
2775 try:
-> 2776 outputs = self(inputs, **kwargs)
2777 except NotImplementedError:
2778 # This Model or a submodel is dynamic and hasn't overridden
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
632 outputs = base_layer_utils.mark_as_return(outputs, acd)
633 else:
--> 634 outputs = call_fn(inputs, *args, **kwargs)
635
636 except TypeError as e:
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\engine\sequential.py in call(self, inputs, training, mask)
259 kwargs['training'] = training
260
--> 261 outputs = layer(inputs, **kwargs)
262
263 # `outputs` will be the inputs to the next layer.
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
632 outputs = base_layer_utils.mark_as_return(outputs, acd)
633 else:
--> 634 outputs = call_fn(inputs, *args, **kwargs)
635
636 except TypeError as e:
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\layers\core.py in call(self, inputs, training)
160 output = tf_utils.smart_cond(training,
161 dropped_inputs,
--> 162 lambda: array_ops.identity(inputs))
163 return output
164
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\utils\tf_utils.py in smart_cond(pred, true_fn, false_fn, name)
56 pred, true_fn=true_fn, false_fn=false_fn, name=name)
57 return smart_module.smart_cond(
---> 58 pred, true_fn=true_fn, false_fn=false_fn, name=name)
59
60
C:\Program Files\Python37\lib\site-packages\tensorflow\python\framework\smart_cond.py in smart_cond(pred, true_fn, false_fn, name)
57 else:
58 return control_flow_ops.cond(pred, true_fn=true_fn, false_fn=false_fn,
---> 59 name=name)
60
61
C:\Program Files\Python37\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
C:\Program Files\Python37\lib\site-packages\tensorflow\python\ops\control_flow_ops.py in cond(pred, true_fn, false_fn, strict, name, fn1, fn2)
1975 try:
1976 context_t.Enter()
-> 1977 orig_res_t, res_t = context_t.BuildCondBranch(true_fn)
1978 if orig_res_t is None:
1979 raise ValueError("true_fn must have a return value.")
C:\Program Files\Python37\lib\site-packages\tensorflow\python\ops\control_flow_ops.py in BuildCondBranch(self, fn)
1812 """Add the subgraph defined by fn() to the graph."""
1813 pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
-> 1814 original_result = fn()
1815 post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
1816 if len(post_summaries) > len(pre_summaries):
C:\Program Files\Python37\lib\site-packages\tensorflow\python\keras\layers\core.py in dropped_inputs()
156 noise_shape=self._get_noise_shape(inputs),
157 seed=self.seed,
--> 158 rate=self.rate)
159
160 output = tf_utils.smart_cond(training,
C:\Program Files\Python37\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
C:\Program Files\Python37\lib\site-packages\tensorflow\python\ops\nn_ops.py in dropout(x, keep_prob, noise_shape, seed, name, rate)
4168 raise ValueError("You must provide a rate to dropout.")
4169
-> 4170 return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
4171
4172
C:\Program Files\Python37\lib\site-packages\tensorflow\python\ops\nn_ops.py in dropout_v2(x, rate, noise_shape, seed, name)
4213 if not x.dtype.is_floating:
4214 raise ValueError("x has to be a floating point tensor since it's going to"
-> 4215 " be scaled. Got a %s tensor instead." % x.dtype)
4216 if isinstance(rate, numbers.Real):
4217 if not (rate >= 0 and rate < 1):
ValueError: x has to be a floating point tensor since it's going to be scaled. Got a <dtype: 'string'> tensor instead.
Answer 0 (score: 0)
Hard to say without actually seeing your data, but you could try:
allData = np.array(allData)
refined_data = allData.astype(float)  # cast the string array to a numeric dtype
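To see why the cast helps, here is a minimal sketch with hypothetical data: if any element appended to `data` is still a string (for example a `value[9:]` that never went through `int()`), NumPy coerces the whole array to a string dtype such as `<U2`, and the `Dropout` layer then refuses to scale it, which is exactly the ValueError above.

import numpy as np

# Hypothetical reproduction: a list of lists that still contains strings.
allData = [["12", "34"], ["56", "78"]]

arr = np.array(allData)
print(arr.dtype)                       # <U2 -- a string dtype, rejected by Dropout

refined_data = arr.astype(np.float32)  # numeric strings cast cleanly to float32
print(refined_data.dtype)              # float32

It is also worth printing `allData.dtype` right after the `np.array(allData)` call in your own code; if it shows anything other than a float (or integer) dtype, the parsing loop left strings in the data.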