Split a dataset into training and test sets while preserving the class distribution

Date: 2020-04-28 07:55:30

Tags: python machine-learning cross-validation

I want to run a machine learning algorithm 10 times on a given dataset that has the following class distribution:

np.unique(x[:,24], return_counts=True)
(array([1., 2.]), array([700, 300]))

This means that 70% of my data belongs to class 1 and 30% to class 2.

Below is a snapshot of my data. The last column gives the class label (1 or 2):

1,6,4,12,5,5,3,4,1,67,3,2,1,2,1,0,0,1,0,0,1,0,0,1,1
2,48,2,60,1,3,2,2,1,22,3,1,1,1,1,0,0,1,0,0,1,0,0,1,2
4,12,4,21,1,4,3,3,1,49,3,1,2,1,1,0,0,1,0,0,1,0,1,0,1
1,42,2,79,1,4,3,4,2,45,3,1,2,1,1,0,0,0,0,0,0,0,0,1,1
1,24,3,49,1,3,3,4,4,53,3,2,2,1,1,1,0,1,0,0,0,0,0,1,2
4,36,2,91,5,3,3,4,4,35,3,1,2,2,1,0,0,1,0,0,0,0,1,0,1
4,24,2,28,3,5,3,4,2,53,3,1,1,1,1,0,0,1,0,0,1,0,0,1,1
2,36,2,69,1,3,3,2,3,35,3,1,1,2,1,0,1,1,0,1,0,0,0,0,1
4,12,2,31,4,4,1,4,1,61,3,1,1,1,1,0,0,1,0,0,1,0,1,0,1
2,30,4,52,1,1,4,2,3,28,3,2,1,1,1,1,0,1,0,0,1,0,0,0,2
2,12,2,13,1,2,2,1,3,25,3,1,1,1,1,1,0,1,0,1,0,0,0,1,2
1,48,2,43,1,2,2,4,2,24,3,1,1,1,1,0,0,1,0,1,0,0,0,1,2
2,12,2,16,1,3,2,1,3,22,3,1,1,2,1,0,0,1,0,0,1,0,0,1,1
1,24,4,12,1,5,3,4,3,60,3,2,1,1,1,1,0,1,0,0,1,0,1,0,2
1,15,2,14,1,3,2,4,3,28,3,1,1,1,1,1,0,1,0,1,0,0,0,1,1
1,24,2,13,2,3,2,2,3,32,3,1,1,1,1,0,0,1,0,0,1,0,1,0,2
4,24,4,24,5,5,3,4,2,53,3,2,1,1,1,0,0,1,0,0,1,0,0,1,1
1,30,0,81,5,2,3,3,3,25,1,3,1,1,1,0,0,1,0,0,1,0,0,1,1
2,24,2,126,1,5,2,2,4,44,3,1,1,2,1,0,1,1,0,0,0,0,0,0,2
4,24,2,34,3,5,3,2,3,31,3,1,2,2,1,0,0,1,0,0,1,0,0,1,1
4,9,4,21,1,3,3,4,3,48,3,3,1,2,1,1,0,1,0,0,1,0,0,1,1
1,6,2,26,3,3,3,3,1,44,3,1,2,1,1,0,0,1,0,1,0,0,0,1,1
1,10,4,22,1,2,3,3,1,48,3,2,2,1,2,1,0,1,0,1,0,0,1,0,1
2,12,4,18,2,2,3,4,2,44,3,1,1,1,1,0,1,1,0,0,1,0,0,1,1
4,10,4,21,5,3,4,1,3,26,3,2,1,1,2,0,0,1,0,0,1,0,0,1,1
1,6,2,14,1,3,3,2,1,36,1,1,1,2,1,0,0,1,0,0,1,0,1,0,1
4,6,0,4,1,5,4,4,3,39,3,1,1,1,1,0,0,1,0,0,1,0,1,0,1
3,12,1,4,4,3,2,3,1,42,3,2,1,1,1,0,0,1,0,1,0,0,0,1,1
2,7,2,24,1,3,3,2,1,34,3,1,1,1,1,0,0,0,0,0,1,0,0,1,1
1,60,3,68,1,5,3,4,4,63,3,2,1,2,1,0,0,1,0,0,1,0,0,1,2
2,18,2,19,4,2,4,3,1,36,1,1,1,2,1,0,0,1,0,0,1,0,0,1,1
1,24,2,40,1,3,3,2,3,27,2,1,1,1,1,0,0,1,0,0,1,0,0,1,1
2,18,2,59,2,3,3,2,3,30,3,2,1,2,1,1,0,1,0,0,1,0,0,1,1
4,12,4,13,5,5,3,4,4,57,3,1,1,1,1,0,0,1,0,1,0,0,1,0,1
3,12,2,15,1,2,2,1,2,33,1,1,1,2,1,0,0,1,0,0,1,0,0,0,1
2,45,4,47,1,2,3,2,2,25,3,2,1,1,1,0,0,1,0,0,1,0,1,0,2
4,48,4,61,1,3,3,3,4,31,1,1,1,2,1,0,0,1,0,0,0,0,0,1,1

The full dataset can be found here.

I want to split the data into 90% for training and 10% for testing. However, every split must preserve the class proportions of the data (e.g., in both the training and the validation split, 70% of the samples must belong to class 1 and 30% to class 2).

I know how to do a simple train/test split, but I don't know how to make the split obey the class distribution quoted above. How can I do this in Python?

2 Answers:

Answer 0 (score: 1):

You can use RepeatedStratifiedKFold, which, as its name suggests, repeats stratified K-fold cross-validation n times. To repeat the process 10 times, set n_repeats=10, and to get training and test sets in a roughly 9:1 proportion, set n_splits=10:
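A minimal sketch of how this could look, assuming the features and labels live in the array x from the question, with the class label in column 24 (the variable names X, y and rskf are illustrative):

import numpy as np
from sklearn.model_selection import RepeatedStratifiedKFold

X, y = x[:, :24], x[:, 24]  # features and class labels (last column), as in the question

# n_splits=10 gives ~9:1 train/test splits, n_repeats=10 repeats the whole procedure 10 times;
# every split is stratified, so both partitions keep the 70%/30% class ratio
rskf = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=42)

for train_index, test_index in rskf.split(X, y):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # train and evaluate the model on this fold here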

Answer 1 (score: 0):

scikit-learn's train_test_split is a well-known way to split data into training and test sets.

See the API documentation for model_selection.train_test_split.

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)

You can vary the random_state variable (the seed) until the proportions between the classes come out right. Although train_test_split does not enforce the proportions, the split usually follows those of the population.
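As a rough sketch (reusing the X and y names from the snippet above), you can check the resulting class counts after the split; passing stratify=y additionally makes train_test_split keep the class proportions exactly rather than leaving them to chance:

import numpy as np
from sklearn.model_selection import train_test_split

# stratify=y forces both splits to keep the 70%/30% class ratio
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.10, random_state=42, stratify=y)

# verify the class distribution in each split
print(np.unique(y_train, return_counts=True))
print(np.unique(y_test, return_counts=True))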