Split an array into K subarrays with minimal difference

Date: 2018-02-17 13:20:58

Tags: c algorithm dynamic-programming mathematical-optimization

Disclaimer:

The problem described looks like a task from a programming contest. I am not taking part in any contest, and I am not aware of any ongoing contest that this problem might belong to. If there is one, I will close this question to keep things fair!

I have the following problem: given an array A of values and an integer K, split A into exactly K non-overlapping contiguous subarrays so that the difference between the subarray with the maximal sum and the subarray with the minimal sum is minimal. It is allowed to rotate A by any number of positions in either direction.

Consider an example:

Input: A = [5 1 1 1 3 2], K = 3

Output: [5] [1 1 1] [3 2], maximum sum = 5, minimum sum = 3, result = 2

I have partially working code (very ugly, my bad, but it is not meant to be production quality):

#include <climits>
#include <cstdio>
#include <cstring>

const int max_n = 50;
const int max_k = 20;

int deps[max_n];

int max (int x, int y) {
  return x > y ? x : y;
}

int min (int x, int y) {
  return x < y ? x : y;
}

int sum (int a[], int start, int end) {
  int res = 0;
  for (int i = start; i <= end; ++i) res += a[i];

  return res;
}

int k_partitioning(int k, int n, int deps[]) {
  int res = INT_MAX;
  // consider all possible rotations/shifts
  for(int offset = 0; offset < n; ++offset) {
    for(int l_min = 0; l_min < n; ++l_min) {
      for(int r_min = l_min; r_min < n; ++r_min) {
        // check minimal sum subarray
        int min_sum = sum (deps, l_min, r_min);

        int dp[k][n];
        for (int s = 0; s < k; ++s) {
          for (int q = 0; q < n; ++q) {
            dp[s][q] = 0;
          }
        }
        // assuming that current sum is a target sum
        dp[0][r_min-l_min] = min_sum;

        for(int p = 1; p < k; ++p) {
          for(int l_max = 0; l_max < n; ++l_max) {
            for(int r_max = 0; r_max < n; ++r_max) {
              int max_sum = sum(deps, l_max, r_max);

              if (max_sum >= min_sum) dp[p][r_max] = max(dp[p-1][l_max], max_sum);
            } // r_maxs
          } // l_maxs
        } // partitions
        // printing dp

        // skip incorrect partitioning, when not all K partitions were used
        if (dp[k-1][n-1] == 0) continue;

        // update difference
        res = min (res, dp[k-1][n-1] - min_sum);
      } // end min sum seg
    } // start min sum seg
    //break;
  } // cuts
  return res;
}

int main(int argc, char* argv[]) {
  int k = 0;
  scanf("%d", &k);

  int n = 0;
  scanf("%d", &n);

  for (int i = 0; i < n; ++i) {
    scanf("%d", &deps[i]);
  }

  printf ("%d\n", k_partitioning(k, n, deps));

  return 0;
}

The idea is simple: assuming the current segment is the one with the minimal sum, enumerate all possible maximal segments, set up a dynamic programming recurrence that produces the smallest possible maximum sum, and check the difference. Total complexity: O(K * N^4).

My problem is that it does not pass some of the tests, and I am struggling to troubleshoot it. Could someone help me?

A failing test, for example:

N = 4, K = 2, A = [6 13 10 2]

Update

This version should fix some of the earlier issues. First, it removes the wasteful "offset" loop and instead rotates the array at the end of the l_min loop. Second, I noticed that dp must not be initialized with 0: this is a minimization task, so it should be initialized with some large value (given the problem's constants, max_value here is already outside the range of achievable sums). Finally, the intervals must not overlap: each sum now excludes the left end of the interval. However, it still does not produce the expected results.

#include <climits>
#include <cstdio>
#include <cstring>

const int max_value = 200000;
const int max_n = 50;
const int max_k = 20;

int deps[max_n];

int max (int x, int y) {
  return x > y ? x : y;
}

int min (int x, int y) {
  return x < y ? x : y;
}

int sum (int a[], int start, int end) {
  int res = 0;
  for (int i = start; i <= end; ++i) res += a[i];

  return res;
}

int k_partitioning(int k, int n, int deps[]) {
  int res = max_value;

  for(int l_min = 0; l_min < n; ++l_min) {
    for(int r_min = l_min; r_min < n; ++r_min) {
      int min_sum = sum (deps, l_min+1, r_min);

      int dp[k][n];
      for (int s = 0; s < k; ++s) {
        for (int q = 0; q < n; ++q) {
          dp[s][q] = max_value;
        }
      }
      // assuming that current sum is a target sum
      dp[0][r_min-l_min] = min_sum;

      for(int p = 1; p < k; ++p) {
        for(int l_max = 0; l_max < n; ++l_max) {
          for(int r_max = l_max; r_max < n; ++r_max) {
            int max_sum = sum(deps, l_max+1, r_max);

            if (max_sum >= min_sum) dp[p][r_max] = max(dp[p-1][l_max], max_sum);
          } // r_maxs
        } // l_maxs
      } // partitions

      // skip incorrect partitioning, when not all K partitions were used
      if (dp[k-1][n-1] == max_value) continue;

      // update difference
      res = min (res, dp[k-1][n-1] - min_sum);
    } // end min sum seg

    // rotate an array to consider different starting points
    int tmp[n];
    for (int i = 0; i < n; ++i) {
      int new_idx = i + n + 1;

      tmp[new_idx % n] = deps[i];
    }

    for(int i = 0; i < n; ++i) deps[i] = tmp[i];
  } // start min sum seg

  return res;
}

int main(int argc, char* argv[]) {
  int k = 0;
  scanf("%d", &k);

  int n = 0;
  scanf("%d", &n);

  for (int i = 0; i < n; ++i) {
    scanf("%d", &deps[i]);
  }

  printf ("%d\n", k_partitioning(k, n, deps));

  return 0;
}

3 Answers:

Answer 0 (score: 3)

OK, I think I got it!

The idea is the following: we assume that the minimal-sum interval always starts at index 0. We then enumerate the candidate minimal-sum intervals by moving the right boundary of that interval. For each candidate we build the DP over the remaining elements to determine the minimal possible maximum sum. After that, we update the result and rotate the array by one position.

My code is not perfect: I recompute the running sums on every iteration. One could precompute them once and simply index into them each time (a prefix-sum sketch follows the code below).

This code may still have some bugs, but it passes all of my tests.

#include <climits>
#include <cstdio>
#include <cstring>

const int max_value = 200000;
const int max_n = 50;
const int max_k = 20;

int deps[max_n];

int max (int x, int y) {
  return x > y ? x : y;
}

int min (int x, int y) {
  return x < y ? x : y;
}

int sum (int a[], int start, int end) {
  int res = 0;

  for (int i = start; i <= end; ++i) res += a[i];

  return res;
}

int k_partitioning(int k, int n, int deps[]) {
  int res = max_value;
  for(int offset = 0; offset < n; ++offset) {
    int l_min = 0;
    for(int r_min = l_min; r_min < n; ++r_min) {
      int min_sum = sum (deps, l_min, r_min);

      int dp[k][n];
      for (int s = 0; s < k; ++s) {
        for (int q = 0; q < n; ++q) {
          dp[s][q] = max_value;
        }
      }
      // assuming that current sum is a target sum
      dp[0][r_min-l_min] = min_sum;

      for(int p = 1; p < k; ++p) {
        for(int l_max = r_min; l_max < n; ++l_max) {
          for(int r_max = l_max; r_max < n; ++r_max) {
            int max_sum = sum(deps, l_max+1, r_max);

            if (max_sum >= min_sum) {
              dp[p][r_max] = min(dp[p][r_max], max(dp[p-1][l_max], max_sum));
            }

          } // r_maxs
        } // l_maxs
      } // partitions

      // skip incorrect partitioning, when not all K partitions were used
      if (dp[k-1][n-1] == max_value) continue;

      // update difference
      res = min (res, dp[k-1][n-1] - min_sum);
    } // end min sum seg
    // rotate the array left by one so the next candidate start lands at index 0
    int tmp[n];
    for (int i = 0; i < n; ++i) {
      int new_idx = i + n - 1;

      tmp[new_idx % n] = deps[i];
    }

    for(int i = 0; i < n; ++i) deps[i] = tmp[i];

  } // start min sum seg
  return res;
}

int main(int argc, char* argv[]) {
  int k = 0;
  scanf("%d", &k);

  int n = 0;
  scanf("%d", &n);

  for (int i = 0; i < n; ++i) {
    scanf("%d", &deps[i]);
  }

  printf ("%d\n", k_partitioning(k, n, deps));

  return 0;
}
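
As a side note on the remark above about recomputing sums: below is a minimal sketch of how the O(n) sum() calls could be replaced by O(1) prefix-sum lookups. The names buildPrefix and rangeSum are mine, not part of the answer's code, and the prefix array would have to be rebuilt after each rotation of deps.

#include <vector>

// prefix[i] = deps[0] + ... + deps[i-1]; prefix[0] = 0.
std::vector<long long> buildPrefix(const int deps[], int n) {
  std::vector<long long> prefix(n + 1, 0);
  for (int i = 0; i < n; ++i) prefix[i + 1] = prefix[i] + deps[i];
  return prefix;
}

// Inclusive range sum deps[start..end] in O(1), replacing the O(n) sum() loop.
long long rangeSum(const std::vector<long long>& prefix, int start, int end) {
  return prefix[end + 1] - prefix[start];
}

Rebuilding the prefix array once per rotation costs O(n), so it does not change the overall complexity; it only removes the innermost summation loops.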

Answer 1 (score: 1)

Solution without rotation:

  • 1) Compute the maximum element M and the total sum S of the array - O(n)
  • 2) Let F(P) be a function that returns True if the array can still be covered by the k (>= 0) partitions with each partition sum being P or smaller.
  • 3) Binary-search the range (M, S) using F. - O(log(S - M))
  • 4) The logic behind F: greedily fill a bucket as long as it does not exceed the threshold P, then move on to the next bucket. If items remain but no buckets are left, the answer is False - O(n)

    Time complexity = O(n) + O(n) * O(log(S - M)) = O(n * log(S - M)); a sketch of this approach follows below.
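
A minimal sketch of this idea, assuming the feasibility check F tests a candidate threshold P; the names canSplit and minimizeLargestSum are my own, not from the answer:

#include <algorithm>
#include <numeric>
#include <vector>

// F(P): true if the array can be split into at most k contiguous buckets
// such that no bucket sum exceeds limit (greedy left-to-right filling).
bool canSplit(const std::vector<int>& a, int k, long long limit) {
  int buckets = 1;
  long long cur = 0;
  for (int x : a) {
    if (x > limit) return false;   // a single element already exceeds the limit
    if (cur + x > limit) {         // current bucket is full, start a new one
      ++buckets;
      cur = 0;
    }
    cur += x;
  }
  return buckets <= k;
}

// Binary search on the answer over [M, S], where M = max element, S = total sum.
long long minimizeLargestSum(const std::vector<int>& a, int k) {
  long long lo = *std::max_element(a.begin(), a.end());
  long long hi = std::accumulate(a.begin(), a.end(), 0LL);
  while (lo < hi) {
    long long mid = lo + (hi - lo) / 2;
    if (canSplit(a, k, mid)) hi = mid; else lo = mid + 1;
  }
  return lo;
}

This sketch only covers the threshold search that F is defined around; the rotation loop described next would wrap around it.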

Solution with rotation:

For every rotation in [0, 1, ..., N-1], compute the minimum as above.

Total time complexity = O(n) * O(n * log(S - M)) = O(n^2 * log(S - M))

Answer 2 (score: 0)

Now that your code is working, here is another approach :)

For each k, we can pair the sums extending left from A[i] (sum A[i-j..i]) with all the intervals recorded for f(k-1, i-j-1) and update them: for each interval (low, high), if the sum is greater than high, the new interval is (low, sum); if the sum is lower than low, the new interval is (sum, high); otherwise the interval stays unchanged (a short sketch of this update rule follows the example below). For example,

i:  0 1 2 3 4 5
A: [5 1 1 1 3 2]

k = 3
i = 3, j = 0
The ordered intervals available for f(3-1, 3-0-1) = f(2,2) are:
  (2,5), (1,6) // These were the sums, (A[1..2], A[0]) and (A[2], A[0..1])
Sum = A[3..3-0] = 1
Update intervals: (2,5) -> (1,5)
                  (1,6) -> (1,6) no change
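
A minimal sketch of the update rule just described; the type Interval and the helper updateInterval are my own names, not from the answer:

#include <utility>

// An interval records (smallest bucket sum, largest bucket sum) of one split.
using Interval = std::pair<long long, long long>;

// Pair a new leftward sum with an existing interval: the new bucket either
// raises the maximum, lowers the minimum, or fits inside the interval.
Interval updateInterval(Interval iv, long long sum) {
  if (sum > iv.second) return {iv.first, sum};    // new bucket becomes the largest
  if (sum < iv.first)  return {sum, iv.second};   // new bucket becomes the smallest
  return iv;                                      // interval unchanged
}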

Now we can make these iterations more efficient by identifying and pruning dominated intervals from the previous round of k.

Observe:

A: [5 1 1 1 3 2]

K = 1:

  N = 0..5; Intervals: (5,5), (6,6), (7,7), (8,8), (11,11), (13,13)

K = 2:

  N = 0: Intervals: N/A

  N = 1: Intervals: (1,5)

  N = 2: (1,6), (2,5)

    Prune: remove (1,6) since any sum <= 1 would be better paired with (2,5)
           and any sum >= 6 would be better paired with (2,5)

  N = 3: (1,7), (2,6), (3,5)

    Prune: remove (2,6) and (1,7)

  N = 4: (3,8), (4,7), (5,6), (5,6)

    Prune: remove (3,8) and (4,7)

  N = 5: (2,11), (5,8), (6,7)

    Prune: remove (2,11) and (5,8)

For k = 2 we are now left with the following pruned record:

{
  k: 2,
  n: {
    1: (1,5),
    2: (2,5),
    3: (3,5),
    4: (5,6),
    5: (6,7)
  }
}

We have reduced the iteration over the list of possible splits for k = 3 from n choose 2 to n relevant splits!

The general algorithm, applied to k = 3:

for k' = 1 to k
  for sum A[i-j..i], for i <- [k'-1..n], j <- [0..i-k'+1]:
    for interval in record[k'-1][i-j-1]: // records are for [k'][n']
      update interval
  prune intervals in k'

k' = 3
  i = 2
    sum = 1, record[2][1] = (1,5) -> no change

  i = 3
    // sums are accumulating right to left starting from A[i]
    sum = 1, record[2][2] = (2,5) -> (1,5)
    sum = 2, record[2][1] = (1,5) -> no change

  i = 4
    sum = 3, record[2][3] = (3,5) -> no change
    sum = 4, record[2][2] = (2,5) -> no change
    sum = 5, record[2][1] = (1,5) -> no change

  i = 5
    sum = 2, record[2][4] = (5,6) -> (2,6)
    sum = 5, record[2][3] = (3,5) -> no change
    sum = 6, record[2][2] = (2,5) -> (2,6)
    sum = 7, record[2][1] = (1,5) -> (1,7)

The answer is the sum 5 paired with record[2][3] = (3,5), which yields the updated interval (3,5). I will leave the pruning logic for the reader to work out (one possible rule is sketched at the end of this answer). If we wanted to continue, here is the pruned list for k = 3:
{
  k: 3
  n: {
    2: (1,5), 
    3: (1,5),
    4: (3,5),
    5: (3,5)
  }
}
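
For completeness, one possible reading of the pruning step demonstrated above: an interval can be dropped whenever it contains another recorded interval, because the nested interval pairs at least as well with any future sum. This is my own interpretation of the examples, not code from the answer:

#include <algorithm>
#include <climits>
#include <utility>
#include <vector>

using Interval = std::pair<long long, long long>; // (smallest sum, largest sum)

// Drop every interval that contains another recorded interval (including duplicates).
std::vector<Interval> prune(std::vector<Interval> ivs) {
  // Sort by low ascending; for equal lows put the wider interval first.
  std::sort(ivs.begin(), ivs.end(), [](const Interval& a, const Interval& b) {
    return a.first != b.first ? a.first < b.first : a.second > b.second;
  });
  // Sweep from the right: an interval survives only if its high is strictly
  // smaller than every high seen so far, i.e. it contains no later interval.
  std::vector<Interval> kept;
  long long best_high = LLONG_MAX;
  for (auto it = ivs.rbegin(); it != ivs.rend(); ++it) {
    if (it->second < best_high) {
      kept.push_back(*it);
      best_high = it->second;
    }
  }
  std::reverse(kept.begin(), kept.end());
  return kept;
}

Applied to the k = 2 lists above this keeps exactly the records shown, e.g. {(2,11), (5,8), (6,7)} reduces to {(6,7)} for N = 5.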