I'm solving this task (Problem I). The statement is:
How many subsets of the set {1, 2, 3, ..., n} are coprime? A set of integers is called coprime if every two of its elements are coprime. Two integers are coprime if their greatest common divisor equals 1.
Input
The first line of input contains two integers n and m (1 <= n <= 3000, 1 <= m <= 10^9 + 9).
Output
Output the number of coprime subsets of {1, 2, 3, ..., n} modulo m.
Example
Input: 4 7
Output: 5
{1,2,3,4} has 12 coprime subsets: {}, {1}, {2}, {3}, {4}, {1,2}, {1,3}, {1,4}, {2,3}, {3,4}, {1,2,3}, {1,3,4}.
I think this could be solved by using prime numbers somehow (keeping track of whether each prime has been used), but I'm not sure.
Could I get some hints for solving this task?
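Not part of the original question, but as a sanity check while experimenting: the hedged brute-force sketch below enumerates all 2^n subsets and reproduces the example above (12 coprime subsets of {1,2,3,4}, i.e. 5 mod 7). It is only usable for very small n.
// Brute-force reference check (illustration only; far too slow for the real limits).
#include <cstdio>
#include <numeric>   // std::gcd (C++17)
int main() {
    const int n = 4, m = 7;                       // the example from the statement
    long long count = 0;
    for (unsigned mask = 0; mask < (1u << n); ++mask) {
        bool ok = true;                           // empty set and singletons count as coprime
        for (int i = 1; i <= n && ok; ++i)
            for (int j = i + 1; j <= n && ok; ++j)
                if ((mask >> (i - 1) & 1) && (mask >> (j - 1) & 1) && std::gcd(i, j) != 1)
                    ok = false;
        if (ok) ++count;
    }
    std::printf("%lld %lld\n", count, count % m); // prints "12 5"
    return 0;
}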
Answer 0 (score: 16)
Okay, here's the goods. The C program that follows handles n = 3000 in under 5 seconds for me. My hat is off to the team(s) that solved this problem in a competition setting.
The algorithm is based on the idea of treating small and large primes differently. A prime is small if its square is at most n; otherwise it is large. Observe that each number less than or equal to n has at most one large prime factor. (For n = 3000, the small primes are those up to 53.)
We create a table indexed by pairs. The first component of each pair specifies the number of large primes in use; the second component specifies the set of small primes in use. The value at a particular index is the number of solutions with that usage pattern, not counting 1 or standalone large primes (we account for those later by multiplying by the appropriate power of 2).
We iterate downward over the numbers j that have no large prime factor. At the beginning of each iteration, the table holds the counts for subsets drawn from j..n. There are two additions in the inner loop. The first accounts for extending subsets by j itself, which does not increase the number of large primes in use. The second accounts for extending subsets by j times a large prime. The number of suitable large primes is the number of large primes at most n/j, minus the number of large primes already in use, since the downward iteration implies that every large prime already in use is at most n/j.
At the end, we sum the table entries. Each subset counted in the table gives rise to 2^k subsets, where k is one plus the number of unused large primes, since 1 and each unused large prime can be included or excluded independently.
/* assumes int, long are 32, 64 bits respectively */
#include <stdio.h>
#include <stdlib.h>
enum {
NMAX = 3000
};
static int n;
static long m;
static unsigned smallfactors[NMAX + 1];
static int prime[NMAX - 1];
static int primecount;
static int smallprimecount;
static int largeprimefactor[NMAX + 1];
static int largeprimecount[NMAX + 1];
static long **table;
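/* Explanatory note added for readability (not in the original post):
   smallfactors[x] is a bitmask of the small primes dividing x, indexed by
   position in prime[]; prime[] holds the small primes first, then the large
   ones; table[i][f] counts the subsets built so far that use exactly i large
   primes and exactly the small primes in bitmask f (1 and the unused large
   primes are accounted for at the end via powers of 2 in computetotal()). */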
static void eratosthenes(void) {
int i;
for (i = 2; i * i <= n; i++) {
int j;
if (smallfactors[i]) continue;
for (j = i; j <= n; j += i) smallfactors[j] |= 1U << primecount;
prime[primecount++] = i;
}
smallprimecount = primecount;
for (; i <= n; i++) {
if (!smallfactors[i]) prime[primecount++] = i;
}
if (0) {
int k;
for (k = 0; k < primecount; k++) printf("%d\n", prime[k]);
}
}
static void makelargeprimefactor(void) {
int i;
for (i = smallprimecount; i < primecount; i++) {
int p = prime[i];
int j;
for (j = p; j <= n; j += p) largeprimefactor[j] = p;
}
}
static void makelargeprimecount(void) {
int i = 1;
int j;
for (j = primecount; j > smallprimecount; j--) {
for (; i <= n / prime[j - 1]; i++) {
largeprimecount[i] = j - smallprimecount;
}
}
if (0) {
for (i = 1; i <= n; i++) printf("%d %d\n", i, largeprimecount[i]);
}
}
static void maketable(void) {
int i;
int j;
table = calloc(smallprimecount + 1, sizeof *table);
for (i = 0; i <= smallprimecount; i++) {
table[i] = calloc(1U << smallprimecount, sizeof *table[i]);
}
table[0][0U] = 1L % m;
for (j = n; j >= 2; j--) {
int lpc = largeprimecount[j];
unsigned sf = smallfactors[j];
if (largeprimefactor[j]) continue;
for (i = 0; i < smallprimecount; i++) {
long *cur = table[i];
long *next = table[i + 1];
unsigned f;
for (f = sf; f < (1U << smallprimecount); f = (f + 1U) | sf) {
cur[f] = (cur[f] + cur[f & ~sf]) % m;
}
if (lpc - i <= 0) continue;
for (f = sf; f < (1U << smallprimecount); f = (f + 1U) | sf) {
next[f] = (next[f] + cur[f & ~sf] * (lpc - i)) % m;
}
}
}
}
static long timesexp2mod(long x, int y) {
long z = 2L % m;
for (; y > 0; y >>= 1) {
if (y & 1) x = (x * z) % m;
z = (z * z) % m;
}
return x;
}
static long computetotal(void) {
long total = 0L;
int i;
for (i = 0; i <= smallprimecount; i++) {
unsigned f;
for (f = 0U; f < (1U << smallprimecount); f++) {
total = (total + timesexp2mod(table[i][f], largeprimecount[1] - i + 1)) % m;
}
}
return total;
}
int main(void) {
scanf("%d%ld", &n, &m);
eratosthenes();
makelargeprimefactor();
makelargeprimecount();
maketable();
if (0) {
int i;
for (i = 0; i < 100; i++) {
printf("%d %ld\n", i, timesexp2mod(1L, i));
}
}
printf("%ld\n", computetotal());
return EXIT_SUCCESS;
}
Answer 1 (score: 6)
Here's an answer that gets through the first 200 elements of the sequence in under a second, giving the correct answer 200 → 374855124868136960. With optimizations (see Edit 1) it can compute the first 500 entries in under 90 s, which is quick, although @David Eisenstat's answer is likely to be better if it can be developed further. I think it takes a different approach to the algorithms given so far, including my own original answer, so I'm posting it separately.
After optimizing, I realised I was really working on a graph problem, so I rewrote the solution as a graph implementation (see Edit 2). The graph implementation allows some further optimizations, is much more elegant, is faster by more than an order of magnitude, and scales better: it computes f(600) in 1.5 s rather than 27 s.
The main idea here is to use a recursion relation. For any set, the number of subsets meeting the criterion is the sum of: the number of subsets with one element removed, plus the number of subsets with that element definitely included. In the second case, when the element is definitely included, any other elements that are not coprime with it have to be removed.
Efficiency issues: the candidate numbers are fed in in reverse order to reduce the recursion depth, and results are cached/memoized (see set2NumCoPrimeSets in the code below).
The code follows.
#include <cassert>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <iostream>
#include <ctime>
const int PRIMES[] = // http://rlrr.drum-corps.net/misc/primes1.shtml
{ 2, 3, 5, ...
..., 2969, 2971, 2999 };
const int NPRIMES = sizeof(PRIMES) / sizeof(int);
typedef std::set<int> intset;
typedef std::vector<intset> intsetvec;
const int MAXCALC = 200; // answer at http://oeis.org/A084422/b084422.txt
intsetvec primeFactors(MAXCALC +1);
typedef std::vector<int> intvec;
// Caching / memoization
typedef std::map<intvec, double> intvec2dbl;
intvec2dbl set2NumCoPrimeSets;
double NumCoPrimeSets(const intvec& set)
{
if (set.empty())
return 1;
// Caching / memoization
const intvec2dbl::const_iterator i = set2NumCoPrimeSets.find(set);
if (i != set2NumCoPrimeSets.end())
return i->second;
// Result is the number of coprime sets in:
// setA, the set that definitely has the first element of the input present
// + setB, the set the doesn't have the first element of the input present
// Because setA definitely has the first element, we remove elements it isn't coprime with
// We also remove the first element: as this is definitely present it doesn't make any
// difference to the number of sets
intvec setA(set);
const int firstNum = *setA.begin();
const intset& factors = primeFactors[firstNum];
for(int factor : factors) {
setA.erase(std::remove_if(setA.begin(), setA.end(),
[factor] (int i) { return i % factor == 0; } ), setA.end());
}
// If the first element was already coprime with the rest, then we have setA = setB
// and we can do a single call (m=2). Otherwise we have two recursive calls.
double m = 1;
double c = 0;
assert(set.size() - setA.size() > 0);
if (set.size() - setA.size() > 1) {
intvec setB(set);
setB.erase(setB.begin());
c = NumCoPrimeSets(setB);
}
else {
// first elt coprime with rest
m = 2;
}
const double numCoPrimeSets = m * NumCoPrimeSets(setA) + c;
// Caching / memoization
set2NumCoPrimeSets.insert(intvec2dbl::value_type(set, numCoPrimeSets));
return numCoPrimeSets;
}
int main(int argc, char* argv[])
{
// Calculate prime numbers that factor into each number upto MAXCALC
primeFactors[1].insert(1); // convenient
for(int i=2; i<=MAXCALC; ++i) {
for(int j=0; j<NPRIMES; ++j) {
if (i % PRIMES[j] == 0) {
primeFactors[i].insert(PRIMES[j]);
}
}
}
const clock_t start = clock();
for(int n=1; n<=MAXCALC; ++n) {
intvec v;
for(int i=n; i>0; --i) { // reverse order to reduce recursion
v.push_back(i);
}
const clock_t now = clock();
const clock_t ms = now - start;
const double numCoPrimeSubsets = NumCoPrimeSets(v);
std::cout << n << ", " << std::fixed << numCoPrimeSubsets << ", " << ms << "\n";
}
return 0;
}
The timing characteristics look a lot better than my first answer, but it still won't get up to 3000 in 5 s!
Edit 1
There are some interesting optimizations that can be made to this method. Overall they give about a 4x improvement for larger n.
- Any numbers in the set that are already coprime with all the other elements can be removed in a single preprocessing step: if m such numbers are removed, the original set has 2^m times as many combinations as the reduced one (each of them can be put in or left out of a subset independently of the rest).
- It works better to remove the most "connected" number, i.e. the one sharing a factor with the most other elements, rather than always the first element (see "Removing the most connected number" in the trace below).
- All the numbers with the same set of prime factors can be removed at once. For example, in the set {2, 3, 15, 19, 45} the numbers 15 and 45 have the same prime factors 3 and 5. Two numbers are removed at once, so the number of subsets of {2, 3, 15, 19, 45} = twice the number of combinations with 15 or 45 present (counted on the set {2, 19}, because 3 must be absent whenever 15 or 45 is present) + the number of subsets with both 15 and 45 absent (counted on the set {2, 3, 19}).
- Using the short number type improves performance by about 10%.
- Finally, sets can be transformed into equivalent (isomorphic) sets that use a smaller set of primes, in the hope of getting more cache hits; for example, {3, 9, 15} is equivalent to {2, 4, 6}. This was the most radical idea, but probably has the least effect on performance.
It is probably much easier to understand how this works with a concrete example. I have chosen the set {1..12}, which is big enough to get a feel for how it works but small enough to remain comprehensible. The trace below shows what happens:
NumCoPrimeSets({ 1 2 3 4 5 6 7 8 9 10 11 12 })
Removed 3 coprimes, giving set { 2 3 4 5 6 8 9 10 12 } multiplication factor now 8
Removing the most connected number 12 with 8 connections
To get setA, remove all numbers which have *any* of the prime factors { 2 3 }
setA = { 5 }
To get setB, remove 2 numbers which have *exactly* the prime factors { 2 3 }
setB = { 2 3 4 5 8 9 10 }
**** Recursing on 2 * NumCoPrimeSets(setA) + NumCoPrimeSets(setB)
NumCoPrimeSets({ 5 })
Base case return the multiplier, which is 2
NumCoPrimeSets({ 2 3 4 5 8 9 10 })
Removing the most connected number 10 with 4 connections
To get setA, remove all numbers which have *any* of the prime factors { 2 5 }
setA = { 3 9 }
To get setB, remove 1 numbers which have *exactly* the prime factors { 2 5 }
setB = { 2 3 4 5 8 9 }
**** Recursing on 1 * NumCoPrimeSets(setA) + NumCoPrimeSets(setB)
NumCoPrimeSets({ 3 9 })
Transformed 2 primes, giving new set { 2 4 }
Removing the most connected number 4 with 1 connections
To get setA, remove all numbers which have *any* of the prime factors { 2 }
setA = { }
To get setB, remove 2 numbers which have *exactly* the prime factors { 2 }
setB = { }
**** Recursing on 2 * NumCoPrimeSets(setA) + NumCoPrimeSets(setB)
NumCoPrimeSets({ })
Base case return the multiplier, which is 1
NumCoPrimeSets({ })
Base case return the multiplier, which is 1
**** Returned from recursing on 2 * NumCoPrimeSets({ }) + NumCoPrimeSets({ })
Caching for{ 2 4 }: 3 = 2 * 1 + 1
Returning for{ 3 9 }: 3 = 1 * 3
NumCoPrimeSets({ 2 3 4 5 8 9 })
Removed 1 coprimes, giving set { 2 3 4 8 9 } multiplication factor now 2
Removing the most connected number 8 with 2 connections
To get setA, remove all numbers which have *any* of the prime factors { 2 }
setA = { 3 9 }
To get setB, remove 3 numbers which have *exactly* the prime factors { 2 }
setB = { 3 9 }
**** Recursing on 3 * NumCoPrimeSets(setA) + NumCoPrimeSets(setB)
NumCoPrimeSets({ 3 9 })
Transformed 2 primes, giving new set { 2 4 }
Cache hit, returning 3 = 1 * 3
NumCoPrimeSets({ 3 9 })
Transformed 2 primes, giving new set { 2 4 }
Cache hit, returning 3 = 1 * 3
**** Returned from recursing on 3 * NumCoPrimeSets({ 3 9 }) + NumCoPrimeSets({ 3 9 })
Caching for{ 2 3 4 8 9 }: 12 = 3 * 3 + 3
Returning for{ 2 3 4 5 8 9 }: 24 = 2 * 12
**** Returned from recursing on 1 * NumCoPrimeSets({ 3 9 }) + NumCoPrimeSets({ 2 3 4 5 8 9 })
Caching for{ 2 3 4 5 8 9 10 }: 27 = 1 * 3 + 24
Returning for{ 2 3 4 5 8 9 10 }: 27 = 1 * 27
**** Returned from recursing on 2 * NumCoPrimeSets({ 5 }) + NumCoPrimeSets({ 2 3 4 5 8 9 10 })
Caching for{ 2 3 4 5 6 8 9 10 12 }: 31 = 2 * 2 + 27
Returning for{ 1 2 3 4 5 6 7 8 9 10 11 12 }: 248 = 8 * 31
The code below can process the sequence up to about n = 600 in roughly 5 minutes.
#include <cassert>
#include <vector>
#include <set>
#include <map>
#include <unordered_map>
#include <queue>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <ctime>
#include <cmath>     // for pow() used below
typedef short numtype;
const numtype PRIMES[] = // http://rlrr.drum-corps.net/misc/primes1.shtml
...
const numtype NPRIMES = sizeof(PRIMES) / sizeof(numtype);
typedef std::set<numtype> numset;
typedef std::vector<numset> numsetvec;
const numtype MAXCALC = 200; // answer at http://oeis.org/A084422/b084422.txt
numsetvec primeFactors(MAXCALC +1);
typedef std::vector<numtype> numvec;
// Caching / memoization
typedef std::map<numvec, double> numvec2dbl;
numvec2dbl set2NumCoPrimeSets;
double NumCoPrimeSets(const numvec& initialSet)
{
// Preprocessing step: remove numbers which are already coprime
typedef std::unordered_map<numtype, numvec> num2numvec;
num2numvec prime2Elts;
for(numtype num : initialSet) {
const numset& factors = primeFactors[num];
for(numtype factor : factors) {
prime2Elts[factor].push_back(num);
}
}
numset eltsToRemove(initialSet.begin(), initialSet.end());
typedef std::vector<std::pair<numtype,int>> numintvec;
numvec primesRemaining;
for(const num2numvec::value_type& primeElts : prime2Elts) {
if (primeElts.second.size() > 1) {
for (numtype num : primeElts.second) {
eltsToRemove.erase(num);
}
primesRemaining.push_back(primeElts.first);
}
}
double mult = pow(2.0, eltsToRemove.size());
if (eltsToRemove.size() == initialSet.size())
return mult;
// Do the removal by creating a new set
numvec set;
for(numtype num : initialSet) {
if (eltsToRemove.find(num) == eltsToRemove.end()) {
set.push_back(num);
}
}
// Transform to use a smaller set of primes before checking the cache
// (beta code but it seems to work, mostly!)
std::sort(primesRemaining.begin(), primesRemaining.end());
numvec::const_iterator p = primesRemaining.begin();
for(int j=0; p!= primesRemaining.end() && j<NPRIMES; ++p, ++j) {
const numtype primeRemaining = *p;
if (primeRemaining != PRIMES[j]) {
for(numtype& num : set) {
while (num % primeRemaining == 0) {
num = num / primeRemaining * PRIMES[j];
}
}
}
}
// Caching / memoization
const numvec2dbl::const_iterator i = set2NumCoPrimeSets.find(set);
if (i != set2NumCoPrimeSets.end())
return mult * i->second;
// Remove the most connected number
typedef std::unordered_map<numtype, int> num2int;
num2int num2ConnectionCount;
for(numvec::const_iterator srcIt=set.begin(); srcIt!=set.end(); ++srcIt) {
const numtype src = *srcIt;
const numset& srcFactors = primeFactors[src];
for(numvec::const_iterator tgtIt=srcIt +1; tgtIt!=set.end(); ++tgtIt) {
for(numtype factor : srcFactors) {
const numtype tgt = *tgtIt;
if (tgt % factor == 0) {
num2ConnectionCount[src]++;
num2ConnectionCount[tgt]++;
}
}
}
}
num2int::const_iterator connCountIt = num2ConnectionCount.begin();
numtype numToErase = connCountIt->first;
int maxConnCount = connCountIt->second;
for (; connCountIt!=num2ConnectionCount.end(); ++connCountIt) {
if (connCountIt->second > maxConnCount || connCountIt->second == maxConnCount && connCountIt->first > numToErase) {
numToErase = connCountIt->first;
maxConnCount = connCountIt->second;
}
}
// Result is the number of coprime sets in:
// setA, the set that definitely has a chosen element of the input present
// + setB, the set the doesn't have the chosen element(s) of the input present
// Because setA definitely has a chosen element, we remove elements it isn't coprime with
// We also remove the chosen element(s): as they are definitely present it doesn't make any
// difference to the number of sets
numvec setA(set);
const numset& factors = primeFactors[numToErase];
for(numtype factor : factors) {
setA.erase(std::remove_if(setA.begin(), setA.end(),
[factor] (numtype i) { return i % factor == 0; } ), setA.end());
}
// setB: remove all elements which have the same prime factors
numvec setB(set);
setB.erase(std::remove_if(setB.begin(), setB.end(),
[&factors] (numtype i) { return primeFactors[i] == factors; }), setB.end());
const size_t numEltsWithSamePrimeFactors = (set.size() - setB.size());
const double numCoPrimeSets =
numEltsWithSamePrimeFactors * NumCoPrimeSets(setA) + NumCoPrimeSets(setB);
// Caching / memoization
set2NumCoPrimeSets.insert(numvec2dbl::value_type(set, numCoPrimeSets));
return mult * numCoPrimeSets;
}
int main(int argc, char* argv[])
{
// Calculate prime numbers that factor into each number upto MAXCALC
for(numtype i=2; i<=MAXCALC; ++i) {
for(numtype j=0; j<NPRIMES; ++j) {
if (i % PRIMES[j] == 0) {
primeFactors[i].insert(PRIMES[j]);
}
}
}
const clock_t start = clock();
std::ofstream fout("out.txt");
for(numtype n=0; n<=MAXCALC; ++n) {
numvec v;
for(numtype i=1; i<=n; ++i) {
v.push_back(i);
}
const clock_t now = clock();
const clock_t ms = now - start;
const double numCoPrimeSubsets = NumCoPrimeSets(v);
fout << n << ", " << std::fixed << numCoPrimeSubsets << ", " << ms << "\n";
std::cout << n << ", " << std::fixed << numCoPrimeSubsets << ", " << ms << "\n";
}
return 0;
}
However, the timings still look exponential, doubling every 50 to 60 values of n or so. The graph of the time to compute just a single n is shown below.
Edit 2
This solution is much more naturally implemented in terms of a graph. Two more optimizations emerged:
- Most importantly, if the graph G can be partitioned into two sets A and B such that there are no connections between A and B, then coprimes(G) = coprimes(A) * coprimes(B).
- Secondly, all the numbers with a given set of prime factors can be collapsed into a single node, whose value is the count of numbers with that set of prime factors.
In the code below, the Graph class holds the original adjacency matrix and the node values, while the FilteredGraph class holds the current list of remaining nodes as a bitset, so that as nodes are removed the new adjacency matrix can be calculated by bit masking (and there is relatively little data to pass down in the recursion).
#include "Primes.h"
#include <cassert>
#include <bitset>
#include <vector>
#include <set>
#include <map>
#include <unordered_map>
#include <algorithm>
#include <iostream>
#include <ctime>
#include <cmath>     // for pow() in main()
// Graph declaration
const int MAXGROUPS = 1462; // empirically determined
class Graph
{
typedef std::bitset<MAXGROUPS> bitset;
typedef std::vector<bitset> adjmatrix;
typedef std::vector<int> intvec;
public:
Graph(int numNodes)
: m_nodeValues(numNodes), m_adjMatrix(numNodes) {}
void SetNodeValue(int i, int v) { m_nodeValues[i] = v; }
void SetConnection(int i, int j)
{
m_adjMatrix[i][j] = true;
m_adjMatrix[j][i] = true;
}
int size() const { return m_nodeValues.size(); }
private:
adjmatrix m_adjMatrix;
intvec m_nodeValues;
friend class FilteredGraph;
};
class FilteredGraph
{
typedef Graph::bitset bitset;
public:
FilteredGraph(const Graph* unfiltered);
int FirstNode() const;
int RemoveNode(int node);
void RemoveNodesConnectedTo(int node);
double RemoveDisconnectedNodes();
bool AttemptPartition(FilteredGraph* FilteredGraph);
size_t Hash() const { return std::hash<bitset>()(m_includedNodes); }
bool operator==(const FilteredGraph& x) const
{ return x.m_includedNodes == m_includedNodes && x.m_unfiltered == m_unfiltered; }
private:
bitset RawAdjRow(int i) const {
return m_unfiltered->m_adjMatrix[i];
}
bitset AdjRow(int i) const {
return RawAdjRow(i) & m_includedNodes;
}
int NodeValue(int i) const {
return m_unfiltered->m_nodeValues[i];
}
const Graph* m_unfiltered;
bitset m_includedNodes;
};
// Cache
namespace std {
template<>
class hash<FilteredGraph> {
public:
size_t operator()(const FilteredGraph & x) const { return x.Hash(); }
};
}
typedef std::unordered_map<FilteredGraph, double> graph2double;
graph2double cache;
// MAIN FUNCTION
double NumCoPrimesSubSets(const FilteredGraph& graph)
{
graph2double::const_iterator cacheIt = cache.find(graph);
if (cacheIt != cache.end())
return cacheIt->second;
double rc = 1;
FilteredGraph A(graph);
FilteredGraph B(graph);
if (A.AttemptPartition(&B)) {
rc = NumCoPrimesSubSets(A);
A = B;
}
const int nodeToRemove = A.FirstNode();
if (nodeToRemove < 0) // empty graph
return 1;
// Graph B is the graph with a node removed
B.RemoveNode(nodeToRemove);
// Graph A is the graph with the node present -- and hence connected nodes removed
A.RemoveNodesConnectedTo(nodeToRemove);
// The number of numbers in the node is the number of times it can be reused
const double removedNodeValue = A.RemoveNode(nodeToRemove);
const double A_disconnectedNodesMult = A.RemoveDisconnectedNodes();
const double B_disconnectedNodesMult = B.RemoveDisconnectedNodes();
const double A_coprimes = NumCoPrimesSubSets(A);
const double B_coprimes = NumCoPrimesSubSets(B);
rc *= removedNodeValue * A_disconnectedNodesMult * A_coprimes
+ B_disconnectedNodesMult * B_coprimes;
cache.insert(graph2double::value_type(graph, rc));
return rc;
}
// Program entry point
int Sequence2Graph(Graph** ppGraph, int n);
int main(int argc, char* argv[])
{
const clock_t start = clock();
int n=800; // runs in approx 6s on my machine
Graph* pGraph = nullptr;
const int coPrimesRemoved = Sequence2Graph(&pGraph, n);
const double coPrimesMultiplier = pow(2,coPrimesRemoved);
const FilteredGraph filteredGraph(pGraph);
const double numCoPrimeSubsets = coPrimesMultiplier * NumCoPrimesSubSets(filteredGraph);
delete pGraph;
cache.clear(); // as it stands the cache can't cope with other Graph objects, e.g. for other n
const clock_t now = clock();
const clock_t ms = now - start;
std::cout << n << ", " << std::fixed << numCoPrimeSubsets << ", " << ms << "\n";
return 0;
}
// Graph implementation
FilteredGraph::FilteredGraph(const Graph* unfiltered)
: m_unfiltered(unfiltered)
{
for(int i=0; i<m_unfiltered->size(); ++i) {
m_includedNodes.set(i);
}
}
int FilteredGraph::FirstNode() const
{
int firstNode=0;
for(; firstNode<m_unfiltered->size() && !m_includedNodes.test(firstNode); ++firstNode) {
}
if (firstNode == m_unfiltered->size())
return -1;
return firstNode;
}
int FilteredGraph::RemoveNode(int node)
{
m_includedNodes.set(node, false);
return NodeValue(node);
}
void FilteredGraph::RemoveNodesConnectedTo(const int node)
{
const bitset notConnected = ~RawAdjRow(node);
m_includedNodes &= notConnected;
}
double FilteredGraph::RemoveDisconnectedNodes()
{
double mult = 1.0;
for(int i=0; i<m_unfiltered->size(); ++i) {
if (m_includedNodes.test(i)) {
const int conn = AdjRow(i).count();
if (conn == 0) {
m_includedNodes.set(i, false);;
mult *= (NodeValue(i) +1);
}
}
}
return mult;
}
bool FilteredGraph::AttemptPartition(FilteredGraph* pOther)
{
typedef std::vector<int> intvec;
intvec includedNodesCache;
includedNodesCache.reserve(m_unfiltered->size());
for(int i=0; i<m_unfiltered->size(); ++i) {
if (m_includedNodes.test(i)) {
includedNodesCache.push_back(i);
}
}
if (includedNodesCache.empty())
return false;
const int startNode= includedNodesCache[0];
bitset currFoundNodes;
currFoundNodes.set(startNode);
bitset foundNodes;
do {
foundNodes |= currFoundNodes;
bitset newFoundNodes;
for(int i : includedNodesCache) {
if (currFoundNodes.test(i)) {
newFoundNodes |= AdjRow(i);
}
}
newFoundNodes &= ~ foundNodes;
currFoundNodes = newFoundNodes;
} while(currFoundNodes.count() > 0);
const size_t foundCount = foundNodes.count();
const size_t thisCount = m_includedNodes.count();
const bool isConnected = foundCount == thisCount;
if (!isConnected) {
if (foundCount < thisCount) {
pOther->m_includedNodes = foundNodes;
m_includedNodes &= ~foundNodes;
}
else {
pOther->m_includedNodes = m_includedNodes;
pOther->m_includedNodes &= ~foundNodes;
m_includedNodes = foundNodes;
}
}
return !isConnected;
}
// Initialization code to convert sequence from 1 to n into graph
typedef short numtype;
typedef std::set<numtype> numset;
bool setIntersect(const numset& setA, const numset& setB)
{
for(int a : setA) {
if (setB.find(a) != setB.end())
return true;
}
return false;
}
int Sequence2Graph(Graph** ppGraph, int n)
{
typedef std::map<numset, int> numset2int;
numset2int factors2count;
int coPrimesRemoved = n>0; // for {1}
// Calculate all sets of prime factors, and how many numbers belong to each set
for(numtype i=2; i<=n; ++i) {
if ((i > n/2) && (std::find(PRIMES, PRIMES+NPRIMES, i) !=PRIMES+NPRIMES)) {
++coPrimesRemoved;
}
else {
numset factors;
for(numtype j=0; j<NPRIMES && PRIMES[j]<n; ++j) {
if (i % PRIMES[j] == 0) {
factors.insert(PRIMES[j]);
}
}
factors2count[factors]++;
}
}
// Create graph
Graph*& pGraph = *ppGraph;
pGraph = new Graph(factors2count.size());
int srcNodeNum = 0;
for(numset2int::const_iterator i = factors2count.begin(); i!=factors2count.end(); ++i) {
pGraph->SetNodeValue(srcNodeNum, i->second);
numset2int::const_iterator j = i;
int tgtNodeNum = srcNodeNum+1;
for(++j; j!=factors2count.end(); ++j) {
if (setIntersect(i->first, j->first)) {
pGraph->SetConnection(srcNodeNum, tgtNodeNum);
}
++tgtNodeNum;
}
++srcNodeNum;
}
return coPrimesRemoved;
}
The graph of the time to compute coprimes({1..n}) for a single n is shown below in red (with the old approach in black).
Based on the observed (exponential) rate of growth, the prediction for n = 3000 is about 30 hours, assuming the program doesn't blow up. This is starting to look computationally feasible, especially with further optimizations, but it is nowhere near the required 5 s! No doubt the required solution is short and sweet, but this has been fun...
Answer 2 (score: 2)
This is reasonably straightforward in Haskell: n = 200 takes about 2 seconds, with exponential slowdown.
{-# OPTIONS_GHC -O2 #-}
f n = 2^(length second + 1) * (g [] first 0) where
second = filter (\x -> isPrime x && x > div n 2) [2..n]
first = filter (flip notElem second) [2..n]
isPrime k =
null [ x | x <- [2..floor . sqrt . fromIntegral $ k], k `mod`x == 0]
g s rrs depth
| null rrs = 2^(length s - depth)
| not $ and (map ((==1) . gcd r) s) = g s rs depth
+ g s' rs' (depth + 1)
| otherwise = g (r:s) rs depth
where r:rs = rrs
s' = r : filter ((==1) . gcd r) s
rs' = filter ((==1) . gcd r) rs
Answer 3 (score: 1)
Here's an approach that gets the given sequence up to n = 62 in under 5 s (with optimizations it does n = 75 in 5 s, but note that my second attempt at this problem does better). I'm assuming the modulo part of the question is just there to avoid numerical errors as the function gets big, so I'm ignoring it for now.
The approach is based on the fact that, for each prime, we can choose at most one number in the subset.
Starting with the set {1,2,3,4}, we map the numbers in the set to primes as below. I have included 1 as a prime here because it makes the exposition easier at this stage.
1 → {1}
2 → {2,4}
3 → {3}
We have 2 combinations for the "prime" 1 (don't include it, or include 1), 3 combinations for the prime 2 (don't include it, or include 2, or include 4), and 2 combinations for 3 (don't include it, or include 3). So the number of subsets is 2 * 3 * 2 = 12.
Similarly for {1,2,3,4,5} we have
1 → {1}
2 → {2,4}
3 → {3}
5 → {5}
giving 2 * 3 * 2 * 2 = 24.
But for {1,2,3,4,5,6} things are not so straightforward. We have
1 → {1}
2 → {2,4,6}
3 → {3}
5 → {5}
but if we choose the number 6 for the prime 2, we cannot choose any number for the prime 3. (As a footnote, in my first approach, which I may come back to, I treated this as if the choices for 3 were cut in half when we choose 6, so I used 3.5 rather than 4 for the number of combinations for the prime 2, and 2 * 3.5 * 2 * 2 = 28 gave the right answer. I couldn't get that approach to work beyond n = 17, however.)
The way I dealt with this is to split the processing of each set of prime factors at each level. So {2,4} has the prime factors {2}, whereas {6} has the prime factors {2,3}. Omitting the spurious entry for 1 (which is not a prime), we now have
2 → {{2} → {2,4}, {2,3} → {6}}
3 → {{3} → {3}}
5 → {{5} → {5}}
Now there are three paths along which to count the number of combinations: one path where we do not choose the prime 2, and two paths where we do, through {2} → {2,4} and through {2,3} → {6}:
- not choosing the prime 2 gives 1 * 2 * 2 = 4 combinations, because we can either choose 3 or not, and we can either choose 5 or not;
- going through {2} → {2,4} gives 2 * 2 * 2 = 8 combinations, noting that we can choose either 2 or 4;
- going through {2,3} → {6} gives 1 * 1 * 2 = 2 combinations, because we have only one choice for the prime 3, namely not to use it.
In total this gives 4 + 8 + 2 = 14 combinations (as an optimization note, the first and second paths could have been collapsed together to give 3 * 2 * 2 = 12). We also have the choice of whether to include 1 or not, so the total number of combinations is 2 * 14 = 28.
The C++ code that runs through the paths recursively is below. (It is C++11, written with Visual Studio 2012, but it should work on e.g. gcc, since I haven't included anything platform-specific.)
#include <cassert>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <iostream>
#include <ctime>
const int PRIMES[] = // http://rlrr.drum-corps.net/misc/primes1.shtml
{ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
103, 107, 109, 113, 127, 131, 137, 139, 149,
151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199 };
const int NPRIMES = sizeof(PRIMES) / sizeof(int);
typedef std::vector<int> intvec;
typedef std::set<int> intset;
typedef std::vector<std::set<int>> intsetvec;
struct FactorSetNumbers
{
intset factorSet;
intvec numbers; // we only need to store numbers.size(), but nice to see the vec itself
FactorSetNumbers() {}
FactorSetNumbers(const intset& factorSet_, int n)
: factorSet(factorSet_)
{
numbers.push_back(n);
}
};
typedef std::vector<FactorSetNumbers> factorset2numbers;
typedef std::vector<factorset2numbers> factorset2numbersArray;
double NumCoPrimeSubsets(
const factorset2numbersArray& factorSet2Numbers4FirstPrime,
int primeIndex, const intset& excludedPrimes)
{
const factorset2numbers& factorSet2Numbers = factorSet2Numbers4FirstPrime[primeIndex];
if (factorSet2Numbers.empty())
return 1;
// Firstly, we may choose not to use this prime number at all
double numCoPrimeSubSets = NumCoPrimeSubsets(factorSet2Numbers4FirstPrime,
primeIndex + 1, excludedPrimes);
// Optimization: if we're not excluding anything, then we can collapse
// the above call and the first call in the loop below together
factorset2numbers::const_iterator i = factorSet2Numbers.begin();
if (excludedPrimes.empty()) {
const FactorSetNumbers& factorSetNumbers = *i;
assert(factorSetNumbers.factorSet.size() == 1);
numCoPrimeSubSets *= (1 + factorSetNumbers.numbers.size());
++i;
}
// We are using this prime number. The number of subsets for this prime number is the sum of
// the number of subsets for each set of integers whose factors don't include an excluded factor
for(; i!=factorSet2Numbers.end(); ++i) {
const FactorSetNumbers& factorSetNumbers = *i;
intset intersect;
std::set_intersection(excludedPrimes.begin(),excludedPrimes.end(),
factorSetNumbers.factorSet.begin(),factorSetNumbers.factorSet.end(),
std::inserter(intersect,intersect.begin()));
if (intersect.empty()) {
intset unionExcludedPrimes;
std::set_union(excludedPrimes.begin(),excludedPrimes.end(),
factorSetNumbers.factorSet.begin(),factorSetNumbers.factorSet.end(),
std::inserter(unionExcludedPrimes,unionExcludedPrimes.begin()));
// Optimization: don't exclude on current first prime,
// because can't possibly occur later on
unionExcludedPrimes.erase(unionExcludedPrimes.begin());
numCoPrimeSubSets += factorSetNumbers.numbers.size() *
NumCoPrimeSubsets(factorSet2Numbers4FirstPrime,
primeIndex + 1, unionExcludedPrimes);
}
}
return numCoPrimeSubSets;
}
int main(int argc, char* argv[])
{
const int MAXCALC = 80;
intsetvec primeFactors(MAXCALC +1);
// Calculate prime numbers that factor into each number upto MAXCALC
for(int i=2; i<=MAXCALC; ++i) {
for(int j=0; j<NPRIMES; ++j) {
if (i % PRIMES[j] == 0) {
primeFactors[i].insert(PRIMES[j]);
}
}
}
const clock_t start = clock();
factorset2numbersArray factorSet2Numbers4FirstPrime(NPRIMES);
for(int n=2; n<=MAXCALC; ++n) {
{
// For each prime, store all the numbers whose first prime factor is that prime
// E.g. for the prime 2, for n<=20, we store
// {2}, { 2, 4, 8, 16 }
// {2, 3}, { 6, 12, 18 }
// {2, 5}, { 5, 10, 20 }
// {2, 7}, { 14 }
const int firstPrime = *primeFactors[n].begin();
const int firstPrimeIndex = std::find(PRIMES, PRIMES + NPRIMES, firstPrime) - PRIMES;
factorset2numbers& factorSet2Numbers = factorSet2Numbers4FirstPrime[firstPrimeIndex];
const factorset2numbers::iterator findFactorSet = std::find_if(factorSet2Numbers.begin(), factorSet2Numbers.end(),
[&](const FactorSetNumbers& x) { return x.factorSet == primeFactors[n]; });
if (findFactorSet == factorSet2Numbers.end()) {
factorSet2Numbers.push_back(FactorSetNumbers(primeFactors[n], n));
}
else {
findFactorSet->numbers.push_back(n);
}
// The number of coprime subsets is the number of coprime subsets for the first prime number,
// starting with an empty exclusion list
const double numCoPrimeSubSetsForNEquals1 = 2;
const double numCoPrimeSubsets = numCoPrimeSubSetsForNEquals1 *
NumCoPrimeSubsets(factorSet2Numbers4FirstPrime,
0, // primeIndex
intset()); // excludedPrimes
const clock_t now = clock();
const clock_t ms = now - start;
std::cout << n << ", " << std::fixed << numCoPrimeSubsets << ", " << ms << "\n";
}
}
return 0;
}
Timings: computing the sequence up to 40 takes under 0.1 s, up to 50 about 0.5 s, up to 60 about 2.5 s, up to 70 about 20 s, and up to 80 about 157 s.
Although this does seem to output the right numbers it is, as might be expected, far too costly. In particular it takes at least exponential (and quite possibly combinatorial) time.
Clearly this approach does not scale as required. However there may be something here that gives other people ideas (or rules the approach out as a failure). There seem to be two possibilities:
- This approach could be optimized further, for example by using bitset rather than set.
- There was a different idea, which worked up to n = 17 and failed at n = 18 and above, being out by a small amount. I spent a long time writing out the patterns and trying to work out why it
及以上失败,由少数人出来。我花了很长时间写出模式并试图找出n=18
突然失败的原因,但却无法看到它。我可以回到这里,或者如果有人有兴趣,我会将其作为替代答案。 编辑:我已经使用一些技巧进行了一些优化,尽量避免在可能的情况下重做现有计算,并且代码速度提高了约10倍。听起来不错,但它只是常量的改进。真正需要的是对这个问题的一些见解 - 例如我们可以#subsets(n+1)
上的#subsets(n)
Answer 4 (score: 0)
This is how I would do it:
1. Find the prime factors, mod m, of the numbers up to n.
2. Create a queue q of sets; add the empty set to it and set the counter to 1.
3. While the queue is not empty, pop a set X from the queue.
4. For all numbers k from max(X) to n, check whether the factors of the set intersect the factors of the number. If they don't, add X U {k} to the queue and increment the counter by 1. Otherwise, move on to the next k.
Two important things must be pointed out:
You don't need the numbers up to n themselves, only their prime factors; that means that for 12 you only need 2 and 3. This way checking whether two numbers are coprime becomes checking whether the intersection of two sets is empty.
Answer 5 (score: 0)
Here is a way to do it in O(n * 2^p), where p is the number of primes below n. It makes no use of the modulus.
class FailureCoprimeSubsetCounter{
int[] primes;//list of primes under n
PrimeSet[] primeSets;//all 2^primes.length
//A set of primes under n. And a count which goes with it.
class PrimeSet{
BitSet id;//flag x is 1 iff prime[x] is a member of this PrimeSet
long tally;//number of coprime sets that do not have a factor among these primes and do among all the other primes
//that is, we count the number of coprime sets whose maximal coprime subset of primes[] is described by this object
PrimeSet(int np){...}
}
int coprimeSubsets(int n){
//... initialization ...
for(int k=1; k<=n; k++){
PrimeSet p = listToPrimeSet(PrimeFactorizer.factorize(k));
for(int i=0; i<Math.pow(2,primes.length); i++){
//if p AND primes[i] is empty
//add primes[i].tally to PrimeSet[ p OR primes[i] ]
}
}
//return sum of all the tallies
}
}
But since this is a contest problem, there has to be a quicker and dirtier solution, and a more elegant one too: with 430 primes below 3000, this approach needs exponential time and space.
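The idea can be made concrete for small n. The sketch below is my own illustration (not the answerer's code) of the prime-bitmask DP described above; with n = 4 and m = 7 it prints 5, matching the example in the question. For n = 3000 a table with 2^430 entries is of course hopeless, as noted above.
// Hedged sketch of the O(n * 2^p) idea: tally[mask] = number of coprime
// subsets whose set of used primes is exactly `mask`. Illustration only.
#include <cstdio>
#include <vector>
int main() {
    const int n = 4, m = 7;                       // the small example from the question
    std::vector<int> primes;
    for (int x = 2; x <= n; ++x) {                // collect the primes up to n
        bool isPrime = true;
        for (int d = 2; d * d <= x; ++d) if (x % d == 0) isPrime = false;
        if (isPrime) primes.push_back(x);
    }
    const int p = (int)primes.size();
    std::vector<long long> tally(1u << p, 0);
    tally[0] = 1 % m;                             // the empty set
    for (int k = 1; k <= n; ++k) {
        unsigned pk = 0;                          // bitmask of the primes dividing k
        for (int i = 0; i < p; ++i) if (k % primes[i] == 0) pk |= 1u << i;
        for (unsigned mask = 0; mask < (1u << p); ++mask)
            if ((mask & pk) == 0)                 // subsets counted here use no prime of k
                tally[mask | pk] = (tally[mask | pk] + tally[mask]) % m;
    }
    long long total = 0;
    for (long long t : tally) total = (total + t) % m;
    std::printf("%lld\n", total);                 // prints 5 (= 12 mod 7)
    return 0;
}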
Answer 6 (score: 0)
EDIT: added a recursive approach below; it solves up to n = 50 within 5 seconds.
#include <iostream>
#include <vector>
using namespace std;
int coPrime[3001][3001] = {0};
int n, m;
// function that checks whether a new integer is coprime with all
//elements in the set S.
bool areCoprime ( int p, vector<int>& v ) {
for ( int i = 0; i < v.size(); i++ ) {
if ( !coPrime[v[i]][p] )
return false;
}
return true;
}
// implementation of Euclid's GCD between a and b
bool isCoprimeNumbers( int a, int b ) {
for ( ; ; ) {
if (!(a %= b)) return b == 1 ;
if (!(b %= a)) return a == 1 ;
}
}
int subsets( vector<int>& coprimeList, int index ) {
int count = 0;
for ( int i = index+1; i <= n; i++ ) {
if ( areCoprime( i, coprimeList ) ) {
count = ( count + 1 ) % m;
vector<int> newVec( coprimeList );
newVec.push_back( i );
count = ( count + subsets( newVec, i ) ) % m;
}
}
return count;
}
int main() {
cin >> n >> m;
int count = 1; // empty set
count += n; // sets with 1 element each.
// build coPrime matrix
for ( int i = 1; i <= 3000; i++ )
for ( int j = i+1; j <= 3000; j++ )
if ( isCoprimeNumbers( i, j ) )
coPrime[i][j] = 1;
// find sets beginning with i
for ( int i = 1; i <= n; i++ ) {
vector<int> empty;
empty.push_back( i );
count = ( count + subsets( empty, i ) ) % m;
}
cout << count << endl;
return 0;
}
A naive approach could be (for N = 3000):
Step 1: build a boolean matrix
1. Build a list of the primes from 2 to 1500.
2. For each number from 1 to 3000, build the set of its prime factors.
3. Compare each pair of sets to get a boolean matrix[3000][3000] that tells whether elements i and j are mutually coprime (1) or not (0).
Step 2: count the number of coprime sets of length k (k = 0 to 3000)
1. Initialize count = 1 (the empty set). Now let k = 1, and maintain a list of sets of length k.
2. Build the 3000 singleton sets, one for each element (incrementing count for each).
3. Scan each element from k to 3000 and see whether a new set can be formed by adding it to any of the existing sets of length k. Note: some of the newly formed sets may be identical; if you use a set of sets, only unique sets are stored.
4. Delete all the sets that still have length k.
5. Increment count by the current number of unique sets.
6. Set k = k + 1 and go to step 3.
Alternatively, you can maintain a list of the product of the elements in each set and check whether a new element is coprime with that product; in that case you don't need to store the boolean matrix.
The complexity of the above algorithm seems to be somewhere between O(n^2) and O(n^3).
Complete code in C++ below. (Improvement: the condition was added that an element should only be checked against a set if it is greater than the largest value in the set.)
#include <iostream>
#include <vector>
#include <set>
using namespace std;
int coPrime[3001][3001] = {0};
// function that checks whether a new integer is coprime with all
//elements in the set S.
bool areCoprime ( int p, set<int> S ) {
set<int>::iterator it_set;
for ( it_set = S.begin(); it_set != S.end(); it_set++ ) {
if ( !coPrime[p][*it_set] )
return false;
}
return true;
}
// implementation of Euclid's GCD between a and b
bool isCoprimeNumbers( int a, int b ) {
for ( ; ; ) {
if (!(a %= b)) return b == 1 ;
if (!(b %= a)) return a == 1 ;
}
}
int main() {
int n, m;
cin >> n >> m;
int count = 1; // empty set
set< set<int> > setOfSets;
set< set<int> >::iterator it_setOfSets;
// build coPrime matrix
for ( int i = 1; i <= 3000; i++ )
for ( int j = 1; j <= 3000; j++ )
if ( i != j && isCoprimeNumbers( i, j ) )
coPrime[i][j] = 1;
// build set of sets containing 1 element.
for ( int i = 1; i <= n; i++ ) {
set<int> newSet;
newSet.insert( i );
setOfSets.insert( newSet );
count = (count + 1) % m;
}
// Make sets of length k
for ( int k = 2; k <= n; k++ ) {
// Scane each element from k to n
set< set<int> > newSetOfSets;
for ( int i = k; i <= n; i++ ) {
//Scan each existing set.
it_setOfSets = setOfSets.begin();
for ( ; it_setOfSets != setOfSets.end(); it_setOfSets++ ) {
if ( i > *(( *it_setOfSets ).rbegin()) && areCoprime( i, *it_setOfSets ) ) {
set<int> newSet( *it_setOfSets );
newSet.insert( i );
newSetOfSets.insert( newSet );
}
}
}
count = ( count + newSetOfSets.size() ) % m;
setOfSets = newSetOfSets;
}
cout << count << endl;
return 0;
}
The code above seems to give the correct results, but it takes a huge amount of time (say, with M large enough):
For N = 4, count = 12. (almost instantaneous)
For N = 20, count = 3232. (2-3 seconds)
For N = 25, count = 11168. (2-3 seconds)
For N = 30, count = 31232 (4 seconds)
For N = 40, count = 214272 (30 seconds)
Answer 7 (score: 0)
Here is the different approach I mentioned earlier; it is indeed much faster than what I used before. Using an online interpreter (ideone), it computes up to coprime_subsets(117) in under 5 seconds.
The code builds up the possible sets, starting from the empty set and inserting each number into all the subsets it is compatible with.
primes_to_3000 = set([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999])
# primes up to sqrt(3000), used for factoring numbers
primes = set([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53])
factors = [set() for _ in xrange(3001)]
for p in primes:
for n in xrange(p, 3001, p):
factors[n].add(p)
def coprime_subsets(highest):
count = 1
used = {frozenset(): 1}
for n in xrange(1, highest+1):
if n in primes_to_3000:
# insert the primes into all sets
count <<= 1
if n < 54:
used.update({k.union({n}): v for k, v in used.iteritems()})
else:
for k in used:
used[k] *= 2
else:
for k in used:
# only insert into subsets that don't share any prime factors
if not factors[n].intersection(k):
count += used[k]
used[k.union(factors[n])] += used[k]
return count
Here is my idea and an implementation of it in Python. It seems to be slow, but I'm not sure whether that's just because of the way I was testing (using an online interpreter)... It may be that running it on a "real" computer would make a difference, but I can't test that at the moment.
The approach splits into two parts:
After that, I guess you just have to take the modulus...
Here is my Python implementation (improved version):
# primes up to 1500
primes = 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499
factors = [set() for _ in xrange(3001)]
for p in primes:
for n in xrange(p, 3001, p):
factors[n].add(p)
def coprime_subsets(highest, current=1, factors_used=frozenset(), cache={}):
"""
Determine the number of possible coprime subsets of numbers,
using numbers starting at index current.
factor_product is used for determining if a number can be added
to the current subset.
"""
if (current, factors_used) in cache:
return cache[current, factors_used]
count = 1
for n in xrange(current, highest+1):
if factors_used.intersection(factors[n]):
continue
count += coprime_subsets(highest, n+1, factors_used.union(factors[n]))
cache[current, factors_used] = count
return count
I have yet another idea, which I'll try to implement if I find the time. I believe a different approach could be quite a bit faster.
Answer 8 (score: -1)
It looks like the proposed answers, as well as the preamble of the question, address a different question than the one that was actually asked.
The question was:
Output the number of coprime subsets of {1, 2, 3, ..., n} modulo m.
The proposed answers attempt to address a different one:
Output the number of coprime subsets of {1, 2, 3, ..., n}.
These questions are not equivalent. The first one deals with the finite ring Z_m, while the second deals with the ring of integers Z, which has completely different arithmetic.
Moreover, the definition in the preamble of the question, "two integers are coprime if their greatest common divisor equals 1", is not applicable in Z_m: a finite ring has no arithmetically stable comparison, so there is no "greatest" common divisor.
The same objection applies to the example in the question: 3 and 4 are not relatively prime modulo 7, because both are divisible by 2 modulo 7: 4 = (2 * 2) % 7 and 3 = (2 * 5) % 7.
In fact, Z_m arithmetic is so strange that one can give the answer in O(1) time, at least for prime m: for any n and prime m there are no coprime pairs modulo m. Here is why: the nonzero elements of Z_m form a cyclic group of order m - 1, which means that for any nonzero elements a and b of Z_m one can write a = bc for some c in Z_m. This proves that there are no coprime pairs in Z_m for prime m.
Using the example from the question: look at {2,3} mod 7 and {3,4} mod 7: 2 = (3 * 3) % 7 and 3 = (4 * 6) % 7. Therefore {2,3} is not coprime in Z_7 (both elements are divisible by 3) and {3,4} is not coprime in Z_7 (both elements are divisible by 4).
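A quick brute-force check of the divisibility claim above (my own illustration, not part of the original answer): for the prime modulus 7, every nonzero residue is a multiple of every other nonzero residue.
// Checks that for all nonzero a, b in Z_7 there is some c with a == (b * c) % 7,
// e.g. 3 == (2 * 5) % 7. Illustration only.
#include <cstdio>
int main() {
    const int m = 7;
    bool allDivisible = true;
    for (int a = 1; a < m; ++a)
        for (int b = 1; b < m; ++b) {
            bool found = false;
            for (int c = 1; c < m && !found; ++c)
                if (a == (b * c) % m) found = true;
            if (!found) allDivisible = false;
        }
    std::printf("%s\n", allDivisible ? "every nonzero pair divides in Z_7"
                                     : "counterexample found");
    return 0;
}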
Now consider the case of composite m. Write m as a product of primes, m = p_1^i_1 * ... * p_k^i_k. If a and b share a prime factor, they are clearly not coprime. If at least one of them, say b, is not divisible by any of the primes p_1, ..., p_k, then a and b have a common factor for roughly the same reason as in the prime-m case: b is a multiplicative unit of Z_m, and therefore a is divisible by b in Z_m.
So it remains to consider the case where m is composite and a and b are divisible by distinct prime factors of m, say a is divisible by p and b by q. In this case they can indeed be coprime; for example, 2 and 3 modulo 6 are coprime.
So the question of coprime pairs boils down to these steps:
1. Find the prime factors of m that are less than n. If there are none, there are no coprime pairs.
2. Enumerate the products of these prime factors that are still factors smaller than n.
3. Count the number of Z-coprime pairs across the various cases.