Suppose you have several functions y1 = f1(x), y2 = f2(x), etc. that you want to plot together on one graph. Imagine that, for whatever reason, it is easier to obtain the set of x's for a given y; as in, there is an oracle that, given a y, returns all x1, x2, etc. in increasing order such that y = f1(x1), y = f2(x2), and so on. Essentially, it evaluates the inverse functions at y. Suppose you sample uniformly in y, so for each sampled y you get a list of x's. If you plot this data, treating all the first x's of each sublist as one function of y, all the second x's of each sublist as a different function of y, and so on, you get a plot like the one below.
In the plot above, the horizontal axis is y and the vertical axis is x. Note that the dark blue "curve" is the set of smallest x's as a function of y (the set of points you would see looking at the plot from below), the magenta curve is the set of second-smallest x's, and so on. Clearly the leftmost dark blue and magenta curves belong to a single function and should be joined. Moving to the right, there are ambiguities and crossings; in those cases it doesn't matter much how the functions' curves get connected, as long as it "looks" reasonable (there is one correct way, but I'd settle for that for now).
Now, for each function, the output of the algorithm should be the samples of y as a function of x, so that when you plot it, it looks like:
(Apologies for the sloppy Paint editing job :)
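To make the setup concrete, here is a minimal sketch of the data layout I have in mind (the type names are purely illustrative, not from any real code):

#include <vector>

// One sample: a y value together with all the x's the oracle returned for it,
// already sorted in increasing order.
struct Sample{
    double y;
    std::vector<double> xs; // xs[0] is the smallest x at this y, xs[1] the next, and so on
};

// The whole data set, sampled uniformly in y. Plotting xs[0] against y for every
// sample gives one apparent "curve", xs[1] another, etc., as described above.
typedef std::vector<Sample> SampledData;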
My initial thought was to advance one column at a time through the first picture and try to match adjacent values based on their slopes and on how close they are to each other, but I'm not sure there isn't a better way. Also, if this is a solved or documented problem, I don't know what to Google for, so any pointers would be helpful.
Aside:
For anyone curious, the actual application is computing the band structure of crystals. The functions are the crystal's bands; the horizontal axis of the first plot is frequency and the vertical axis is the k-vector. It turns out that the eigenvalue problem is solved once per frequency, so in this case it is the eigenvalues (the k's) that are easier to obtain. Needing to "prettify" and join up the resulting band structure plots is quite common, and I was wondering whether there is an algorithmic way to do it rather than having to do it by hand.
Answer 0 (score: 1)
This is a "connect the dots" problem with some constraints:
You could pick the point with the smallest x and walk along that curve, looking for the most likely next point: look for the nearest point, and usually there will be an obvious winner. If there are two contenders, pick the one that best satisfies the slope and curvature constraints. As you build a curve, remove its points from the pool, and once that curve is finished, start the next one at the new xmin. You will probably have to handle stray points and glitches with a few rules of thumb, but I think that would do it.
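A rough sketch of that greedy walk (illustrative only, not tested code; Point, the slope weight, and the pool handling are made up):

#include <cmath>
#include <limits>
#include <vector>

struct Point { double x, y; };

// Trace one curve greedily: start at the point with the smallest x, then repeatedly
// pick the remaining point that best continues the curve. "Best" here means nearest
// in (x, y), penalized by how much the local slope would change. Chosen points are
// removed from the pool; call again on the remainder to trace the next curve.
std::vector<Point> trace_curve(std::vector<Point> &pool, double slope_weight = 1.0){
    std::vector<Point> curve;
    if(pool.empty()){ return curve; }
    size_t start = 0;
    for(size_t i = 1; i < pool.size(); ++i){
        if(pool[i].x < pool[start].x){ start = i; }
    }
    curve.push_back(pool[start]);
    pool.erase(pool.begin() + start);
    double prev_slope = 0; bool have_slope = false;
    for(;;){
        size_t best = pool.size();
        double best_cost = std::numeric_limits<double>::max();
        for(size_t i = 0; i < pool.size(); ++i){
            double dx = pool[i].x - curve.back().x;
            double dy = pool[i].y - curve.back().y;
            if(dx <= 0){ continue; } // only advance in x
            double cost = std::sqrt(dx*dx + dy*dy);
            if(have_slope){ cost += slope_weight * std::fabs(dy/dx - prev_slope); }
            if(cost < best_cost){ best_cost = cost; best = i; }
        }
        if(best == pool.size()){ break; } // no forward point left
        prev_slope = (pool[best].y - curve.back().y) / (pool[best].x - curve.back().x);
        have_slope = true;
        curve.push_back(pool[best]);
        pool.erase(pool.begin() + best);
        // a distance cutoff would go here as one of the "rules of thumb" for stray points
    }
    return curve;
}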
Answer 1 (score: 0)
OK, in the end I came up with a reasonable greedy algorithm. I first break the existing data up into one-to-one pieces (pieces that do not bend back on themselves in either domain). These pieces are then grown from their ends by considering all candidate pieces that could possibly connect to them, and the best one is chosen based on an error metric. The error metric accounts for the proximity of the pieces' endpoints, the proximity of the slopes at those endpoints, and the proximity of points extrapolated from the pieces. I'll post the fairly long code below in case anyone searching for this kind of thing finds it useful. The big comment blocks explain what is going on. The terminology used in the code is application-specific; I explained it in the aside above.
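To run it: main() reads the file named on the command line, one line per omega. Each line is one omega value followed by all the k values found at that omega, whitespace-separated; a line with only an omega means that frequency lies in a gap for every band. The omegas are assumed to be uniformly spaced and increasing (freqs[1]-freqs[0] is used as the omega step, and consecutive lines are treated as adjacent samples). A purely made-up fragment, just to show the shape of the input:

0.10 0.31
0.11 0.34 0.95
0.12 0.37 0.92
0.13 0.41 0.88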
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <set>
#include <cmath>
#include <algorithm>
#include <float.h>
const double max_relative_k_error_for_local_min_duplication = 0.05;
const double max_k_to_be_considered_separate_band = 0.1; // relative to entire k extent
typedef std::pair<double,double> band_point; // first is k, second is omega
struct band_point_sorter{
bool operator()(const band_point &a, const band_point &b){
return a.first < b.first;
}
};
struct band_piece{ // a one-to-one piece
size_t n;
std::vector<band_point> points;
band_piece(size_t N, const std::vector<band_point> &pts):n(N),points(pts){}
template <class Iter>
band_piece(size_t N, Iter first, Iter last):n(N),points(first, last){
std::sort(points.begin(), points.end(), band_point_sorter());
}
};
std::ostream& operator<<(std::ostream& os, const band_piece &p){
os << "(" << p.points.front().second << "," << p.points.front().first << ")-(" << p.points.back().second << "," << p.points.back().first << ")";
return os;
}
typedef std::vector<band_point> band_cluster; // a continuous set of one-to-one pieces
typedef std::vector<size_t> band; // index of one-to-one pieces
struct k_extent{
double kmin, kmax;
k_extent(const k_extent &ext):kmin(ext.kmin),kmax(ext.kmax){}
k_extent(double KMin, double KMax):kmin(KMin),kmax(KMax){
if(kmin > kmax){ std::swap(kmin,kmax); }
}
k_extent(const std::vector<band_point> &pts):kmin(pts.front().first),kmax(pts.back().first){
if(kmin > kmax){ std::swap(kmin,kmax); }
}
void Union(const k_extent &b){
if(b.kmin < kmin){ kmin = b.kmin; }
if(b.kmax > kmax){ kmax = b.kmax; }
}
};
bool KExtentsOverlap(const k_extent &a, const k_extent &b){
return (a.kmin <= b.kmin && b.kmin <= a.kmax)
|| (a.kmin <= b.kmax && b.kmax <= a.kmax)
|| (b.kmin <= a.kmin && a.kmin <= b.kmax)
|| (b.kmin <= a.kmax && a.kmax <= b.kmax);
}
struct PieceJoiningErrorMetric{
double k_normalization;
double domega;
PieceJoiningErrorMetric(const double &min_slope, const double &omega_step):k_normalization(1/min_slope),domega(omega_step){}
double operator()(const std::vector<band_point> &p0, const std::vector<band_point> &p1){
// We have a number of criteria that we want:
// 0 The nearest endpoints of the pieces should be close in k.
// 1 The slopes at the near end points should be close.
// 2 The linear extension from the near ends of the pieces should almost match.
// Criterion 2 is the most difficult to quantify:
// If we shoot a ray out from the endpoint of one segment, the point on the
// ray nearest the other piece's endpoint should not be the ray origin (the
// nearest point on the ray to the other endpoint should be at a positive
// ray coordinate). We do this for both segment endpoints, and take the larger
// of the two minimum distances.
double dk0 = 0, dk1 = 0, dw0 = 0, dw1 = 0, slope0, slope1;
double slope0_2; // slopes of next neighboring points
double slope1_2;
double k0, k1, k_diff, w0, w1;
if(p1.front().first > p0.back().first){ // p1 entirely after p0
k0 = p0.back().first; w0 = p0.back().second;
if(p0.size() > 1){
dk0 = p0.back().first - p0[p0.size()-2].first;
dw0 = p0.back().second - p0[p0.size()-2].second;
slope0 = dw0/dk0;
if(p1.size() < 2){
slope1 = slope0;
}
if(p0.size() > 4){ // give extra space since for too short segments, this is meaningless
slope0_2 = (p0[p0.size()-2].second - p0[p0.size()-3].second)/(p0[p0.size()-2].first - p0[p0.size()-3].first);
}
}
k1 = p1.front().first; w1 = p1.front().second;
if(p1.size() > 1){
dk1 = p1.front().first - p1[1].first;
dw1 = p1.front().second - p1[1].second;
slope1 = dw1/dk1;
if(p0.size() < 2){
slope0 = slope1;
}
if(p1.size() > 4){ // give extra space since for too short segments, this is meaningless
slope1_2 = (p1[1].second - p1[2].second)/(p1[1].first - p1[2].first);
}
}
k_diff = k1 - k0;
}else if(p0.front().first > p1.back().first){ // p0 entirely after p1
k0 = p0.front().first; w0 = p0.front().second;
if(p0.size() > 1){
dk0 = p0.front().first - p0[1].first;
dw0 = p0.front().second - p0[1].second;
slope0 = dw0/dk0;
if(p1.size() < 2){
slope1 = slope0;
}
if(p0.size() > 4){ // give extra space since for too short segments, this is meaningless
slope0_2 = (p0[1].second - p0[2].second)/(p0[1].first - p0[2].first);
}
}
k1 = p1.back().first; w1 = p1.back().second;
if(p1.size() > 1){
dk1 = p1.back().first - p1[p1.size()-2].first;
dw1 = p1.back().second - p1[p1.size()-2].second;
slope1 = dw1/dk1;
if(p0.size() < 2){
slope0 = slope1;
}
if(p1.size() > 4){ // give extra space since for too short segments, this is meaningless
slope1_2 = (p1[p1.size()-2].second - p1[p1.size()-3].second)/(p1[p1.size()-2].first - p1[p1.size()-3].first);
}
}
k_diff = k0 - k1;
}else{
return DBL_MAX;
}
k0 *= k_normalization;
k1 *= k_normalization;
dk0 *= k_normalization;
dk1 *= k_normalization;
k_diff *= k_normalization;
if(p0.size() < 2 && p1.size() < 2){
return k_diff*k_diff;
}
slope0 /= k_normalization;
slope1 /= k_normalization;
// max abs value for any slope here is 1
//// Perform the ray shooting
// Suppose we start at k0,w0, the ray is
// (k0,w0) + t*(dk0,dw0)
// Distance^2 to k1 is
// (k0-k1+t*dk0)^2 + (w0-w1+t*dw0)^2
// Minimize by taking derivative w.r.t. t:
// t = [ (k1-k0)*dk0 + (w1-w0)*dw0 ] / (dk0*dk0 + dw0*dw0);
double t, x;
// Follow the derivation above: divide (rather than multiply) by |(dk0,dw0)|^2;
// guard against a degenerate single-point piece whose direction vector is zero.
double denom = dk0*dk0 + dw0*dw0;
t = (denom > 0) ? (((k1-k0)*dk0 + (w1-w0)*dw0) / denom) : 0; if(t < 0){ return DBL_MAX; }
double dist2_01 = 0;
x = k0-k1+t*dk0;
dist2_01 += x*x;
x = w0-w1+t*dw0;
dist2_01 += x*x;
denom = dk1*dk1 + dw1*dw1;
t = (denom > 0) ? (((k0-k1)*dk1 + (w0-w1)*dw1) / denom) : 0; if(t < 0){ return DBL_MAX; }
double dist2_10 = 0;
x = k1-k0+t*dk1;
dist2_10 += x*x;
x = w1-w0+t*dw1;
dist2_10 += x*x;
//double k_scale = (dk1 > dk0) ? dk1 : dk0;
//double k_ext0 = p0.back().first - p0.front().first;
//double k_ext1 = p1.back().first - p1.front().first;
//double k_scale = (k_ext0 < k_ext1) ? k_ext0 : k_ext1;
double w_diff = fabs(w1-w0);
// We take the larger of the ray shooting distances
double dist2 = (dist2_01 > dist2_10) ? dist2_01 : dist2_10;
// We take the difference of slopes here and multiply by k_diff to get something in omega range
// Further, we take the next neighboring slopes in case the slopes are corrupted due to being near a degeneracy.
double dslope = fabs(slope0-slope1) * k_diff;
if(p0.size() > 4 && p1.size() > 4){
double dslope_2 = fabs(slope0_2-slope1_2) * k_diff;
if(dslope_2 < dslope){ dslope = dslope_2; }
}
// scale omega difference by slope so that for shallow slopes, we really want to get
// a good frequency match, also scales from omega range to k range
double max_slope = (std::abs(slope0) > std::abs(slope1)) ? slope0 : slope1;
w_diff /= max_slope;
// std::cerr << " k_diff = " << k_diff << ", w_diff = " << w_diff << ", slope0 = " << slope0 << ", slope1 = " << slope1 << ", dist2 = " << dist2 << std::endl;
return k_diff*k_diff + w_diff*w_diff + dslope*dslope + dist2;
}
};
int main(int argc, char *argv[]){
std::vector<double> freqs;
std::vector<std::vector<double> > raw_bands;
double max_k_extent = -DBL_MAX;
double min_k_extent = DBL_MAX;
double max_freq_extent = -DBL_MAX;
double min_freq_extent = DBL_MAX;
size_t max_num_k_per_omega = 0;
double min_slope = DBL_MAX; // dk/dw
std::ifstream in(argv[1]);
if(!in.is_open()){
std::cerr << "Could not open file: " << argv[1] << std::endl;
return 1;
}
std::string line;
while(getline(in, line)){
std::istringstream line_in(line);
double val;
line_in >> val;
freqs.push_back(val);
if(val < min_freq_extent){ min_freq_extent = val; }
if(val > max_freq_extent){ max_freq_extent = val; }
raw_bands.push_back(std::vector<double>());
while(line_in){
double val;
if(line_in >> val){
raw_bands.back().push_back(val);
if(val < min_k_extent){ min_k_extent = val; }
if(val > max_k_extent){ max_k_extent = val; }
}
}
if(raw_bands.back().size() > max_num_k_per_omega){ max_num_k_per_omega = raw_bands.back().size(); }
}
//// Split into one-to-one pieces
// A one-to-one piece is a connected subset of an omega-k band which is
// one-to-one with respect to omega and k.
// For a given n, the set of n-th smallest k values as a function of a
// omega is disjoint on the omega domain. We call a single connected part
// of the omega domain for a single set of n-th smallest k values an
// n-shadow, and the connected subset of the omega-k band an n-cluster.
// n-clusters are separated from one another by the bandgaps, so they are
// easy to distinguish.
// Each n-cluster is itself a conjunction of several one-to-one pieces.
// Boundaries between one-to-one pieces are defined by local minima or
// maxima on the omega domain, or by large jump discontinuities (jumps
// in k). We need to set a threshold beyond which jumps are considered
// one-to-one piece boundaries.
//
// For each band we first construct the n-clusters, then divide them into
// one-to-one pieces.
std::vector<band_piece> all_pieces;
for(size_t n = 0; n < max_num_k_per_omega; ++n){
// n is the n-th smallest k we are considering
// First collect all the clusters
std::vector<band_cluster> clusters;
size_t w = 0;
while(raw_bands[w].size() <= n){ ++w; }
for(; w < raw_bands.size(); ++w){
if(raw_bands[w].size() > n){
// If we are in a gap, there is nothing to do, otherwise...
if(0 == w || raw_bands[w-1].size() <= n){
// Encountering more k's, time to start a new cluster
clusters.push_back(band_cluster());
clusters.back().push_back(band_point(raw_bands[w][n], freqs[w]));
}else if(raw_bands[w-1].size() > n){
// We are within a cluster
clusters.back().push_back(band_point(raw_bands[w][n], freqs[w]));
}
}
}
// Now break each cluster into pieces
for(size_t c = 0; c < clusters.size(); ++c){
const band_cluster &cur_cluster = clusters[c];
band_cluster::const_iterator last_cut_loc = cur_cluster.begin();
// We skip first and last of each cluster since we can not cut adjacent to them
for(size_t w = 1; w < cur_cluster.size()-1; ++w){
double slope1 = fabs((cur_cluster[w].first - cur_cluster[w-1].first)/(cur_cluster[w].second-cur_cluster[w-1].second));
if(slope1 < min_slope){ min_slope = slope1; }
if(cur_cluster[w].first < cur_cluster[w-1].first && cur_cluster[w].first < cur_cluster[w+1].first){
// Check for local minimum
if(w > 1){
if(w >= cur_cluster.size()-2){
// can only get slope on the left, award to the left
if(last_cut_loc == cur_cluster.begin()+w+1){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w+1)); // note the +1, duplicates the extremum point
last_cut_loc = cur_cluster.begin()+w+2; ++w;
}else{
// Can get slope on both sides
double left_k_extrapolation = (cur_cluster[w-1].first-cur_cluster[w-2].first) * (cur_cluster[w].second-cur_cluster[w-2].second)/(cur_cluster[w-1].second-cur_cluster[w-2].second) + cur_cluster[w-2].first;
double right_k_extrapolation = (cur_cluster[w+1].first-cur_cluster[w+2].first) * (cur_cluster[w].second-cur_cluster[w+2].second)/(cur_cluster[w+1].second-cur_cluster[w+2].second) + cur_cluster[w+2].first;
// errors (units of k)
double left_l1_err = fabs( left_k_extrapolation-cur_cluster[w].first);
double right_l1_err = fabs(right_k_extrapolation-cur_cluster[w].first);
// Determine if the minimum should be copied
bool dup_min = false;
{ // might prefer to use a ray shooting criterion
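// NOTE: the scale here is taken from the k extent of the most recently stored piece,
// which assumes at least one piece has already been pushed onto all_pieces.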
double kext = fabs(all_pieces.back().points.front().first - all_pieces.back().points.back().first);
if(left_l1_err/kext < max_relative_k_error_for_local_min_duplication && right_l1_err/kext < max_relative_k_error_for_local_min_duplication){
dup_min = true;
}
}
if(left_l1_err < right_l1_err){
if(last_cut_loc == cur_cluster.begin()+w+1){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w+1));
if(dup_min){
last_cut_loc = cur_cluster.begin()+w;
}else{
last_cut_loc = cur_cluster.begin()+w+1; ++w;
}
}else{
if(dup_min){
if(last_cut_loc == cur_cluster.begin()+w+1){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w+1));
}else{
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
}
last_cut_loc = cur_cluster.begin()+w;
}
}
}else if(w < cur_cluster.size()-2){ // && w <= 1
// can only get slope on the right, award to the right
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
last_cut_loc = cur_cluster.begin()+w;
}else{
// by default give to the right
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
last_cut_loc = cur_cluster.begin()+w;
}
// Determine whether to duplicate the minimum point
}else if(cur_cluster[w].first > cur_cluster[w-1].first && cur_cluster[w].first > cur_cluster[w+1].first){
// Check for local maximum
// Which side gets the maximum point is determined by slope matching
if(w > 1){
if(w >= cur_cluster.size()-2){
// can only get slope on the left, award to the left
if(last_cut_loc == cur_cluster.begin()+w+1){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w+1));
last_cut_loc = cur_cluster.begin()+w+1; ++w;
}else{
// Can get slope on both sides
double left_k_extrapolation = (cur_cluster[w-1].first-cur_cluster[w-2].first) * (cur_cluster[w].second-cur_cluster[w-2].second)/(cur_cluster[w-1].second-cur_cluster[w-2].second) + cur_cluster[w-2].first;
double right_k_extrapolation = (cur_cluster[w+1].first-cur_cluster[w+2].first) * (cur_cluster[w].second-cur_cluster[w+2].second)/(cur_cluster[w+1].second-cur_cluster[w+2].second) + cur_cluster[w+2].first;
double left_l1_err = fabs( left_k_extrapolation-cur_cluster[w].first);
double right_l1_err = fabs(right_k_extrapolation-cur_cluster[w].first);
if(left_l1_err < right_l1_err){
if(last_cut_loc == cur_cluster.begin()+w+1){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w+1));
last_cut_loc = cur_cluster.begin()+w+1; ++w;
}else{
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
last_cut_loc = cur_cluster.begin()+w;
}
}
}else if(w < cur_cluster.size()-2){ // && w <= 1
// can only get slope on the right, award to the right
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
last_cut_loc = cur_cluster.begin()+w;
}else{
// by default give to the right
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
last_cut_loc = cur_cluster.begin()+w;
}
}else if(w > 1){
// Check for jump discontinuity
double diff0 = cur_cluster[w-1].first-cur_cluster[w-2].first;
double diff1 = cur_cluster[w ].first-cur_cluster[w-1].first;
double diff2 = cur_cluster[w+1].first-cur_cluster[w ].first;
if((diff0 > 0 && diff1 > 0 && diff2 > 0) || (diff0 < 0 && diff1 < 0 && diff2 < 0)){
diff0 = fabs(diff0);
diff1 = fabs(diff1);
diff2 = fabs(diff2);
if(diff1 > diff2 && diff1 > diff0){
const double &larger_neighboring_diff = (diff0 > diff2) ? diff0 : diff2;
if(diff1 > 5.0 * larger_neighboring_diff){
if(last_cut_loc == cur_cluster.begin()+w){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.begin()+w));
last_cut_loc = cur_cluster.begin()+w;
}
}
}
}
}
if(last_cut_loc == cur_cluster.end()){ continue; }
all_pieces.push_back(band_piece(n, last_cut_loc, cur_cluster.end()));
}
}
/*
// Output all pieces to check them
for(size_t i = 0; i < all_pieces.size(); ++i){
std::cout << "Piece " << i << ":" << std::endl;
for(size_t j = 0; j < all_pieces[i].points.size(); ++j){
std::cout << " " << all_pieces[i].points[j].first << "\t" << all_pieces[i].points[j].second << std::endl;
}
}
*/
/*
size_t max_piece_size = 0;
size_t max_piece_ind = 0;
for(size_t b = 0; b < all_pieces.size(); ++b){
if(all_pieces[b].points.size() > max_piece_size){ max_piece_size = all_pieces[b].points.size(); }
}
std::cout << "# " << all_pieces.size() << " pieces" << std::endl;
for(size_t i = 0; i < max_piece_size; ++i){
for(size_t b = 0; b < all_pieces.size(); ++b){
if(all_pieces[b].points.size() <= i){
std::cout << "\t" << all_pieces[b].points.back().first << "\t" << all_pieces[b].points.back().second;
}else{
std::cout << "\t" << all_pieces[b].points[i].first << "\t" << all_pieces[b].points[i].second;
}
}
std::cout << std::endl;
}
return 0;
*/
//// One-to-one piece connection
// The set of one-to-one pieces must now be joined into bands. Note that
// a band must stretch from min_k_extent to max_k_extent (thereabouts) and
// so will rarely consist of a single one-to-one piece (unless it is a
// monotonically varying band).
// Invariants:
// 0 The number of pieces from all 0-clusters determines the number of
// bands present.
// 1 A piece from an n-cluster cannot be joined to another piece from
// an n-cluster (cannot join same n's).
// 2 All pieces from a single n-cluster must lie in different bands.
// 3 A piece cannot be joined to another piece whose k-extents overlap.
// 4 A junction can only be incident on an even number of pieces. This
// is usually 2 unless there is a degeneracy or cross over.
// 5 A piece from an n-cluster can only be joined to a piece from an
// n+k-cluster, where k is -M to M, where M is the multiplicity of
// the junction if there is a degeneracy, otherwise it is 1. k not 0.
//
// We shall proceed as follows. We will try to grow the bands from min k
// to max k, starting with seed pieces from the 0-clusters. At the large k
// tip of every partial band we will consider all possible pieces to join
// to it. This can be done efficiently by maintaining the set of k-extents
// for each piece. The best possible piece to use will be the minimizer of
// an error metric.
// The error metric considers both nearness of piece endpoints as well as
// slope continuity. In all cases, we work in normalized space in which
// the extent in group velocity is unity (k and omega are scaled similarly)
const size_t npieces = all_pieces.size();
std::vector<k_extent> piece_k_extents;
std::vector<band> partial_bands;
std::vector<k_extent> partial_band_k_extents;
std::set<size_t> remaining_pieces;
// Initialize our structures
piece_k_extents.reserve(npieces);
for(size_t i = 0; i < all_pieces.size(); ++i){
piece_k_extents.push_back(k_extent(all_pieces[i].points));
if(0 == all_pieces[i].n){ // use Invariants 0 and 2
if(all_pieces[i].points.front().first-min_k_extent < max_k_to_be_considered_separate_band * (max_k_extent-min_k_extent)){
partial_bands.push_back(band()); partial_bands.back().push_back(i);
partial_band_k_extents.push_back(piece_k_extents.back());
}else{
remaining_pieces.insert(i);
}
}else{
remaining_pieces.insert(i);
}
}
// Perform the growing
PieceJoiningErrorMetric error_metric(min_slope, freqs[1]-freqs[0]);
bool made_progress = true;
size_t npass = 0;
while(!remaining_pieces.empty() && made_progress){
// std::cerr << "Pass " << npass << std::endl;
made_progress = false;
// Try to grow each band one piece at a time
for(size_t b = 0; b < partial_bands.size(); ++b){
band &cur_band = partial_bands[b];
std::set<size_t> candidate_pieces;
for(std::set<size_t>::const_iterator i = remaining_pieces.begin(); i != remaining_pieces.end(); ++i){
if(all_pieces[*i].n != all_pieces[cur_band.back()].n){ // use Invariant 1: do not join pieces with the same n
if(!KExtentsOverlap(piece_k_extents[*i], partial_band_k_extents[b])){ // use Invariant 3
candidate_pieces.insert(*i);
}
}
}
if(candidate_pieces.empty()){ continue; }
// Find best piece to use
double error = DBL_MAX;
size_t best_piece_so_far = (size_t)-1;
// std::cerr << " Considering joining " << cur_band.back() << " " << all_pieces[cur_band.back()] << " with:" << std::endl;
for(std::set<size_t>::const_iterator i = candidate_pieces.begin(); i != candidate_pieces.end(); ++i){
// std::cerr << " " << *i << " " << all_pieces[*i] << std::endl;
double new_error = error_metric(all_pieces[cur_band.back()].points, all_pieces[*i].points);
if(new_error < error){
error = new_error;
best_piece_so_far = *i;
}
}
if((size_t)-1 == best_piece_so_far){
// std::cerr << " Could not find a good candidate" << std::endl;
continue;
}
// std::cerr << " Joining with " << best_piece_so_far << " " << all_pieces[best_piece_so_far] << std::endl;
// Join the piece
partial_band_k_extents[b].Union(piece_k_extents[best_piece_so_far]);
cur_band.push_back(best_piece_so_far);
remaining_pieces.erase(remaining_pieces.find(best_piece_so_far));
made_progress = true;
}
++npass;
}
/*
// Output all bands
for(size_t b = 0; b < partial_bands.size(); ++b){
std::cout << "Band " << b << ":" << std::endl;
for(size_t i = 0; i < partial_bands[b].size(); ++i){
const band_piece &cur_piece = all_pieces[partial_bands[b][i]];
for(size_t j = 0; j < cur_piece.points.size(); ++j){
std::cout << " " << cur_piece.points[j].first << "\t" << cur_piece.points[j].second << std::endl;
}
}
}
*/
size_t max_band_size = 0;
std::vector<std::vector<band_point> > final_bands(partial_bands.size());
for(size_t b = 0; b < partial_bands.size(); ++b){
for(size_t i = 0; i < partial_bands[b].size(); ++i){
final_bands[b].insert(final_bands[b].end(), all_pieces[partial_bands[b][i]].points.begin(), all_pieces[partial_bands[b][i]].points.end());
}
if(final_bands[b].size() > max_band_size){ max_band_size = final_bands[b].size(); }
}
for(size_t i = 0; i < max_band_size; ++i){
for(size_t b = 0; b < final_bands.size(); ++b){
if(final_bands[b].size() <= i){
std::cout << "\t" << final_bands[b].back().first << "\t" << final_bands[b].back().second;
}else{
std::cout << "\t" << final_bands[b][i].first << "\t" << final_bands[b][i].second;
}
}
std::cout << std::endl;
}
std::cout << "Unused pieces:" << std::endl;
for(std::set<size_t>::const_iterator i = remaining_pieces.begin(); i != remaining_pieces.end(); ++i){
std::cout << "Piece " << *i << std::endl;
const band_piece &cur_piece = all_pieces[*i];
for(size_t j = 0; j < cur_piece.points.size(); ++j){
std::cout << " " << cur_piece.points[j].first << "\t" << cur_piece.points[j].second << std::endl;
}
}
return 0;
}
Answer 2 (score: -1)
All you need is to store all the points (x, y) in an array-like data structure. Then sort the list of points by their x values (you may need to write a custom comparison function), and out pops your nice function. I think even Excel can do that.
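A minimal version of that sorting step in C++ (the point typedef and comparator are only illustrative):

#include <algorithm>
#include <utility>
#include <vector>

typedef std::pair<double,double> point; // first is x, second is y

struct point_by_x{ // custom comparison: order points by their x value
    bool operator()(const point &a, const point &b) const { return a.first < b.first; }
};

int main(){
    std::vector<point> points; // fill with every (x, y) sample from all the functions
    std::sort(points.begin(), points.end(), point_by_x());
    // points is now ordered by x; plot second (y) against first (x)
    return 0;
}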