I'm new to C++ and could really use some help. I'm trying to create a struct to interface with a GSL Monte Carlo routine (for this example that doesn't actually matter). I've searched C++ tutorials, Stack Overflow posts, and the GSL documentation, all with no luck. I'm using the Armadillo package for the matrix handling; it's very powerful. According to the documentation I can't put a dynamic array inside the struct, so I'm trying to find a way to make the struct member *M point at the values in the array *L[]. I'm sure vectors would be better, but 1) the rest of the (badly formatted) code already uses pointers, and 2) I'm treating this as a learning experience. I'm surprised that the addresses of *M and *L[] are not the same in my code. Also, less importantly, I'm surprised that my std::cout prints a different number of spaces on each line. The program exits before printing the last std::cout, as shown in the output below.

Thanks for any help!
#include "pch.h"
#include "stdio.h"
#include "complex"
#include "new"
#include "armadillo"
using namespace arma;
class Link
{
public:
arma::Mat<cx_double>::fixed<3, 3>* dir[4]; // pointer to directional SU(3) matrices
Link(); // default constructor
};
Link::Link() // default constructor - all directional matrices are the identity
{
for (size_t hcount = 0; hcount < 4; hcount++)
{
dir[hcount] = new arma::Mat<cx_double>::fixed<3, 3>{ fill::eye }; // create directional matrix in direction hcount
}
}
struct Param
{
Link* M;
};
int main()
{
const int size = 10;
Param* Parameters = new Param{ NULL };
Link* L[size];
arma::Mat<cx_double>::fixed<3, 3> One{ fill::eye };
for (size_t hcount = 0; hcount < 10; hcount++)
{
L[hcount] = new Link();
*L[hcount]->dir[1] = *L[hcount]->dir[1] + hcount * One; // Make each array element #1 unique
}
Parameters->M = L[0];
std::cout << "&L = " << &L << std::endl;
std::cout << "&Parameters->M = " << &Parameters->M << std::endl; // surprised that addresses are not the same
std::cout << std::endl;
std::cout << "&L[0] = " << &L[0] << std::endl;
std::cout << "&Parameters->M[0] = " << &Parameters->M[0] << std::endl;
std::cout << std::endl;
std::cout << "&L[5] = " << &L[5] << std::endl;
std::cout << "&Parameters->M[5] = " << &Parameters->M[5] << std::endl;
std::cout << std::endl;
std::cout << "&L[5]->dir[1] = " << &L[5]->dir[1] << std::endl;
std::cout << "&Parameters->M[5].dir[1] = " << &Parameters->M[5].dir[1] << std::endl;
std::cout << std::endl;
std::cout << "*L[5]->dir[1] = " << *L[5]->dir[1] << std::endl; // This works
std::cout << "*Parameters->M[5].dir[1] = " << *Parameters->M[5].dir[1] << std::endl; // This does not
std::cout << std::endl;
}
OUTPUT
&L = 0024F7CC
&Parameters->M = 004EEFD8
&L[0] = 0024F7CC
&Parameters->M[0] = 004E0578
&L[5] = 0024F7E0
&Parameters->M[5] = 004E05C8
&L[5]->dir[1] = 004E50C4
&Parameters->M[5].dir[1] = 004E05CC
*L[5]->dir[1] = (+6.000e+00,+0.000e+00) (0,0) (0,0)
(0,0) (+6.000e+00,+0.000e+00) (0,0)
(0,0) (0,0) (+6.000e+00,+0.000e+00)
*Parameters->M[5].dir[1] =
Answer 0 (score: 0)
是&L
的地址,因此它是指向第一个元素的指针的地址,而不是第一个元素本身的地址。与L
相同。这是& Parameters->M
成员M
的地址。您希望将Parameters
与L[0]
进行比较,除非当M不应指向Parameters->M
所指的元素而是指向数组本身的开头时,则要与{{ 1}}。但是然后您还必须更改分配。
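To make the distinction concrete, here is a minimal, self-contained sketch of the same situation. It uses a hypothetical stand-in for your Link class so it does not need Armadillo; the names mirror your code.

#include <iostream>

struct Link  { int id; };     // stand-in for the question's Link class
struct Param { Link* M; };

int main()
{
    const int size = 10;
    Link* L[size];
    for (int i = 0; i < size; ++i)
        L[i] = new Link{ i };

    Param* Parameters = new Param{ nullptr };
    Parameters->M = L[0];     // M receives a copy of the pointer stored in L[0]

    std::cout << L[0]  << " " << Parameters->M  << std::endl;  // same value: both point at the first Link
    std::cout << &L[0] << " " << &Parameters->M << std::endl;  // different: addresses of two distinct pointer variables

    // Parameters->M[5] would be undefined behaviour here: M points at ONE Link,
    // not at an array of Links, so M[5] reads memory past that single object.
    // That is what *Parameters->M[5].dir[1] did in the question's program.
}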
I find the use of the array of pointers a bit odd. Just use a Link array.
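One way to follow that suggestion is sketched below: keep the Link class from the question, but store the ten links in a plain Link array, so that M really does point at the first element of an array of Link objects. With that layout the expression that crashed, *Parameters->M[5].dir[1], names the same matrix as *L[5]->dir[1] did before. This is only an illustrative sketch, not the only possible fix; keeping the pointer array and declaring M as Link** would also work.

#include <complex>
#include <iostream>
#include <armadillo>
using namespace arma;

class Link
{
public:
    arma::Mat<cx_double>::fixed<3, 3>* dir[4];        // as in the question
    Link()
    {
        for (size_t hcount = 0; hcount < 4; hcount++)
            dir[hcount] = new arma::Mat<cx_double>::fixed<3, 3>{ fill::eye };
    }
};

struct Param
{
    Link* M;   // now intended to point at the start of a Link array
};

int main()
{
    const int size = 10;
    Link L[size];                                     // array of Link objects, not of pointers
    arma::Mat<cx_double>::fixed<3, 3> One{ fill::eye };

    for (size_t hcount = 0; hcount < size; hcount++)
        *L[hcount].dir[1] = *L[hcount].dir[1] + hcount * One;   // same update as in the question

    Param* Parameters = new Param{ L };               // M = &L[0], the start of the array
    std::cout << *Parameters->M[5].dir[1] << std::endl;         // same matrix as *L[5].dir[1]
}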