I am trying to write the results of a DQL query to a separate CSV file using DFC code, but the result for the first record is printed twice in the output file. I have attached a screenshot of the resultant CSV file.
public class ChkGroupExistence {

    /**
     * @param args
     */
    private static Properties queryProp;
    //private static Properties configProp;
    IDfSession sess = null;

    public ChkGroupExistence() {
        System.out.println("Loading Properties..");
        LoadProps loadProp = LoadProps.getInstance();
        queryProp = loadProp.getQueryProp();
        //configProp = loadProp.getConfigProp();
        List<String> proj_list = new ArrayList<String>();
        List<String> grp_list = new ArrayList<String>();
        List<String> acl_list = new ArrayList<String>();
        HashMap<String, String> projList_Map = new HashMap<String, String>();
        IDfCollection projId_coll = null;
        IDfCollection grp_coll = null;
        //IDfCollection chk_coll = null;
        //IDfCollection acl_coll = null;
        String grpqry = null;
        String chkqry = null;
        //String getACLQuery = null;
        int j = 0;
        CreateSession ifcDocsDfSession = new CreateSession();
        try {
            sess = ifcDocsDfSession.getSession();
            DfLogger.info(this, "Session Created ::" + sess.getSessionId(), null, null);
        } catch (DfException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
        String qry = queryProp.getProperty(IdocsConstants.PROJECT_ID);
        try {
            CSVWriter csvwriter = new CSVWriter(new FileWriter(new File("C:\\WBG\\IFCTools\\cs_log.csv")));
            projId_coll = Util.executeQuery(sess, qry, IDfQuery.READ_QUERY);
            while (projId_coll.next()) {
                proj_list.add(projId_coll.getString("project_id"));
            }
            System.out.println("List of Project ::" + proj_list.size());
            String tempQuery = queryProp.getProperty(IdocsConstants.P_GROUP_EXIST);
            //String tempQuery1 = queryProp.getProperty(IdocsConstants.P_GROUP_VERIFY);
            //String tempQuery2 = queryProp.getProperty(IdocsConstants.P_GETACL);
            List<String[]> csvList = new ArrayList<String[]>();
            List<String[]> titleList = new ArrayList<String[]>();
            String[] projList;
            String[] titleString;
            titleString = new String[3];
            titleString[0] = "ProjectId/Institutionnbr";
            titleString[1] = "GroupName";
            titleString[2] = "ACL Name";
            titleList.add(titleString);
            csvwriter.writeAll(titleList);
            for (int i = 0; i < proj_list.size(); i++) {
                //grpqry = tempQuery + proj_list.get(i) + "_ed_off_grp'";
                grpqry = MessageFormat.format(tempQuery, proj_list.get(i));
                //chkqry = queryProp.getProperty(IdocsConstants.P_GROUP_VERIFY);
                //System.out.println(grpqry);
                //getACLQuery = MessageFormat.format(tempQuery2, proj_list.get(i));
                //System.out.println(getACLQuery);
                //System.out.println("grp_coll query is executing....");
                grp_coll = Util.executeQuery(sess, grpqry, IDfQuery.READ_QUERY);
                //System.out.println("verification query is executing.....");
                //chk_coll = Util.executeQuery(sess, chkqry, IDfQuery.READ_QUERY);
                //acl_coll = Util.executeQuery(sess, getACLQuery, IDfQuery.READ_QUERY);
                if (grp_coll != null && grp_coll.next()) {
                    String grpName = grp_coll.getString("group_name");
                    grp_list.add(grpName);
                    System.out.println("Got group for " + proj_list.get(i) + " :: " + grpName);
                    projList = new String[3];
                    projList[0] = proj_list.get(i);
                    projList[1] = grpName;
                    //System.out.println(grpName);
                    projList_Map.put(proj_list.get(i), grp_list.get(j));
                    j++;
                    System.out.println(projList_Map.size());
                    if (chkqry == null) {
                        //System.out.println("group names are adding to the list.....");
                        //grp_list.add(grpName);
                        String acl_name = queryProp.getProperty(IdocsConstants.P_GETACL);
                        acl_list.add(acl_name);
                        projList[2] = acl_name;
                    }
                    csvList.add(projList);
                    csvwriter.writeAll(csvList);
                }
            }
            System.out.println("Project List is loading....");
            Set<String> keySet = projList_Map.keySet();
            System.out.println(grp_list);
            System.out.println(acl_list);
            for (String set : keySet) {
                System.out.println(set + " : " + projList_Map.get(set));
            }
            csvwriter.close();
        } catch (DfException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        } catch (IndexOutOfBoundsException e) {
            e.printStackTrace();
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    public static void main(String args[]) {
        ChkGroupExistence chkexist = new ChkGroupExistence();
    }
}
CSVWriter.java
import java.io.IOException;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.Iterator;
import java.util.List;

public class CSVWriter {

    /**
     * Default Delimiter will be used if not given.
     */
    private static final char DEF_DELIM = ',';

    /**
     * Default Quote Character will be used if not given.
     */
    private static final char DEF_QUOTE_CHAR = '"';

    /**
     * Default End of Line Character.
     */
    private static final char DEFAULT_EOL = '\n';

    /**
     * Contains the Delimiter.
     */
    private char delimiter;

    /**
     * Contains the Quote character.
     */
    private char quotechar;

    /**
     * Contains the End of Line character.
     */
    private char cLineEnd;

    /**
     * Instance variable to hold the Writer object.
     */
    private Writer rawWriter;

    /**
     * Instance variable to hold the PrintWriter object.
     */
    private PrintWriter pw;

    /**
     * Constructor to take File Writer as Input.
     *
     * @param writer
     *            File Writer
     */
    public CSVWriter(Writer writer) {
        this(writer, DEF_DELIM);
    }

    /**
     * Constructor to take File Writer and Delimiter as Input.
     *
     * @param writer
     *            File Writer
     * @param delim
     *            Delimiter
     */
    public CSVWriter(Writer writer, char delim) {
        this(writer, delim, DEF_QUOTE_CHAR);
    }

    /**
     * Constructor to take File Writer, Delimiter and Quote Character as Input.
     *
     * @param writer
     *            File Writer
     * @param delim
     *            Delimiter
     * @param quote
     *            Quote Character
     */
    public CSVWriter(Writer writer, char delim, char quote) {
        this(writer, delim, quote, DEFAULT_EOL);
    }

    /**
     * Constructor to take File Writer, Delimiter, Quote Character and End of
     * Line character as Input.
     *
     * @param writer
     *            File Writer
     * @param delim
     *            Delimiter
     * @param quote
     *            Quote Character
     * @param sEOL
     *            End of Line character
     */
    public CSVWriter(Writer writer, char delim, char quote, char sEOL) {
        rawWriter = writer;
        pw = new PrintWriter(writer);
        delimiter = delim;
        quotechar = quote;
        cLineEnd = sEOL;
    }

    /**
     * Method that takes a List as input and writes the values into the CSV file.
     *
     * @param list
     *            List of rows, each row a String[] of cell values.
     */
    public void writeAll(List list) {
        String sRow[];
        for (Iterator iter = list.iterator(); iter.hasNext(); writeNext(sRow)) {
            sRow = (String[]) iter.next();
        }
    }

    /**
     * Method that takes a String[] as input and writes each and every cell.
     *
     * @param sRow
     *            String[]
     */
    private void writeNext(String sRow[]) {
        StringBuffer stringbuffer = new StringBuffer();
        for (int i = 0; i < sRow.length; i++) {
            if (i != 0) {
                stringbuffer.append(delimiter);
            }
            String s = sRow[i];
            if (s == null) {
                continue;
            }
            if (quotechar != 0) {
                stringbuffer.append(quotechar);
            }
            for (int j = 0; j < s.length(); j++) {
                char c = s.charAt(j);
                if (c == quotechar) {
                    stringbuffer.append(DEF_QUOTE_CHAR).append(c);
                    continue;
                }
                if (c == DEF_QUOTE_CHAR) {
                    stringbuffer.append(DEF_QUOTE_CHAR).append(c);
                } else {
                    stringbuffer.append(c);
                }
            }
            if (quotechar != 0) {
                stringbuffer.append(quotechar);
            }
        }
        stringbuffer.append(cLineEnd);
        pw.write(stringbuffer.toString());
    }

    /**
     * Method that closes the Print Writer. Only when this method is called is
     * the CSV file saved.
     *
     * @throws IOException
     */
    public void close() throws IOException {
        pw.flush();
        pw.close();
        rawWriter.close();
    }
}
Answer (score: 1)
It is probably because you are including repeating attributes in your query.
Hint: when I do this, I tend to include the object ID as well, for reference and debugging.
You can also add the DISTINCT keyword to your query when those repeating attributes are included.
The reason for this behaviour lies in the nature of the data model: all repeating attributes are stored in the same table, so when one of them holds multiple values, a query that also selects another repeating attribute may return multiple rows. In that case, SELECT DISTINCT resolves the duplicates.
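As an illustration only (your real query text lives in the query properties file, so the type name below is made up; project_id and group_name are the attributes your code already reads, and I am reusing your Util.executeQuery helper and session), a DISTINCT version of such a query could look like this:

    // Hedged sketch: DISTINCT collapses the duplicate rows that selecting
    // repeating attributes can produce. "my_project_type" and projectId are
    // hypothetical placeholders for your own type name and loop variable.
    String projectId = proj_list.get(i);
    String dql = "SELECT DISTINCT project_id, group_name"
            + " FROM my_project_type"
            + " WHERE project_id = '" + projectId + "'";
    IDfCollection coll = Util.executeQuery(sess, dql, IDfQuery.READ_QUERY);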
If you want to combine repeating attributes with single-valued attributes, you can also use the DQL hint ENABLE(ROW_BASED).
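A minimal sketch of that hint, again with a made-up type name; the ENABLE clause goes at the end of the SELECT statement, and keywords stands in here for any repeating attribute you care about:

    // Hedged sketch: ENABLE(ROW_BASED) flattens repeating attributes so each value
    // comes back in its own row, which lets them sit alongside single-valued attributes.
    String dql = "SELECT r_object_id, project_id, keywords"
            + " FROM my_project_type"
            + " WHERE project_id = '" + projectId + "'"
            + " ENABLE(ROW_BASED)";
    IDfCollection coll = Util.executeQuery(sess, dql, IDfQuery.READ_QUERY);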
Happy coding!