I wrote a converter from MDB records to SQL INSERT statements, and it works fine for me. But when there are many more records it becomes very slow. Is there a better way to optimize how the records are parsed and extracted?
using System;
using System.Collections;
using System.Collections.Generic;
using System.IO;
using System.Text;
using System.Data;
using System.Data.OleDb;

namespace conMDBToSQL
{
    class Program
    {
        static void Main()
        {
            // string sFileName = "C:\\sqlrecords.mdb";
            string sFileName = "C:\\sample.mdb";
            OleDbConnection _con = new OleDbConnection( @"Provider=Microsoft.Jet.OLEDB.4.0;Data Source=" + sFileName + ";Mode=Share Exclusive;User Id=admin;Password=;" );
            try
            {
                _con.Open();
                string sTableName = "users";
                string strSQL = String.Empty;
                strSQL = "SELECT * FROM " + sTableName + " ORDER BY 1";
                OleDbCommand _cmd = new OleDbCommand( strSQL, _con );
                _cmd.CommandType = CommandType.Text;
                OleDbDataReader _dr = _cmd.ExecuteReader( CommandBehavior.SchemaOnly );
                ArrayList _Arr = new ArrayList();
                DataTable dt = _dr.GetSchemaTable();
                foreach ( DataRow drow in dt.Rows )
                {
                    // Console.Write( drow[0].ToString() + "\n" );
                    _Arr.Add( drow[0].ToString() );
                }
                _dr.Close();
                _cmd.Dispose();

                strSQL = "SELECT * FROM " + sTableName + " ORDER BY 1";
                _cmd = new OleDbCommand( strSQL, _con );
                _cmd.CommandType = CommandType.Text;
                _dr = _cmd.ExecuteReader();

                string s_fields = String.Empty;
                string s_values = String.Empty;
                int arr_count = _Arr.Count - 1;
                while ( _dr.Read() )
                {
                    for ( int i_a = 0; i_a <= arr_count; i_a++ )
                    {
                        if ( i_a <= arr_count )
                        {
                            s_fields += _Arr[i_a].ToString() + ",";
                            s_values += "'" + _dr[i_a].ToString() + "',";
                        }
                        else
                        {
                            s_fields += _Arr[i_a].ToString();
                            s_values += "'" + _dr[i_a].ToString() + "'";
                        }
                    }
                    // Build, query
                    strSQL = String.Empty;
                    strSQL = "INSERT INTO " + sTableName + "(" + s_fields + ") VALUES (" + s_values + ")";
                    // Write, text file
                    StreamWriter sw = new StreamWriter( sFileName, true, Encoding.UTF8 );
                    sw.Write( strSQL );
                    sw.Close();
                    sw = null;
                }
                Console.WriteLine( "\n" );
                Console.WriteLine( "Finished.." );
                Console.ReadKey();
            }
            catch ( OleDbException olex )
            {
                throw olex;
            }
            finally
            {
                _con.Close();
            }
        }
    }
}
Thanks,
Answer 0 (score: 1)
The SqlBulkCopy class helped me once before. Link here
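SqlBulkCopy streams rows straight into a SQL Server table instead of building one INSERT per row. A minimal sketch of that idea, assuming the destination is a SQL Server database; the connection strings and the "users" table name below are placeholders, not part of the original question:

// Requires: using System.Data.SqlClient; (plus System.Data.OleDb as in the question)
// Sketch only: connection strings and table name are placeholders.
using (OleDbConnection src = new OleDbConnection(
        @"Provider=Microsoft.Jet.OLEDB.4.0;Data Source=C:\sample.mdb;User Id=admin;Password=;"))
using (SqlConnection dest = new SqlConnection(@"Server=.;Database=Target;Integrated Security=SSPI;"))
{
    src.Open();
    dest.Open();
    OleDbCommand cmd = new OleDbCommand("SELECT * FROM users", src);
    using (OleDbDataReader reader = cmd.ExecuteReader())
    using (SqlBulkCopy bulk = new SqlBulkCopy(dest))
    {
        bulk.DestinationTableName = "users"; // target table must already exist
        bulk.BatchSize = 1000;               // send rows in batches
        bulk.WriteToServer(reader);          // streams every row in one bulk operation
    }
}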
Answer 1 (score: 0)
I think your main culprit is the new StreamWriter(sFileName, true, Encoding.UTF8) line. For every data row you open the file, append a single command, and then close the file again. That is going to be very inefficient.
Try appending to a StringBuilder instead of writing to the stream, and then, outside the read loop, write the string built up in the StringBuilder to the file once.
I think you'll find this speeds things up considerably.
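A rough sketch of that change, reusing the variables from the question; the inner field/value building stays the same, and the output path here is just a placeholder:

// Buffer every INSERT statement in memory, then write the file once at the end.
StringBuilder sqlOut = new StringBuilder();
while (_dr.Read())
{
    // ... build s_fields and s_values for the current row as before ...
    sqlOut.AppendLine("INSERT INTO " + sTableName + "(" + s_fields + ") VALUES (" + s_values + ")");
}
// One open/write/close instead of one per row ("C:\\output.sql" is a placeholder path).
File.WriteAllText("C:\\output.sql", sqlOut.ToString(), Encoding.UTF8);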
Answer 2 (score: 0)
I see two main problems. First, you should open the StreamWriter once, before the loop starts, and close it when you are finished. Like this:
using (StreamWriter sw = new StreamWriter(sFileName, false, Encoding.UTF8))
{
    while (_dr.Read())
    {
        // convert here
        // Each line is written to the StreamWriter
    }
}
The using construct makes sure the StreamWriter is properly closed and disposed.
The second problem is appending content to strings. Use a StringBuilder instead:
StringBuilder s_fields = new StringBuilder();
StringBuilder s_values = new StringBuilder();
int arr_count = _Arr.Count - 1;
while (_dr.Read())
{
    // reset the builders for each row
    s_fields.Length = 0;
    s_values.Length = 0;
    for (int i_a = 0; i_a <= arr_count; i_a++)
    {
        if (i_a < arr_count)
        {
            s_fields.AppendFormat("{0},", _Arr[i_a].ToString());
            s_values.AppendFormat("'{0}',", _dr[i_a].ToString());
        }
        else
        {
            s_fields.Append(_Arr[i_a].ToString());
            s_values.AppendFormat("'{0}'", _dr[i_a].ToString());
        }
    }
    string strSQL = String.Format("INSERT INTO {0} ({1}) VALUES ({2})",
        sTableName, s_fields.ToString(), s_values.ToString());
    sw.WriteLine(strSQL);
}
This should improve your performance quite a bit.