此刻,我有一个很大的 csv 文件,其大小为 11 MB,包含许多从 Excel 导出的数据,大约是 100 万行、10 列。现在,我想编写一段能更快导入该文件的 C# 代码。
我做了什么?
首先,我编写了从csv文件导入所有数据的代码:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Collections;
using System.Data.OleDb;
using System.IO;
using System.Configuration;
using MySql.Data.MySqlClient;
namespace ControlDataBase
{
public partial class Import_data_mysql : Form
{
// Initializes the import form; InitializeComponent is the
// designer-generated control setup.
public Import_data_mysql()
{
InitializeComponent();
}
// Reference to the already-open New_Tables form (null if it is not open).
// NOTE(review): evaluated once at field initialization — confirm New_Tables
// is guaranteed to be open before this form is constructed.
New_Tables frm2 = (New_Tables)Application.OpenForms["New_Tables"];
// Prompts the user for a semicolon-delimited *.csv file and loads it into
// a DataTable. The first line is treated as the header row and provides
// the column names. Returns null when the user cancels the dialog or the
// file has no header line.
private DataTable ImportFile()
{
DataTable imported_data = new DataTable();
using (OpenFileDialog ofd = new OpenFileDialog())
{
ofd.Title = "Open csv file";
ofd.DefaultExt = "*.csv";
ofd.Filter = "Documents (*.csv)|*.csv";
// Original code ignored the dialog result: cancelling made
// FileInfo("") throw ArgumentException. Bail out instead.
if (ofd.ShowDialog() != DialogResult.OK)
{
return null;
}
using (StreamReader sr = new StreamReader(ofd.FileName))
{
string header = sr.ReadLine();
if (string.IsNullOrEmpty(header))
{
MessageBox.Show("Not found or loaded not correct file.");
return null;
}
foreach (string header_column in header.Split(';'))
{
imported_data.Columns.Add(header_column);
}
// Suspend notifications/index maintenance while bulk-adding rows;
// this measurably speeds up million-row loads.
imported_data.BeginLoadData();
int columnCount = imported_data.Columns.Count;
while (!sr.EndOfStream)
{
string line = sr.ReadLine();
if (string.IsNullOrEmpty(line)) continue;
string[] fields = line.Split(';');
DataRow imported_row = imported_data.NewRow();
// Use Length (not the LINQ Count() extension) and clamp to the
// header width so a malformed over-long row cannot throw
// IndexOutOfRangeException.
int count = Math.Min(fields.Length, columnCount);
for (int i = 0; i < count; i++)
{
imported_row[i] = fields[i];
}
imported_data.Rows.Add(imported_row);
}
imported_data.EndLoadData();
}
}
return imported_data;
}
第二,我编写了向 MySQL 数据库的 "order_status" 表插入数据的代码:
// Inserts every row of imported_data into try1.order_status, resolving the
// worker/order/module/project foreign keys by name via the SELECT part of
// the INSERT. All rows are written inside a single transaction with one
// prepared command whose parameter values are updated per row — the
// original created a new command (never disposed) and re-added all ten
// parameters for every row, with an autocommit round-trip per insert.
private void save_status_to_database(DataTable imported_data)
{
string connect = "datasource=localhost;port=3306;username=root;password=;CharSet=utf8mb4";
using (MySqlConnection conn = new MySqlConnection(connect))
{
conn.Open();
// Original literal was broken: missing the opening quote after '@'
// and the terminating ';' — it could not compile.
// NOTE(review): "module.ID_MODULE" vs the joined table "try1.modules"
// looks like a typo in the original SQL — confirm against the schema.
string query5 = @"INSERT IGNORE INTO try1.order_status(ID_WORKER, ID_ORDER, ID_MODULE, ID_PROJECT,
AMOUNT_OF_PRODUCTS, BEGIN_DATE, END_DATE) SELECT workers.ID_WORKER, orders.ID_ORDER, module.ID_MODULE,
projects.ID, @AMOUNT_OF_PRODUCTS, @BEGIN_DATE, @END_DATE FROM try1.workers INNER JOIN try1.orders
INNER JOIN try1.modules INNER JOIN try1.projects WHERE workers.FNAME = @FNAME AND workers.LNAME = @LNAME
AND workers.ID_WORKER = @ID_WORKER AND orders.DESC_ORDER = @DESC_ORDER
AND orders.ORDER_NUMBER = @ORDER_NUMBER AND modules.NAME = @MODULES_NAME
AND projects.PROJECT_NAME = @PROJECT_NAME";
// Parameter name -> source DataTable column, declared once.
string[] paramNames = { "@ID_WORKER", "@FNAME", "@LNAME", "@DESC_ORDER", "@ORDER_NUMBER",
"@MODULES_NAME", "@PROJECT_NAME", "@AMOUNT_OF_PRODUCTS", "@BEGIN_DATE", "@END_DATE" };
string[] columnNames = { "ID_WORKER", "FNAME", "LNAME", "DESC_ORDER", "ORDER_NUMBER",
"NAME", "PROJECT_NAME", "AMOUNT_OF_PRODUCTS", "BEGIN_DATE", "END_DATE" };
using (MySqlTransaction tx = conn.BeginTransaction())
using (MySqlCommand cmd = new MySqlCommand(query5, conn, tx))
{
// CSV-sourced DataTable cells are all strings, so VarChar is safe;
// MySQL coerces on insert.
for (int i = 0; i < paramNames.Length; i++)
{
cmd.Parameters.Add(paramNames[i], MySqlDbType.VarChar);
}
cmd.Prepare();
foreach (DataRow importRow in imported_data.Rows)
{
for (int i = 0; i < paramNames.Length; i++)
{
cmd.Parameters[paramNames[i]].Value = importRow[columnNames[i]];
}
cmd.ExecuteNonQuery();
}
// One commit for the whole batch instead of one per row.
tx.Commit();
}
}
MessageBox.Show("Imported to database.");
}
但是,当我导入这个 11 MB 的大文件时,导入耗时很长,大约需要 10 分钟。在运行此代码的过程中,内存占用有时高达约 5 GB!
现在,我想知道如何加快从大型csv文件导入数据的速度。 MysqlBulkLoader
就足够了吗?也许应该以其他方式重写导入代码?有任何想法吗?寻求帮助。
我尝试了什么?
我已经尝试过以x64模式运行,并在App.config中添加<runtime> <gcAllowVeryLargeObjects enabled="true" /> </runtime>
。但这还没有帮助。
答案 0 :(得分:0)
我认为 ImportFile 函数本身没有问题。额外的时间花在 SQL 查询和 foreach 循环上。您应该使用某种批量更新功能,而不是一次只查询一行。
INSERT INTO tbl_name
(a,b,c)
VALUES
(1,2,3),
(4,5,6),
(7,8,9);
1。从文件构建 imported_data
2。从imported_data构建SQL查询
3。运行查询
也许您可以同时进行1和2。祝你好运!