我调用的方法来自 Microsoft 为 DocumentDB 提供的示例代码,但在尝试创建新的存储过程时得到了 null 响应。
/// <summary>
/// Bulk imports the *.json files under the application's Data directory into the
/// given collection: (re)creates the BulkImport stored procedure from SP\BulkImport.js,
/// executes it in batches until every file has been imported, then reads the document
/// feed to validate the final count.
/// </summary>
/// <param name="collectionLink">Self-link of the target DocumentDB collection.</param>
private static async Task RunBulkImport(string collectionLink)
{
    string dataFilePath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, @"Data");
    string inputDirectory = dataFilePath;
    string inputFileMask = "*.json";
    int maxFiles = 2000;
    int maxScriptSize = 50000;

    // 1. Get the files.
    string[] fileNames = Directory.GetFiles(inputDirectory, inputFileMask);
    DirectoryInfo di = new DirectoryInfo(inputDirectory);
    FileInfo[] fileInfos = di.GetFiles(inputFileMask);

    // 2. Prepare for import.
    int currentCount = 0;
    int fileCount = maxFiles != 0 ? Math.Min(maxFiles, fileNames.Length) : fileNames.Length;

    // 3. Create stored procedure for this script.
    string procedurePath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, @"SP\BulkImport.js");
    string body = File.ReadAllText(procedurePath);
    StoredProcedure sproc = new StoredProcedure
    {
        Id = "BulkImport",
        Body = body
    };

    // Drop any stale copy first so CreateStoredProcedureAsync does not fail with a Conflict.
    await TryDeleteStoredProcedure(collectionLink, sproc.Id);
    sproc = await client.CreateStoredProcedureAsync(collectionLink, sproc);

    while (currentCount < fileCount)
    {
        // 5. Create args for current batch.
        // Note that we could send a string with serialized JSON and JSON.parse it on the script side,
        // but that would cause script to run longer. Since script has timeout, unload the script as much
        // as we can and do the parsing by client and framework. The script will get JavaScript objects.
        string argsJson = CreateBulkInsertScriptArguments(fileNames, currentCount, fileCount, maxScriptSize);
        var args = new dynamic[] { JsonConvert.DeserializeObject<dynamic>(argsJson) };

        // 6. Execute the batch.
        StoredProcedureResponse<int> scriptResult = await client.ExecuteStoredProcedureAsync<int>(
            sproc.SelfLink,
            new RequestOptions { PartitionKey = new PartitionKey("Andersen") },
            args);

        // 7. Prepare for next batch.
        int currentlyInserted = scriptResult.Response;

        // BUG FIX: the original looped forever when the sproc reported zero inserts
        // (e.g. the very first createDocument request was not accepted), re-sending
        // the same batch indefinitely. Fail fast so the caller sees the problem.
        if (currentlyInserted <= 0)
        {
            throw new InvalidOperationException(
                string.Format("Bulk import made no progress: {0} of {1} documents imported.", currentCount, fileCount));
        }
        currentCount += currentlyInserted;
    }

    // 8. Validate: page through the document feed and count documents.
    int numDocs = 0;
    string continuation = string.Empty;
    do
    {
        // Read document feed and count the number of documents.
        FeedResponse<dynamic> response = await client.ReadDocumentFeedAsync(
            collectionLink,
            new FeedOptions { RequestContinuation = continuation });
        numDocs += response.Count;
        // Get the continuation so that we know when to stop.
        continuation = response.ResponseContinuation;
    }
    while (!string.IsNullOrEmpty(continuation));

    Console.WriteLine("Found {0} documents in the collection. There were originally {1} files in the Data directory\r\n", numDocs, fileCount);
}
/// <summary>
/// Deletes the stored procedure with the given id from the collection if it exists;
/// silently does nothing when no such procedure is found.
/// </summary>
/// <param name="collectionLink">Self-link of the collection to search.</param>
/// <param name="sprocId">Id of the stored procedure to remove.</param>
private static async Task TryDeleteStoredProcedure(string collectionLink, string sprocId)
{
    var existing = client.CreateStoredProcedureQuery(collectionLink)
        .Where(s => s.Id == sprocId)
        .AsEnumerable()
        .FirstOrDefault();

    // Nothing to delete — the procedure has never been created (or was already removed).
    if (existing == null)
    {
        return;
    }

    await client.DeleteStoredProcedureAsync(existing.SelfLink);
}
/// <summary>
/// Builds the JSON-array argument string for one BulkImport batch: starts at
/// docFileNames[currentIndex] and keeps appending file contents (", "-separated)
/// until either maxCount is reached or the accumulated text exceeds maxScriptSize.
/// </summary>
/// <param name="docFileNames">Paths of the JSON documents to import.</param>
/// <param name="currentIndex">Index of the first document for this batch.</param>
/// <param name="maxCount">Exclusive upper bound on document indices.</param>
/// <param name="maxScriptSize">Soft cap (in characters) on the argument payload.</param>
/// <returns>
/// A JSON array string "[{...}, {...}]", or string.Empty when currentIndex is
/// already at or past maxCount.
/// </returns>
private static string CreateBulkInsertScriptArguments(string[] docFileNames, int currentIndex, int maxCount, int maxScriptSize)
{
    // Nothing left to batch; the original returned string.Empty here too
    // (fix: the guard now runs before any StringBuilder work is done).
    if (currentIndex >= maxCount)
    {
        return string.Empty;
    }

    var jsonDocumentArray = new StringBuilder();
    jsonDocumentArray.Append("[");

    // The first document is always included, even if it alone exceeds
    // maxScriptSize — otherwise a batch could never make progress.
    jsonDocumentArray.Append(File.ReadAllText(docFileNames[currentIndex]));

    // Append more documents while there is capacity and files remain.
    // NOTE: the size check is against the text accumulated so far, so the
    // final payload can overshoot maxScriptSize by up to one document —
    // same behavior as the original.
    // (fix: dropped the unused 'separator' local and the 'scriptCapacityRemaining'
    // alias that merely mirrored maxScriptSize.)
    int i = 1;
    while (jsonDocumentArray.Length < maxScriptSize && (currentIndex + i) < maxCount)
    {
        jsonDocumentArray.Append(", " + File.ReadAllText(docFileNames[currentIndex + i]));
        i++;
    }

    jsonDocumentArray.Append("]");
    return jsonDocumentArray.ToString();
}
下面是 BulkImport.js 文件的内容:
/**
 * Server-side stored procedure: bulk-creates the given documents in the
 * current collection. The response body is set to the number of documents
 * created; when the request is not accepted mid-way (server throttling),
 * the body holds the partial count so the client can resume from there.
 *
 * @param {Array} docs - array of document objects to insert.
 */
function bulkImport(docs) {
    var collection = getContext().getCollection();
    var collectionLink = collection.getSelfLink();

    // count is used as the index of the next document to insert.
    var count = 0;

    // Validate input.
    if (!docs) throw new Error("The array is undefined or null.");

    var docsLength = docs.length;
    if (docsLength == 0) {
        getContext().getResponse().setBody(0);
        // BUG FIX: the original fell through here and called
        // tryCreate(docs[0], ...) with an undefined document.
        // Stop when there is nothing to do.
        return;
    }

    // CRUD API to create a document; kicks off the create/callback chain.
    tryCreate(docs[count], callback);

    // Attempts to create one document. If the request is not accepted
    // (the server is throttling this sproc), report how many documents
    // made it in so the client can re-invoke with the remainder.
    function tryCreate(doc, callback) {
        var options = {
            disableAutomaticIdGeneration: true
        };
        var isAccepted = collection.createDocument(collectionLink, doc, options, callback);
        if (!isAccepted) getContext().getResponse().setBody(count);
    }

    // Invoked when a createDocument request completes: either insert the
    // next document or finish by reporting the total inserted.
    function callback(err, doc, options) {
        if (err) throw err;
        count++;
        if (count >= docsLength) {
            getContext().getResponse().setBody(count);
        } else {
            tryCreate(docs[count], callback);
        }
    }
}
在数据文件夹中,我有100个json文件,这些文件是在示例本身中提供的。请帮我创建新程序,我正在使用documentdb模拟器。
答案 0(得分:0)
我可以使用 DocumentDB 模拟器创建新的存储过程。不过据我所知,目前不支持在 DocumentDB Emulator Explorer 中直接执行存储过程。以下是我的详细步骤:
1. 下载 DocumentDB Emulator 并将其安装在本地计算机中。
2. 从 GitHub 下载文档 demo code。
3.我们可以从document获取本地documentdb帐户主密钥,并将其添加到项目中
DocumentDB Emulator仅支持一个固定帐户和一个众所周知的主密钥。 DocumentDB Emulator中无法进行密钥重新生成
// Connect to the DocumentDB Emulator running locally
DocumentClient client = new DocumentClient(new Uri("https://localhost:8081"),"C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==");
4.打开documentdb模拟器以创建数据库和集合
5. 执行演示代码,并在 Azure DocumentDB 模拟器浏览器中检查结果。
注意:根据屏幕截图,我们可以发现使用azure documentdb模拟器在所选过程选项卡上没有输入或结果字段。
如果我们使用azure documentdb,那么我们可以使用Azure documentdb执行 Azure门户中的过程。
如果我们对documentdb模拟器有任何疑问,我们可以通过点击documentdb模拟器浏览器上的反馈,向azure documentdb团队提供反馈。