I have a program that needs to download XML pages of roughly 200 to 250 lines each. It does this by grabbing a list of IDs from a main page, then looping over each ID, inserting it into a URL, and writing the XML page out to a file with a StreamWriter. The method looks like this:
private static void XMLUpdate(string path)
{
    try
    {
        // create client for downloading XMLs
        var client = new WebClient();
        foreach (var node in GetId())
        {
            // store the XML text in a string
            var s = client.DownloadString("https://example" + node + "/xml");
            // write it to the output file
            var file = new StreamWriter(path + "\\" + node + ".xml");
            file.WriteLine(s);
            file.Close();
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
}
private static string[] GetId()
{
    var idList = new List<string>();
    var request = WebRequest.Create("https://example/xml");
    var i = 0;
    using (var response = request.GetResponse())
    using (var xmlReader = XmlReader.Create(response.GetResponseStream()))
    {
        while (xmlReader.Read())
        {
            xmlReader.ReadToFollowing("n" + i);
            // go through each of the n nodes in the XML document and get the name and id
            if (xmlReader.NodeType != XmlNodeType.Element || xmlReader.Name != "n" + i) continue;
            xmlReader.ReadToFollowing("id");
            if (xmlReader.NodeType == XmlNodeType.Element && xmlReader.Name == "id")
            {
                var id = xmlReader.ReadElementContentAsString();
                idList.Add(id);
            }
            i += 1;
        }
    }
    return idList.ToArray();
}
Right now the program takes a long time to download everything I need. Is there any way I can speed this process up?
Answer (score: 1)
Yes, you can use Parallel.ForEach:
Parallel.ForEach(
    GetId(),
    new ParallelOptions() { MaxDegreeOfParallelism = 32 }, // run 32 downloads in parallel
    node =>
    {
        var client = new WebClient();
        // store the XML text in a string
        var s = client.DownloadString("https://example" + node + "/xml");
        // write it to the output file
        var file = new StreamWriter(path + "\\" + node + ".xml");
        file.WriteLine(s);
        file.Close();
    });
You can tune MaxDegreeOfParallelism to whatever you need and whatever the service can handle.
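For reference, here is a minimal sketch of how the parallel loop could slot into the original XMLUpdate method. The URL pattern, GetId, and the MessageBox error handling come from the question; the MaxDegreeOfParallelism of 8 and the switch to File.WriteAllText with Path.Combine are illustrative choices, not values tested against the service's limits.

private static void XMLUpdate(string path)
{
    try
    {
        Parallel.ForEach(
            GetId(),
            new ParallelOptions() { MaxDegreeOfParallelism = 8 },
            node =>
            {
                // WebClient is not thread-safe, so each iteration creates (and disposes) its own
                using (var client = new WebClient())
                {
                    var s = client.DownloadString("https://example" + node + "/xml");
                    // Path.Combine builds the same path\node.xml target as the original concatenation
                    File.WriteAllText(Path.Combine(path, node + ".xml"), s);
                }
            });
    }
    catch (AggregateException ex)
    {
        // Parallel.ForEach collects worker exceptions into an AggregateException
        MessageBox.Show(ex.InnerExceptions[0].Message);
    }
}

Note that the downloads are still blocking calls running on thread-pool threads; on .NET 4.5 or later, HttpClient with async/await would avoid tying those threads up, but the Parallel.ForEach version is the smallest change to the existing code.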