我正在尝试制作自己的简单网络抓取工具。我想从URL下载具有特定扩展名的文件。我写了以下代码:
private void button1_Click(object sender, RoutedEventArgs e)
{
if (bw.IsBusy) return;
bw.DoWork += new DoWorkEventHandler(bw_DoWork);
bw.RunWorkerAsync(new string[] { URL.Text, SavePath.Text, Filter.Text });
}
//--------------------------------------------------------------------------------------------
void bw_DoWork(object sender, DoWorkEventArgs e)
{
try
{
ThreadPool.SetMaxThreads(4, 4);
string[] strs = e.Argument as string[];
Regex reg = new Regex("<a(\\s*[^>]*?){0,1}\\s*href\\s*\\=\\s*\\\"([^>]*?)\\\"\\s*[^>]*>(.*?)</a>", RegexOptions.Compiled | RegexOptions.CultureInvariant | RegexOptions.IgnoreCase);
int i = 0;
string domainS = strs[0];
string Extensions = strs[2];
string OutDir = strs[1];
var domain = new Uri(domainS);
string[] Filters = Extensions.Split(new char[] { ';', ',', ' ' }, StringSplitOptions.RemoveEmptyEntries);
string outPath = System.IO.Path.Combine(OutDir, string.Format("File_{0}.html", i));
WebClient webClient = new WebClient();
string str = webClient.DownloadString(domainS);
str = str.Replace("\r\n", " ").Replace('\n', ' ');
MatchCollection mc = reg.Matches(str);
int NumOfThreads = mc.Count;
Parallel.ForEach(mc.Cast<Match>(), new ParallelOptions { MaxDegreeOfParallelism = 2, },
mat =>
{
string val = mat.Groups[2].Value;
var link = new Uri(domain, val);
foreach (string ext in Filters)
if (val.EndsWith("." + ext))
{
Download((object)new object[] { OutDir, link });
break;
}
});
throw new Exception("Finished !");
}
catch (System.Exception ex)
{
ReportException(ex);
}
finally
{
}
}
//--------------------------------------------------------------------------------------------
private static void Download(object o)
{
try
{
object[] objs = o as object[];
Uri link = (Uri)objs[1];
string outPath = System.IO.Path.Combine((string)objs[0], System.IO.Path.GetFileName(link.ToString()));
if (!File.Exists(outPath))
{
//WebClient webClient = new WebClient();
//webClient.DownloadFile(link, outPath);
DownloadFile(link.ToString(), outPath);
}
}
catch (System.Exception ex)
{
ReportException(ex);
}
}
//--------------------------------------------------------------------------------------------
private static bool DownloadFile(string url, string filePath)
{
try
{
HttpWebRequest request = (HttpWebRequest)HttpWebRequest.Create(url);
request.UserAgent = "Web Crawler";
request.Timeout = 40000;
WebResponse response = request.GetResponse();
Stream stream = response.GetResponseStream();
using (FileStream fs = new FileStream(filePath, FileMode.CreateNew))
{
const int siz = 1000;
byte[] bytes = new byte[siz];
for (; ; )
{
int count = stream.Read(bytes, 0, siz);
fs.Write(bytes, 0, count);
if (count == 0) break;
}
fs.Flush();
fs.Close();
}
}
catch (System.Exception ex)
{
ReportException(ex);
return false;
}
finally
{
}
return true;
}
问题在于它适用于2个并行下载:
new ParallelOptions { MaxDegreeOfParallelism = 2, }
...它不适用于更大程度的并行性,如:
new ParallelOptions { MaxDegreeOfParallelism = 5, }
...我收到连接超时异常。
起初我认为这是因为WebClient
:
//WebClient webClient = new WebClient();
//webClient.DownloadFile(link, outPath);
...但是当我用使用DownloadFile
的函数HttpWebRequest
替换它时,我仍然遇到错误。
我在很多网页上测试过,没有任何改变。我还确认了chrome的扩展名“Download Master”,这些Web服务器允许多个并行下载。 有没有人知道为什么我会在尝试并行下载多个文件时出现超时?
答案 0 :(得分:6)
您需要指定ServicePointManager.DefaultConnectionLimit
。与同一主机的默认并发连接为2.有关使用web.config related SO post的信息,请参阅connectionManagement
。
答案 1 :(得分:1)
据我所知,IIS将限制进出的连接总数,但是这个数字应该在10 ^ 3而不是5的范围内。
您是否可以测试相同的网址?我知道很多Web服务器限制了来自客户端的同时连接数。例如:您是否尝试下载10份http://www.google.com进行测试?
如果是这样,您可能想尝试使用不同网站的列表进行测试,例如: