Regarding ensuring that data is on disk (http://winntfs.com/2012/11/29/windows-write-caching-part-2-an-overview-for-application-developers/), even through e.g. a power outage, it appears that on the Windows platform you need to rely on its "fsync" equivalent, FlushFileBuffers, to have the best guarantee that buffers are actually flushed from the disk device's caches onto the storage medium itself. The combination of FILE_FLAG_NO_BUFFERING with FILE_FLAG_WRITE_THROUGH does not ensure flushing the device cache, but merely has an effect on the file system cache, if this information is correct.
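As a point of reference, here is a minimal sketch of what such an "fsync" looks like from C# (the file name is just an illustration): on .NET 4 and later, FileStream.Flush(true) flushes the stream's buffer and then calls FlushFileBuffers on the underlying handle, which is equivalent to the explicit P/Invoke shown.

using System.IO;
using System.Runtime.InteropServices;
using Microsoft.Win32.SafeHandles;

static class FsyncSketch
{
    [DllImport("kernel32.dll", SetLastError = true)]
    static extern bool FlushFileBuffers(SafeFileHandle hFile);

    static void Example()
    {
        using (var fs = new FileStream("test.data", FileMode.OpenOrCreate,
            FileAccess.ReadWrite, FileShare.None, 4096))
        {
            fs.WriteByte(1);
            fs.Flush(true);                      // flush + FlushFileBuffers (.NET 4+)
            FlushFileBuffers(fs.SafeFileHandle); // explicit equivalent via P/Invoke
        }
    }
}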
Given that I will be working with rather large files that need to be updated "transactionally", which implies performing an "fsync" at the end of each transaction commit, I created a small app to test the performance of doing so. It essentially performs sequential writes of a batch of 8 memory-page-sized chunks of random bytes using 8 writes, and then flushes. The batch loop is repeated, and performance is recorded after every so many written pages. Additionally, it has two configurable options: whether to fsync on a flush, and whether to write a byte to the last position of the file before beginning the page writes.
// Code updated to reflect new results as discussed in answer below.
// 26/Aug/2013: Code updated again to reflect results as discussed in follow up question.
// 28/Aug/2013: Increased file stream buffer to ensure 8 page flushes.
using System;
using System.Collections.Generic;
using System.IO;
using System.IO.MemoryMappedFiles;
using System.Runtime.InteropServices;
using Microsoft.Win32.SafeHandles;

class Program
{
    static void Main(string[] args)
    {
        BenchSequentialWrites(reuseExistingFile: false);
    }

    public static void BenchSequentialWrites(bool reuseExistingFile = false)
    {
        Tuple<string, bool, bool, bool, bool>[] scenarios = new Tuple<string, bool, bool, bool, bool>[]
        {   // output csv, fsync?, fill end?, write through?, mem map?
            Tuple.Create("timing FS-E-B-F.csv", true, false, false, false),
            Tuple.Create("timing NS-E-B-F.csv", false, false, false, false),
            Tuple.Create("timing FS-LB-B-F.csv", true, true, false, false),
            Tuple.Create("timing NS-LB-B-F.csv", false, true, false, false),
            Tuple.Create("timing FS-E-WT-F.csv", true, false, true, false),
            Tuple.Create("timing NS-E-WT-F.csv", false, false, true, false),
            Tuple.Create("timing FS-LB-WT-F.csv", true, true, true, false),
            Tuple.Create("timing NS-LB-WT-F.csv", false, true, true, false),
            Tuple.Create("timing FS-E-B-MM.csv", true, false, false, true),
            Tuple.Create("timing NS-E-B-MM.csv", false, false, false, true),
            Tuple.Create("timing FS-LB-B-MM.csv", true, true, false, true),
            Tuple.Create("timing NS-LB-B-MM.csv", false, true, false, true),
            Tuple.Create("timing FS-E-WT-MM.csv", true, false, true, true),
            Tuple.Create("timing NS-E-WT-MM.csv", false, false, true, true),
            Tuple.Create("timing FS-LB-WT-MM.csv", true, true, true, true),
            Tuple.Create("timing NS-LB-WT-MM.csv", false, true, true, true),
        };
        foreach (var scenario in scenarios)
        {
            Console.WriteLine("{0,-12} {1,-16} {2,-16} {3,-16} {4:F2}", "Total pages", "Interval pages", "Total time", "Interval time", "MB/s");
            CollectGarbage();
            var timingResults = SequentialWriteTest("test.data", !reuseExistingFile, fillEnd: scenario.Item3, nPages: 200 * 1000, fSync: scenario.Item2, writeThrough: scenario.Item4, writeToMemMap: scenario.Item5);
            using (var report = File.CreateText(scenario.Item1))
            {
                report.WriteLine("Total pages,Interval pages,Total bytes,Interval bytes,Total time,Interval time,MB/s");
                foreach (var entry in timingResults)
                {
                    Console.WriteLine("{0,-12} {1,-16} {2,-16} {3,-16} {4:F2}", entry.Item1, entry.Item2, entry.Item5, entry.Item6, entry.Item7);
                    report.WriteLine("{0},{1},{2},{3},{4},{5},{6}", entry.Item1, entry.Item2, entry.Item3, entry.Item4, entry.Item5.TotalSeconds, entry.Item6.TotalSeconds, entry.Item7);
                }
            }
        }
    }

    public unsafe static IEnumerable<Tuple<long, long, long, long, TimeSpan, TimeSpan, double>> SequentialWriteTest(
        string fileName,
        bool createNewFile,
        bool fillEnd,
        long nPages,
        bool fSync = true,
        bool writeThrough = false,
        bool writeToMemMap = false,
        long pageSize = 4096)
    {
        // Create or open the file and, if requested, fill in its last byte.
        var fileMode = createNewFile ? FileMode.Create : FileMode.OpenOrCreate;
        using (var tmpFile = new FileStream(fileName, fileMode, FileAccess.ReadWrite, FileShare.ReadWrite, (int)pageSize))
        {
            Console.WriteLine("Opening temp file with mode {0}{1}", fileMode, fillEnd ? " and writing last byte." : ".");
            tmpFile.SetLength(nPages * pageSize);
            if (fillEnd)
            {
                tmpFile.Position = tmpFile.Length - 1;
                tmpFile.WriteByte(1);
                tmpFile.Position = 0;
                tmpFile.Flush(true);
            }
        }
        // Make sure any flushing / activity has completed.
        System.Threading.Thread.Sleep(TimeSpan.FromMinutes(1));
        System.Threading.Thread.SpinWait(50); // warm up.

        var buf = new byte[pageSize];
        new Random().NextBytes(buf);
        var ms = new System.IO.MemoryStream(buf);
        var stopwatch = new System.Diagnostics.Stopwatch();
        var timings = new List<Tuple<long, long, long, long, TimeSpan, TimeSpan, double>>();
        var pageTimingInterval = 8 * 2000;
        var prevPages = 0L;
        var prevElapsed = TimeSpan.FromMilliseconds(0);

        // Open the test file; FILE_FLAG_NO_BUFFERING has no named FileOptions member,
        // so its raw flag value is passed through.
        const FileOptions NoBuffering = ((FileOptions)0x20000000);
        var options = writeThrough ? (FileOptions.WriteThrough | NoBuffering) : FileOptions.None;
        using (var file = new FileStream(fileName, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite, (int)(16 * pageSize), options))
        {
            stopwatch.Start();
            if (writeToMemMap)
            {
                // Write pages through a memory map.
                using (var mmf = MemoryMappedFile.CreateFromFile(file, Guid.NewGuid().ToString(), file.Length, MemoryMappedFileAccess.ReadWrite, null, HandleInheritability.None, true))
                using (var accessor = mmf.CreateViewAccessor(0, file.Length, MemoryMappedFileAccess.ReadWrite))
                {
                    byte* base_ptr = null;
                    accessor.SafeMemoryMappedViewHandle.AcquirePointer(ref base_ptr);
                    var offset = 0L;
                    for (long i = 0; i < nPages / 8; i++)
                    {
                        using (var memStream = new UnmanagedMemoryStream(base_ptr + offset, 8 * pageSize, 8 * pageSize, FileAccess.ReadWrite))
                        {
                            for (int j = 0; j < 8; j++)
                            {
                                ms.CopyTo(memStream);
                                ms.Position = 0;
                            }
                        }
                        FlushViewOfFile((IntPtr)(base_ptr + offset), (int)(8 * pageSize));
                        offset += 8 * pageSize;
                        if (fSync)
                            FlushFileBuffers(file.SafeFileHandle);
                        if (((i + 1) * 8) % pageTimingInterval == 0)
                            timings.Add(Report(stopwatch.Elapsed, ref prevElapsed, (i + 1) * 8, ref prevPages, pageSize));
                    }
                    accessor.SafeMemoryMappedViewHandle.ReleasePointer();
                }
            }
            else
            {
                for (long i = 0; i < nPages / 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        ms.CopyTo(file);
                        ms.Position = 0;
                    }
                    file.Flush(fSync);
                    if (((i + 1) * 8) % pageTimingInterval == 0)
                        timings.Add(Report(stopwatch.Elapsed, ref prevElapsed, (i + 1) * 8, ref prevPages, pageSize));
                }
            }
        }
        timings.Add(Report(stopwatch.Elapsed, ref prevElapsed, nPages, ref prevPages, pageSize));
        return timings;
    }

    private static Tuple<long, long, long, long, TimeSpan, TimeSpan, double> Report(TimeSpan elapsed, ref TimeSpan prevElapsed, long curPages, ref long prevPages, long pageSize)
    {
        var intervalPages = curPages - prevPages;
        var intervalElapsed = elapsed - prevElapsed;
        var intervalPageSize = intervalPages * pageSize;
        var mbps = (intervalPageSize / (1024.0 * 1024.0)) / intervalElapsed.TotalSeconds;
        prevElapsed = elapsed;
        prevPages = curPages;
        return Tuple.Create(curPages, intervalPages, curPages * pageSize, intervalPageSize, elapsed, intervalElapsed, mbps);
    }

    private static void CollectGarbage()
    {
        GC.Collect();
        GC.WaitForPendingFinalizers();
        System.Threading.Thread.Sleep(200);
        GC.Collect();
        GC.WaitForPendingFinalizers();
        System.Threading.Thread.SpinWait(10);
    }

    [DllImport("kernel32.dll", SetLastError = true)]
    static extern bool FlushViewOfFile(IntPtr lpBaseAddress, int dwNumBytesToFlush);

    [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)]
    static extern bool FlushFileBuffers(SafeFileHandle hFile);
}
The performance results I obtained (64-bit Win 7, slow spindle disk) are not very encouraging. It appears that "fsync" performance depends largely on the size of the file being flushed, and that this size dominates the time taken, rather than the amount of "dirty" data to be flushed. The chart below shows the results for 4 different settings options of the little benchmark app.

As you can see, performance of "fsync" decays exponentially as the file grows (until it effectively grinds to a halt at a few GB). Furthermore, the disk itself does not seem to be doing a whole lot (i.e. Resource Monitor shows its active time as just a few percent, and its disk queue as mostly empty most of the time).

I had obviously expected "fsync" performance to be somewhat worse than normal buffered flushes, but I had expected it to be more or less constant and independent of file size. As it stands, it would seem to suggest that it is not usable in combination with a single large file.

Does somebody have an explanation, different experiences, or a different solution that allows ensuring data is on disk with more or less constant, predictable performance?

UPDATED: See the new information in the answer below.
Answer 0 (Score: 4)
Your test shows exponential decay in the speed of the synced runs because you are recreating the file each time. In this case it is no longer a purely sequential write — every write also grows the file, which requires multiple seeks to update the file metadata in the filesystem. If you ran all of these jobs using a pre-existing, fully allocated file, you would see much faster results, because none of those metadata updates would be interfering.
I ran a similar test on my Linux box. Results while recreating the file each time:
mmap direct last sync time
0 0 0 0 0.882293s
0 0 0 1 27.050636s
0 0 1 0 0.832495s
0 0 1 1 26.966625s
0 1 0 0 5.775266s
0 1 0 1 22.063392s
0 1 1 0 5.265739s
0 1 1 1 24.203251s
1 0 0 0 1.031684s
1 0 0 1 28.244678s
1 0 1 0 1.031888s
1 0 1 1 29.540660s
1 1 0 0 1.032883s
1 1 0 1 29.408005s
1 1 1 0 1.035110s
1 1 1 1 28.948555s
Results using a pre-existing file (obviously, the last_byte cases are irrelevant here; also, the very first result still had to create the file):
mmap direct last sync time
0 0 0 0 1.199310s
0 0 0 1 7.858803s
0 0 1 0 0.184925s
0 0 1 1 8.320572s
0 1 0 0 4.047780s
0 1 0 1 4.066993s
0 1 1 0 4.042564s
0 1 1 1 4.307159s
1 0 0 0 3.596712s
1 0 0 1 8.284428s
1 0 1 0 0.242584s
1 0 1 1 8.070947s
1 1 0 0 0.240500s
1 1 0 1 8.213450s
1 1 1 0 0.240922s
1 1 1 1 8.265024s
(Note that I only used 10,000 chunks rather than 25,000, so this only writes 320MB, using an ext2 file system. I didn't have a larger ext2fs handy; my bigger fs is XFS, and it refused to allow mmap + direct I/O.)

Here's the code, in case you're interested:
#define _GNU_SOURCE 1
#include <stdio.h>
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#define USE_MMAP   8
#define USE_DIRECT 4
#define USE_LAST   2
#define USE_SYNC   1

#define PAGE    4096
#define CHUNK   (8*PAGE)
#define NCHUNKS 10000
#define STATI   1000

#define FSIZE   (NCHUNKS*CHUNK)

int main(void)
{
    int i, j, fd, rc, stc;
    char *data = valloc(CHUNK);   /* page-aligned buffer, required for O_DIRECT */
    char *map, *dst;
    char sfname[8];
    struct timeval start, end, stats[NCHUNKS/STATI+1];
    FILE *sfile;

    printf("mmap\tdirect\tlast\tsync\ttime\n");
    for (i=0; i<16; i++) {
        int oflag = O_CREAT|O_RDWR|O_TRUNC;

        if (i & USE_DIRECT)
            oflag |= O_DIRECT;
        fd = open("dummy", oflag, 0666);
        ftruncate(fd, FSIZE);

        if (i & USE_LAST) {
            lseek(fd, 0, SEEK_END);
            write(fd, data, 1);
            lseek(fd, 0, SEEK_SET);
        }
        if (i & USE_MMAP) {
            map = mmap(NULL, FSIZE, PROT_WRITE, MAP_SHARED, fd, 0);
            if (map == (char *)-1L) {
                perror("mmap");
                exit(1);
            }
            dst = map;
        }

        sprintf(sfname, "%x.csv", i);
        sfile = fopen(sfname, "w");
        stc = 1;
        printf("%d\t%d\t%d\t%d\t",
            (i&USE_MMAP)!=0, (i&USE_DIRECT)!=0, (i&USE_LAST)!=0, i&USE_SYNC);
        fflush(stdout);
        gettimeofday(&start, NULL);
        stats[0] = start;

        for (j = 1; j<=NCHUNKS; j++) {
            if (i & USE_MMAP) {
                memcpy(dst, data, CHUNK);
                if (i & USE_SYNC)
                    msync(dst, CHUNK, MS_SYNC);
                dst += CHUNK;
            } else {
                write(fd, data, CHUNK);
                if (i & USE_SYNC)
                    fdatasync(fd);
            }
            if (!(j % STATI)) {   /* record a timestamp every STATI chunks */
                gettimeofday(&end, NULL);
                stats[stc++] = end;
            }
        }

        end.tv_usec -= start.tv_usec;
        if (end.tv_usec < 0) {
            end.tv_sec--;
            end.tv_usec += 1000000;
        }
        end.tv_sec -= start.tv_sec;
        printf(" %d.%06ds\n", (int)end.tv_sec, (int)end.tv_usec);

        if (i & USE_MMAP)
            munmap(map, FSIZE);
        close(fd);

        /* convert absolute timestamps into per-interval deltas */
        for (j=NCHUNKS/STATI; j>0; j--) {
            stats[j].tv_usec -= stats[j-1].tv_usec;
            if (stats[j].tv_usec < 0) {
                stats[j].tv_sec--;
                stats[j].tv_usec += 1000000;
            }
            stats[j].tv_sec -= stats[j-1].tv_sec;
        }
        for (j=1; j<=NCHUNKS/STATI; j++)
            fprintf(sfile, "%d\t%d.%06d\n", j*STATI*CHUNK,
                (int)stats[j].tv_sec, (int)stats[j].tv_usec);
        fclose(sfile);
    }
    return 0;
}
Answer 1 (Score: 2)
Here's a Windows version of my synctest code. I've only run it inside a VirtualBox VM, so I don't think I have any useful numbers for comparison, but you could give it a try to compare against the C# numbers on your machine. I'm passing OPEN_ALWAYS to CreateFile, so it will reuse the existing file. Change that flag to CREATE_ALWAYS if you want to test again with an empty file each time.

One thing I noticed is that the results were much faster the first time this program was run. Maybe NTFS is not very efficient at overwriting existing data, and file fragmentation effects showed up on subsequent runs.
#include <windows.h>
#include <stdio.h>
#include <string.h>
#include <malloc.h>   /* for _aligned_malloc */

#define USE_MMAP   8
#define USE_DIRECT 4
#define USE_LAST   2
#define USE_SYNC   1

#define PAGE    4096
#define CHUNK   (8*PAGE)
#define NCHUNKS 10000
#define STATI   1000

#define FSIZE   (NCHUNKS*CHUNK)

static LARGE_INTEGER cFreq;

/* minimal gettimeofday() replacement built on the performance counter */
int gettimeofday(struct timeval *tv, void *unused)
{
    LARGE_INTEGER count;
    if (!cFreq.QuadPart) {
        QueryPerformanceFrequency(&cFreq);
    }
    QueryPerformanceCounter(&count);
    tv->tv_sec = count.QuadPart / cFreq.QuadPart;
    count.QuadPart %= cFreq.QuadPart;
    count.QuadPart *= 1000000;
    tv->tv_usec = count.QuadPart / cFreq.QuadPart;
    return 0;
}

int main(void)
{
    int i, j, rc, stc;
    HANDLE fd;
    char *data = _aligned_malloc(CHUNK, PAGE);  /* aligned buffer, required for FILE_FLAG_NO_BUFFERING */
    char *map, *dst;
    char sfname[8];
    struct timeval start, end, stats[NCHUNKS/STATI+1];
    FILE *sfile;
    DWORD len;

    printf("mmap\tdirect\tlast\tsync\ttime\n");
    for (i=0; i<16; i++) {
        int oflag = FILE_ATTRIBUTE_NORMAL;

        if (i & USE_DIRECT)
            oflag |= FILE_FLAG_NO_BUFFERING|FILE_FLAG_WRITE_THROUGH;
        fd = CreateFile("dummy", GENERIC_READ|GENERIC_WRITE, 0, NULL,
            OPEN_ALWAYS, oflag, NULL);
        SetFilePointer(fd, FSIZE, NULL, FILE_BEGIN);
        SetEndOfFile(fd);
        if (i & USE_LAST)
            WriteFile(fd, data, 1, &len, NULL);
        SetFilePointer(fd, 0, NULL, FILE_BEGIN);

        if (i & USE_MMAP) {
            HANDLE mh;
            mh = CreateFileMapping(fd, NULL, PAGE_READWRITE,
                0, FSIZE, NULL);
            map = MapViewOfFile(mh, FILE_MAP_WRITE, 0, 0,
                FSIZE);
            CloseHandle(mh);
            dst = map;
        }

        sprintf(sfname, "%x.csv", i);
        sfile = fopen(sfname, "w");
        stc = 1;
        printf("%d\t%d\t%d\t%d\t",
            (i&USE_MMAP)!=0, (i&USE_DIRECT)!=0, (i&USE_LAST)!=0, i&USE_SYNC);
        fflush(stdout);
        gettimeofday(&start, NULL);
        stats[0] = start;

        for (j = 1; j<=NCHUNKS; j++) {
            if (i & USE_MMAP) {
                memcpy(dst, data, CHUNK);
                FlushViewOfFile(dst, CHUNK);
                dst += CHUNK;
            } else {
                WriteFile(fd, data, CHUNK, &len, NULL);
            }
            if (i & USE_SYNC)
                FlushFileBuffers(fd);
            if (!(j % STATI)) {   /* record a timestamp every STATI chunks */
                gettimeofday(&end, NULL);
                stats[stc++] = end;
            }
        }

        end.tv_usec -= start.tv_usec;
        if (end.tv_usec < 0) {
            end.tv_sec--;
            end.tv_usec += 1000000;
        }
        end.tv_sec -= start.tv_sec;
        printf(" %d.%06ds\n", (int)end.tv_sec, (int)end.tv_usec);

        if (i & USE_MMAP)
            UnmapViewOfFile(map);
        CloseHandle(fd);

        /* convert absolute timestamps into per-interval deltas */
        for (j=NCHUNKS/STATI; j>0; j--) {
            stats[j].tv_usec -= stats[j-1].tv_usec;
            if (stats[j].tv_usec < 0) {
                stats[j].tv_sec--;
                stats[j].tv_usec += 1000000;
            }
            stats[j].tv_sec -= stats[j-1].tv_sec;
        }
        for (j=1; j<=NCHUNKS/STATI; j++)
            fprintf(sfile, "%d\t%d.%06d\n", j*STATI*CHUNK,
                (int)stats[j].tv_sec, (int)stats[j].tv_usec);
        fclose(sfile);
    }
    return 0;
}
Answer 2 (Score: 1)
I have experimented and tested some more, and found a solution that is acceptable to me (although so far I have only tested sequential writes). In the process I discovered some unexpected behaviors that raise a number of new questions. I will post a new SO question for those (Explanation/information sought: Windows write I/O performance with "fsync" (FlushFileBuffers)).
I added the following two additional options to the benchmark:

- Unbuffered/write-through writing (i.e. specifying the FILE_FLAG_NO_BUFFERING and FILE_FLAG_WRITE_THROUGH flags)
- Writing to a memory-mapped file

This gave me some unexpected results, one of which provides a more or less acceptable solution to my problem. When "fsyncing" in combination with unbuffered/write-through I/O, I do not observe an exponential decay in write speed. Thus (although it is not very fast), this gives me a solution that allows ensuring data is on disk with predictable performance that is not affected by file size; a sketch of opening a file this way follows below.
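A minimal sketch of opening a file in this mode from C#, mirroring the flags used in the benchmark. The main documented constraint of FILE_FLAG_NO_BUFFERING is that write offsets and sizes must be multiples of the volume's sector size, which writing whole 4096-byte pages satisfies on typical disks.

using System.IO;

static class UnbufferedOpenSketch
{
    // FILE_FLAG_NO_BUFFERING has no named FileOptions member; pass its raw value.
    const FileOptions NoBuffering = (FileOptions)0x20000000;

    public static FileStream Open(string path, int pageSize = 4096)
    {
        // With NO_BUFFERING, file offsets and write sizes must be multiples
        // of the volume sector size; writing whole pages satisfies this.
        return new FileStream(path, FileMode.Open, FileAccess.ReadWrite,
            FileShare.Read, 8 * pageSize,
            FileOptions.WriteThrough | NoBuffering);
    }
}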
There were some other unexpected results as well; those are covered in the follow-up question linked above.

I have added the updated code used for the benchmarks to the original question.

The chart below shows some of the additional new results.
Answer 3 (Score: 0)
[Incorrect; see comments.]
I think the article you linked to is incorrect in stating that FlushFileBuffers has any useful effect on unbuffered I/O. It refers to a Microsoft paper, but the paper in question makes no such claim.

According to the documentation, using unbuffered I/O has the same effect as, but is more efficient than, calling FlushFileBuffers after each write. So the practical solution is to use unbuffered I/O rather than FlushFileBuffers.

Note, however, that using a memory-mapped file defeats the buffering settings. I would not recommend using a memory-mapped file if you are trying to push data to disk as quickly as possible.