In the code below, I iterate over all the Block objects of a File object and perform BitConverter-based serialization. In some cases I get an OutOfMemoryException. Is there a way to optimize it?
File.Serialize()
public byte[] Serialize()
{
    byte[] bytes = new byte[Blocks.Count * Block.Size];
    for (int i = 0; i < Blocks.Count; i++)
    {
        Block block = Blocks[i];
        Buffer.BlockCopy(block.Serialize(), 0, bytes, i * Block.Size, Block.Size);
    }
    return bytes;
}
Block.Serialize()
public byte[] Serialize()
{
    byte[] bytes = new byte[Size];
    Buffer.BlockCopy(BitConverter.GetBytes(fid), 0, bytes, 0, sizeof(long));
    Buffer.BlockCopy(BitConverter.GetBytes(bid), 0, bytes, sizeof(long), sizeof(long));
    Buffer.BlockCopy(BitConverter.GetBytes(oid), 0, bytes, sizeof(long) * 2, sizeof(long));
    Buffer.BlockCopy(BitConverter.GetBytes(iid), 0, bytes, sizeof(long) * 3, sizeof(long));
    Buffer.BlockCopy(BitConverter.GetBytes(did), 0, bytes, sizeof(long) * 4, sizeof(long));
    return bytes;
}
I also tried using a MemoryStream instead of a byte[], and bit shifting instead of the BitConverter.GetBytes() method:
File.Serialize()
public MemoryStream Serialize()
{
    MemoryStream fileMemoryStream = new MemoryStream(Blocks.Count * Block.Size);
    foreach (Block block in Blocks)
    {
        using (MemoryStream blockMemoryStream = block.Serialize())
        {
            blockMemoryStream.WriteTo(fileMemoryStream);
        }
    }
    return fileMemoryStream;
}
Block.Serialize()
public MemoryStream Serialize()
{
    MemoryStream memoryStream = new MemoryStream(Size);
    memoryStream.Write(ConvertLongToByteArray(fid), 0, sizeof(long));
    memoryStream.Write(ConvertLongToByteArray(bid), 0, sizeof(long));
    memoryStream.Write(ConvertLongToByteArray(oid), 0, sizeof(long));
    memoryStream.Write(ConvertLongToByteArray(iid), 0, sizeof(long));
    memoryStream.Write(ConvertLongToByteArray(did), 0, sizeof(long));
    return memoryStream;
}

private byte[] ConvertLongToByteArray(long number)
{
    byte[] bytes = new byte[8];
    bytes[7] = (byte)((number >> 56) & 0xFF);
    bytes[6] = (byte)((number >> 48) & 0xFF);
    bytes[5] = (byte)((number >> 40) & 0xFF);
    bytes[4] = (byte)((number >> 32) & 0xFF);
    bytes[3] = (byte)((number >> 24) & 0xFF);
    bytes[2] = (byte)((number >> 16) & 0xFF);
    bytes[1] = (byte)((number >> 8) & 0xFF);
    bytes[0] = (byte)(number & 0xFF);
    return bytes;
}
Answer 0 (score: 1)
The first question I would ask is: what are Count and Size? If those (once multiplied together) are large, this will chew through memory, and serializing into one big buffer is always going to cause problems. It is better to look at techniques for serializing to a Stream, which allows a single moderately sized buffer to be used. In your case, perhaps each "block" can be serialized and flushed to the stream individually, reusing the same moderately sized buffer. Personally I try to avoid introducing unnecessary "blocks" at all; another technique is to serialize to a buffered stream and let it decide when to flush to the underlying stream.
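A minimal sketch of that streaming idea, assuming a File.SerializeTo(Stream) method and a Block.WriteTo(byte[], int) helper that are not part of the original code (the helper is sketched after the next paragraph):

// Writes every block to the destination stream, reusing one small buffer
// instead of allocating Blocks.Count * Block.Size bytes up front.
public void SerializeTo(Stream destination)
{
    byte[] buffer = new byte[Block.Size];      // one moderate-size buffer, reused
    foreach (Block block in Blocks)
    {
        block.WriteTo(buffer, 0);              // hypothetical: block fills the caller's buffer
        destination.Write(buffer, 0, Block.Size);
    }
}

// Possible usage: wrap the target stream in a BufferedStream and let it
// decide when to flush to the underlying FileStream ("data.bin" is made up).
using (var fileStream = new FileStream("data.bin", FileMode.Create))
using (var buffered = new BufferedStream(fileStream, 64 * 1024))
{
    file.SerializeTo(buffered);
}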
Finally, the fact that BitConverter insists on creating a byte[] has always disappointed me; whoever wrote that API deserves a stern talking-to. The proper technique is an API that takes a buffer and an offset and writes into the existing buffer, so fewer allocations are made. I would suggest looking for a way to write without all of these (admittedly short-lived) allocations. That is easy for int/long and friends (you only need shift operations), but for double and similar types you need unsafe code or a union struct.
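A sketch of that buffer-and-offset technique under the same assumptions; WriteInt64 and Block.WriteTo are names I am inventing here, not part of the original code:

// Writes a long into an existing buffer at the given offset using only shifts,
// little-endian to match BitConverter on little-endian platforms - no allocation.
private static void WriteInt64(byte[] buffer, int offset, long value)
{
    for (int i = 0; i < sizeof(long); i++)
    {
        buffer[offset + i] = (byte)(value >> (8 * i));
    }
}

// Block.WriteTo: fills the caller's buffer instead of returning a new byte[].
public void WriteTo(byte[] buffer, int offset)
{
    WriteInt64(buffer, offset + 0 * sizeof(long), fid);
    WriteInt64(buffer, offset + 1 * sizeof(long), bid);
    WriteInt64(buffer, offset + 2 * sizeof(long), oid);
    WriteInt64(buffer, offset + 3 * sizeof(long), iid);
    WriteInt64(buffer, offset + 4 * sizeof(long), did);
}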