我正在尝试在调整窗口/控件的大小时实现渲染目标的大小调整。
然而,当这样做时它没有按预期工作(可能因为我没有正确地做)因为渲染的纹理没有填满我的整个渲染目标视图。
现在,当窗口调整大小时,我重置渲染目标视图和任何其他渲染目标(纹理)[请参阅下面的代码]
// Recreate the size-dependent D3D11 resources after a window/control resize.
// Everything that depends on the old size must be released before the new
// versions are created, otherwise each resize leaks GPU resources.
this.ImgSource.SetRenderTargetDX11(null);
Disposer.SafeDispose(ref this.m_RenderTargetView);
// NOTE(review): the factory is disposed here but not recreated in this
// snippet -- confirm it is rebuilt elsewhere before its next use.
Disposer.SafeDispose(ref this.m_d11Factory);
Disposer.SafeDispose(ref this.RenderTarget);
// Fix: the old depth-stencil view and texture were previously never
// released, leaking both on every resize. Dispose the view first.
Disposer.SafeDispose(ref this.m_depthStencilView);
Disposer.SafeDispose(ref this.m_depthStencil);
int width = (int)sizeInfo.Width;
int height = (int)sizeInfo.Height;
// Color render target: shareable (required for D3DImage interop) and
// bindable both as a render target and as a shader resource.
Texture2DDescription colordesc = new Texture2DDescription
{
    BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
    Format = PIXEL_FORMAT,
    Width = width,
    Height = height,
    MipLevels = 1,
    SampleDescription = new SampleDescription(1, 0),
    Usage = ResourceUsage.Default,
    OptionFlags = ResourceOptionFlags.Shared,
    CpuAccessFlags = CpuAccessFlags.None,
    ArraySize = 1
};
this.RenderTarget = new Texture2D(this.Device, colordesc);
m_RenderTargetView = new RenderTargetView(this.Device, this.RenderTarget);
m_depthStencil = CreateTexture2D(this.Device, width, height, BindFlags.DepthStencil, Format.D24_UNorm_S8_UInt);
m_depthStencilView = new DepthStencilView(this.Device, m_depthStencil);
// The viewport must match the new target size, or the full-screen quad
// will not cover the whole render target.
Device.ImmediateContext.Rasterizer.SetViewport(0, 0, width, height, 0.0f, 1.0f);
Device.ImmediateContext.OutputMerger.SetTargets(m_depthStencilView, m_RenderTargetView);
SetShaderAndVertices(sizeInfo);
SetShaderAndVertices 方法(sizeInfo 是渲染目标的大小):
/// <summary>
/// (Re)builds the input layout and the full-screen quad vertex buffer and
/// binds them to the input-assembler stage. Called after every resize.
/// </summary>
/// <param name="rendersize">Size of the render target. Currently unused:
/// the quad always spans the full clip space (-1..1).</param>
protected void SetShaderAndVertices(Size rendersize)
{
    var device = this.Device;
    var context = device.ImmediateContext;
    ShaderBytecode shaderCode = GetShaderByteCode(eEffectType.Texture);

    // Fix: release the previous layout/buffer before replacing them,
    // otherwise each call leaks the old GPU objects.
    layout?.Dispose();
    vertices?.Dispose();

    // NOTE(review): the TEXCOORD AlignedByteOffset of 32 implies 32 bytes
    // of position data per vertex, but the quad below writes a Vector4
    // (16 bytes) followed by a Vector2 -- confirm the actual layout of
    // VertexPositionTexture; if it is only float4 + float2 the offset
    // should be 16.
    layout = new InputLayout(device, shaderCode, new[] {
        new InputElement("SV_Position", 0, Format.R32G32B32A32_Float, 0, 0),
        new InputElement("TEXCOORD", 0, Format.R32G32_Float, 32, 0),
    });

    // Write the four corners of a full-screen quad (triangle-strip order:
    // top-left, top-right, bottom-left, bottom-right) to a data stream.
    using (var stream = new DataStream(Utilities.SizeOf<VertexPositionTexture>() * 4, true, true))
    {
        stream.WriteRange(new[]
        {
            new VertexPositionTexture(
                new Vector4(-1, 1, 0.0f, 1.0f), // position top-left
                new Vector2(0f,0f)
            ),
            new VertexPositionTexture(
                new Vector4(1, 1, 0.0f, 1.0f), // position top-right
                new Vector2(1,0)
            ),
            new VertexPositionTexture(
                new Vector4(-1, -1, 0.0f, 1.0f), // position bottom-left
                new Vector2(0,1)
            ),
            new VertexPositionTexture(
                new Vector4(1, -1, 0.0f, 1.0f), // position bottom-right
                new Vector2(1,1)
            ),
        });
        stream.Position = 0;

        // Instantiate the GPU vertex buffer from the vertex data.
        vertices = new SharpDX.Direct3D11.Buffer(device, stream, new BufferDescription()
        {
            BindFlags = BindFlags.VertexBuffer,
            CpuAccessFlags = CpuAccessFlags.None,
            OptionFlags = ResourceOptionFlags.None,
            SizeInBytes = Utilities.SizeOf<VertexPositionTexture>() * 4,
            Usage = ResourceUsage.Default,
            StructureByteStride = 0
        });
    }

    // Prepare the input-assembler stages.
    // for primitive topology https://msdn.microsoft.com/en-us/library/bb196414.aspx#ID4E2BAC
    context.InputAssembler.InputLayout = (layout);
    context.InputAssembler.PrimitiveTopology = (PrimitiveTopology.TriangleStrip);
    context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertices, Utilities.SizeOf<VertexPositionTexture>(), 0));
    context.OutputMerger.SetTargets(m_RenderTargetView);
}
着色器文件:
// Diffuse texture bound by the application at slot t0.
Texture2D ShaderTexture : register(t0);
// Sampler state bound at slot s0 (filtering/addressing configured CPU-side).
SamplerState Sampler : register(s0);
// Per-object constants at slot b0.
// NOTE(review): WorldViewProj is declared but never used by VSMain below,
// which forwards positions unchanged -- confirm whether the transform was
// meant to be applied (it would be the place to scale the quad for
// aspect-ratio correction).
cbuffer PerObject: register(b0)
{
float4x4 WorldViewProj;
};
// ------------------------------------------------------
// A shader that accepts Position and Texture
// ------------------------------------------------------
// Input to VSMain: clip-space position plus texture coordinates.
struct VertexShaderInput
{
float4 Position : SV_Position;
float2 TextureUV : TEXCOORD0;
};
// Output of VSMain / input to PSMain.
struct VertexShaderOutput
{
float4 Position : SV_Position;
float2 TextureUV : TEXCOORD0;
};
// Pass-through vertex shader: vertices are supplied directly in clip
// space, so position and UV are forwarded to the rasterizer unchanged.
VertexShaderOutput VSMain(VertexShaderInput input)
{
VertexShaderOutput output;
output.Position = input.Position;
output.TextureUV = input.TextureUV;
return output;
}
// Pixel shader: sample the bound texture at the interpolated UV.
float4 PSMain(VertexShaderOutput input) : SV_Target
{
return ShaderTexture.Sample(Sampler, input.TextureUV);
}
// ------------------------------------------------------
// A shader that accepts Position and Color
// ------------------------------------------------------
// Input for the colored-geometry pipeline: clip-space position and
// per-vertex color.
struct ColorVS_IN
{
float4 pos : SV_Position;
float4 col : COLOR;
};
// Output of ColorVS / input to ColorPS.
struct ColorPS_IN
{
float4 pos : SV_Position;
float4 col : COLOR;
};
// Pass-through vertex shader for colored geometry.
ColorPS_IN ColorVS(ColorVS_IN input)
{
ColorPS_IN output = (ColorPS_IN)0;
output.pos = input.pos;
output.col = input.col;
return output;
}
// Pixel shader: output the interpolated vertex color.
float4 ColorPS(ColorPS_IN input) : SV_Target
{
return input.col;
}
// ------------------------------------------------------
// Techniques
// ------------------------------------------------------
// Renders colored geometry with the pass-through color shaders.
technique11 Color
{
pass P0
{
// No geometry shader stage is used in this pass.
SetGeometryShader(0);
SetVertexShader(CompileShader(vs_5_0, ColorVS()));
SetPixelShader(CompileShader(ps_5_0, ColorPS()));
}
}
// Renders the full-screen textured quad with the texture shaders.
technique11 TextureLayer
{
pass P0
{
// No geometry shader stage is used in this pass.
SetGeometryShader(0);
SetVertexShader(CompileShader(vs_5_0, VSMain()));
SetPixelShader(CompileShader(ps_5_0, PSMain()));
}
}
我希望能够拉伸图像,或者根据我的要求保持宽高比。
此外,我的纹理数据通过将位图数据映射到渲染目标来从另一个线程更新。
注意:如果映射的图像与我渲染的目标视图的大小相同,则纹理会填充整个渲染目标视图。
请参阅下面的屏幕转储:
截图: 第一屏:
第一个屏幕转储:在尺寸为 (720,576) 的渲染目标上显示尺寸为 (835,626) 的图像。
第二屏:
第二个屏幕转储:在尺寸为 (899,676) 的渲染目标上显示尺寸为 (899,674) 的图像。
如需更多信息请告诉我,我很乐意提供。
感谢。
P.S: 我也在另一个论坛发布了这个问题,但没有得到回复,因此在这里发帖,希望有人能指引我正确的方向。
使用 C#、SharpDX 与 DirectX11 和 D3DImage,且不使用 Swapchain。
另见下面是用于映射纹理数据的代码: Device.ImmediateContext.ClearRenderTargetView(this.m_RenderTargetView,Color4.Black);
// Upload the decoded BGRA32 frame into a CPU-writable staging texture,
// then copy it into the shared render target. Runs on the decode thread.
Texture2DDescription colordesc = new Texture2DDescription
{
    BindFlags = BindFlags.ShaderResource,
    Format = PIXEL_FORMAT,
    Width = iWidthOfImage,
    Height = iHeightOfImage,
    MipLevels = 1,
    SampleDescription = new SampleDescription(1, 0),
    Usage = ResourceUsage.Dynamic,          // CPU-writable via Map/Unmap
    OptionFlags = ResourceOptionFlags.None,
    CpuAccessFlags = CpuAccessFlags.Write,
    ArraySize = 1
};
// Fix: the per-frame texture and the mapped stream were never disposed,
// leaking one texture per frame. Dispose both deterministically, and
// guarantee UnmapSubresource runs even if the row copy throws.
using (Texture2D newFrameTexture = new Texture2D(this.Device, colordesc))
{
    DataStream dtStream = null;
    try
    {
        DataBox dBox = Device.ImmediateContext.MapSubresource(newFrameTexture, 0, MapMode.WriteDiscard, 0, out dtStream);
        if (dtStream != null)
        {
            // Copy row by row: the GPU row pitch (dBox.RowPitch) may be
            // wider than the tightly-packed source stride (width * 4
            // bytes per BGRA32 pixel), so a single bulk copy is unsafe.
            int iRowPitch = dBox.RowPitch;
            for (int iHeightIndex = 0; iHeightIndex < iHeightOfImage; iHeightIndex++)
            {
                Marshal.Copy(decodedData, iHeightIndex * iWidthOfImage * 4, new IntPtr(dtStream.DataPointer.ToInt64() + iHeightIndex * iRowPitch), iWidthOfImage * 4);
            }
        }
    }
    finally
    {
        Device.ImmediateContext.UnmapSubresource(newFrameTexture, 0);
        dtStream?.Dispose();
    }
    // NOTE(review): CopySubresourceRegion performs no scaling -- when the
    // frame size differs from the render-target size only the overlapping
    // region is copied (the partial-fill behavior described in the
    // question). To stretch or letterbox, render the frame as a textured
    // quad instead of copying, or match the two sizes.
    Device.ImmediateContext.CopySubresourceRegion(newFrameTexture, 0, null, this.RenderTarget, 0);
}
答案 0 :(得分:0)
调整大小后,新窗口的宽高比与纹理不匹配,因此无法覆盖窗口的整个客户区域。您必须放大纹理几何体,以便在投影到相机平面时覆盖整个客户区域。这必然意味着纹理的一部分被剪裁。
基本上,在16:9监视器上观看4:3宽高比电视节目时,这是同样的问题。您可以缩放它以填充屏幕,从而裁剪或拉伸4:3内容,或者您可以在内容的任一侧用黑色标记4:3内容。