This is a question for anyone using the Oculus Rift C++ SDK.
SDK version: ovr_sdk_win_1.20.0_public
IDE: VS2013
I'm trying to get a depth buffer working in the code for a simplified 3D engine for the Oculus Rift DK2.
The OculusRoomTiny sample supplied with the SDK is quite complex, since it was designed with versatility in mind, so I took the SuperMinimal sample code as the basis for my engine instead. The SuperMinimal sample supports neither multi-coloured vertices nor a depth buffer, so my code adds both.
I have no problem creating depth buffers with the Microsoft DirectX 11 libraries in ordinary PC applications, so I believe this is an Oculus-specific challenge. Note: the depth buffer in the OculusRoomTiny sample works fine, but it is wrapped in classes that are too complex for my code.
In my code the cube renders as expected, but as soon as I include the depth buffer, only the world background colour is rendered.
I lifted the depth-buffer code from the OculusRoomTiny_Advanced demo and am confident I integrated it correctly. My commented code is posted below. Note that it is super-minimal and does not release its COM objects.
Any suggestions would be welcome, as I have been playing with this code for six weeks now!
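The main source file (models.h and the model data follow it):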
#include "d3d11.h"
#include "d3dcompiler.h"
#include "OVR_CAPI_D3D.h"
#include "DirectXMath.h"
#include "models.h"
using namespace DirectX;
#pragma comment(lib, "d3dcompiler.lib")
#pragma comment(lib, "dxgi.lib")
#pragma comment(lib, "d3d11.lib")
void CreateSampleModel(ID3D11Device * Device, ID3D11DeviceContext * Context);
void RenderSampleModel(XMMATRIX * viewProj, ID3D11Device * Device, ID3D11DeviceContext * Context);
ID3D11Buffer * IndexBuffer;
ID3D11RenderTargetView * eyeRenderTexRtv[2][3];
ID3D11DepthStencilView *zbuffer[2]; // containing one for each eye
ovrLayerEyeFov ld = { { ovrLayerType_EyeFov } }; //Only using one layer Update this to an array if using several
ovrSession session;
ID3D11Device * Device;
ID3D11DeviceContext * Context;
void CreateDepthBufferForBothEyes(int eye) // despite the name, this creates the depth buffer for ONE eye and is called once per eye
{
// Create the Depth Buffer Texture
D3D11_TEXTURE2D_DESC texd;
ZeroMemory(&texd, sizeof(texd));
texd.Width = ld.Viewport[eye].Size.w;
texd.Height = ld.Viewport[eye].Size.h;
texd.ArraySize = 1;
texd.MipLevels = 1;
texd.SampleDesc.Count = 1; // This matches the RTV
texd.Format = DXGI_FORMAT_D32_FLOAT;
texd.BindFlags = D3D11_BIND_DEPTH_STENCIL;
ID3D11Texture2D *pDepthBuffer;
Device->CreateTexture2D(&texd, NULL, &pDepthBuffer);
// Describe specific properties of the Depth Stencil Buffer
D3D11_DEPTH_STENCIL_VIEW_DESC dsvd;
ZeroMemory(&dsvd, sizeof(dsvd));
dsvd.Format = DXGI_FORMAT_D32_FLOAT; // Make the same as the texture format
dsvd.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
Device->CreateDepthStencilView(pDepthBuffer, &dsvd, &zbuffer[eye]);
pDepthBuffer->Release();
}
//-------------------------------------------------------------------------------------------------
int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE, LPWSTR, int)
{
// Init Rift and device
ovr_Initialize(0);
ovrGraphicsLuid luid;
ovr_Create(&session, &luid);
IDXGIFactory * DXGIFactory; CreateDXGIFactory1(__uuidof(IDXGIFactory), (void**)(&DXGIFactory));
IDXGIAdapter * DXGIAdapter; DXGIFactory->EnumAdapters(0, &DXGIAdapter);
D3D11CreateDevice(DXGIAdapter, D3D_DRIVER_TYPE_UNKNOWN, 0, 0, 0, 0, D3D11_SDK_VERSION, &Device, 0, &Context);
// Create eye render buffers
for (int eye = 0; eye < 2; eye++)
{
ld.Fov[eye] = ovr_GetHmdDesc(session).DefaultEyeFov[eye];
ld.Viewport[eye].Size = ovr_GetFovTextureSize(session, (ovrEyeType)eye, ld.Fov[eye], 1.0f);
ovrTextureSwapChainDesc desc = {};
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
desc.Width = ld.Viewport[eye].Size.w;
desc.Height = ld.Viewport[eye].Size.h;
desc.MipLevels = 1;
desc.SampleCount = 1;
desc.StaticImage = ovrFalse;
desc.MiscFlags = ovrTextureMisc_DX_Typeless;
desc.BindFlags = ovrTextureBind_DX_RenderTarget;
ovr_CreateTextureSwapChainDX(session, Device, &desc, &ld.ColorTexture[eye]);
int textureCount = 0; ovr_GetTextureSwapChainLength(session, ld.ColorTexture[eye], &textureCount);
for (int j = 0; j < textureCount; j++) // typically 3 buffers per eye; eyeRenderTexRtv[2][3] assumes no more than that
{
ID3D11Texture2D* tex; ovr_GetTextureSwapChainBufferDX(session, ld.ColorTexture[eye], j, IID_PPV_ARGS(&tex));
D3D11_RENDER_TARGET_VIEW_DESC rtvd = { DXGI_FORMAT_R8G8B8A8_UNORM, D3D11_RTV_DIMENSION_TEXTURE2D };
Device->CreateRenderTargetView(tex, &rtvd, &eyeRenderTexRtv[eye][j]);
}
CreateDepthBufferForBothEyes(eye);
}
// Create sample model to be rendered in VR
CreateSampleModel(Device,Context);
// Loop for some frames, then terminate
float camX = 0.0f;
float camY = 0.0f;
float camZ = 0.0f;
for (long long frameIndex = 0; frameIndex < 1000;)
{
if (GetKeyState(VK_LEFT) & 0x8000)
camX -= 0.001f;
if (GetKeyState(VK_RIGHT) & 0x8000)
camX += 0.001f;
if (GetKeyState(VK_UP) & 0x8000)
camY += 0.001f;
if (GetKeyState(VK_DOWN) & 0x8000)
camY -= 0.001f;
if (GetKeyState(VK_NEXT) & 0x8000)
camZ += 0.001f;
if (GetKeyState(VK_PRIOR) & 0x8000)
camZ -= 0.001f;
// Get pose using a default IPD
ovrPosef HmdToEyePose[2] = {{{0,0,0,1}, {-0.032f, 0, 0}},
{{0,0,0,1}, {+0.032f, 0, 0}}};
ovrPosef pose[2]; ovr_GetEyePoses(session, 0, ovrTrue, HmdToEyePose, pose, &ld.SensorSampleTime);
//for (int eye = 0; eye < 2; eye++)
// ld.RenderPose[eye] = pose[eye]; // Update the Layer description with the new head position for each eye
// Render to each eye
for (int eye = 0; eye < 2; eye++)
{
ld.RenderPose[eye] = pose[eye]; // Update the Layer description with the new head position for each eye
// Set and clear current render target, and set viewport
int index = 0; ovr_GetTextureSwapChainCurrentIndex(session, ld.ColorTexture[eye], &index);
Context->OMSetRenderTargets(1, &eyeRenderTexRtv[eye][index], zbuffer[eye]); // bind this eye's depth buffer; the default depth-stencil state (test enabled, LESS) applies
const float clearColour[4] = { 0.1f, 0.0f, 0.0f, 0.0f }; // was new float[]{ ... }, which leaked an array every frame
Context->ClearRenderTargetView(eyeRenderTexRtv[eye][index], clearColour);
Context->ClearDepthStencilView(zbuffer[eye], D3D11_CLEAR_DEPTH, 1.0f, 0); // DXGI_FORMAT_D32_FLOAT has no stencil component to clear
D3D11_VIEWPORT D3Dvp; // = { 0, 0, (float)ld.Viewport[eye].Size.w, (float)ld.Viewport[eye].Size.h };
D3Dvp.TopLeftX = 0.0f;
D3Dvp.TopLeftY = 0.0f;
D3Dvp.Width = (float)ld.Viewport[eye].Size.w;
D3Dvp.Height = (float)ld.Viewport[eye].Size.h;
D3Dvp.MinDepth = 0;
D3Dvp.MaxDepth = 1;
Context->RSSetViewports(1, &D3Dvp);
pose[eye].Position.z = 2.0f + camZ; // Move camera 2m from cube
pose[eye].Position.x = camX;
pose[eye].Position.y = camY;
// Calculate view and projection matrices using pose and SDK
XMVECTOR rot = XMLoadFloat4((XMFLOAT4 *)&pose[eye].Orientation);
XMVECTOR pos = XMLoadFloat3((XMFLOAT3 *)&pose[eye].Position);
XMVECTOR up = XMVector3Rotate(XMVectorSet(0, 1, 0, 0), rot);
XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, 1, 0), rot);
XMMATRIX view = XMMatrixLookAtLH(pos, XMVectorAdd(pos, forward), up);
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], 0, 1, ovrProjection_None);
XMMATRIX proj = XMMatrixTranspose(XMLoadFloat4x4((XMFLOAT4X4 *)&p));
// Render model and commit frame
XMMATRIX viewProj = XMMatrixMultiply(view, proj); // taking the address of the temporary directly relied on a non-standard MSVC extension
RenderSampleModel(&viewProj, Device, Context);
ovr_CommitTextureSwapChain(session, ld.ColorTexture[eye]);
}
// Send rendered eye buffers to HMD, and increment the frame if we're visible
ovrLayerHeader* layers[1] = { &ld.Header };
if (ovrSuccess == ovr_SubmitFrame(session, 0, nullptr, layers, 1))
frameIndex; // was frameIndex++; but changed to loop forever
}
ovr_Shutdown();
}
//---------------------------------------------------------------------------------------
// THIS CODE IS NOT SPECIFIC TO VR OR THE SDK, JUST USED TO DRAW SOMETHING IN VR
//---------------------------------------------------------------------------------------
void CreateSampleModel(ID3D11Device * Device, ID3D11DeviceContext * Context)
{
// Create Vertex Buffer
//#define V(n) (n&1?+1.0f:-1.0f), (n&2?-1.0f:+1.0f), (n&4?+1.0f:-1.0f)
//float vertices[] = { V(0), V(3), V(2), V(6), V(3), V(7), V(4), V(2), V(6), V(1), V(5), V(3), V(4), V(1), V(0), V(5), V(4), V(7) };
D3D11_BUFFER_DESC vbDesc = { sizeof(VERTEX) * NUM_OF_VERTICES, D3D11_USAGE_DEFAULT, D3D11_BIND_VERTEX_BUFFER };
D3D11_SUBRESOURCE_DATA initData = { tList };
ID3D11Buffer* VertexBuffer;
Device->CreateBuffer(&vbDesc, &initData, &VertexBuffer);
//create the index buffer
D3D11_BUFFER_DESC bd = {}; // zero-initialise so the unused StructureByteStride field is 0
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(short) * NUM_OF_INDICES; // 3 per triangle, 12 triangles
bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
Device->CreateBuffer(&bd, NULL, &IndexBuffer);
D3D11_MAPPED_SUBRESOURCE ms;
Context->Map(IndexBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, indexes, NUM_OF_INDICES * sizeof(short)); // copy the data
Context->Unmap(IndexBuffer, NULL);
// Create Vertex Shader
const char* vShader = "float4x4 m;"
"struct VOut { "
" float4 position : SV_POSITION;"
" float4 color : COLOR;"
"}; "
"VOut VS(float4 p1 : POSITION, float4 colour: COLOR)"
"{"
" VOut output;"
" output.position = mul(m, p1);"
" output.color = colour;"
" return output;"
"}";
ID3D10Blob * pBlob; D3DCompile(vShader, strlen(vShader), "VS", 0, 0, "VS", "vs_4_0", 0, 0, &pBlob, 0);
ID3D11VertexShader * VertexShader;
Device->CreateVertexShader(pBlob->GetBufferPointer(), pBlob->GetBufferSize(), 0, &VertexShader);
// Create Input Layout
D3D11_INPUT_ELEMENT_DESC elements[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_B8G8R8A8_UNORM, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
ID3D11InputLayout * InputLayout;
Device->CreateInputLayout(elements, 2, pBlob->GetBufferPointer(), pBlob->GetBufferSize(), &InputLayout);
// Create Pixel Shader
const char* pShader = "float4 PS(float4 position : POSITION, float4 colour : COLOR) : SV_Target { return colour; }";
D3DCompile(pShader, strlen(pShader), "PS", 0, 0, "PS", "ps_4_0", 0, 0, &pBlob, 0);
ID3D11PixelShader * PixelShader;
Device->CreatePixelShader(pBlob->GetBufferPointer(), pBlob->GetBufferSize(), 0, &PixelShader);
Context->IASetInputLayout(InputLayout);
Context->IASetIndexBuffer(IndexBuffer, DXGI_FORMAT_R16_UINT, 0);
UINT stride = sizeof(VERTEX); // 3 position floats + one packed 32-bit colour = 16 bytes
UINT offset = 0;
Context->IASetVertexBuffers(0, 1, &VertexBuffer, &stride, &offset);
Context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
Context->VSSetShader(VertexShader, 0, 0);
Context->PSSetShader(PixelShader, 0, 0);
}
//------------------------------------------------------
void RenderSampleModel(XMMATRIX * viewProj, ID3D11Device * Device, ID3D11DeviceContext * Context)
{
D3D11_BUFFER_DESC desc = { sizeof(XMMATRIX), D3D11_USAGE_DYNAMIC, D3D11_BIND_CONSTANT_BUFFER, D3D11_CPU_ACCESS_WRITE };
D3D11_SUBRESOURCE_DATA initData = { viewProj };
ID3D11Buffer * ConstantBuffer; Device->CreateBuffer(&desc, &initData, &ConstantBuffer); // created fresh (and never released) every call, per the "no COM cleanup" caveat above
Context->VSSetConstantBuffers(0, 1, &ConstantBuffer);
Context->DrawIndexed(NUM_OF_INDICES, 0, 0);
}
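models.h: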
#ifndef MODELS_H
#define MODELS_H
#include "DirectXMath.h"
using namespace DirectX;
#define NUM_OF_MODELS 1
struct VERTEX{
float x;
float y;
float z;
uint32_t C; // Colour
};
#define NUM_OF_VERTICES 24
#define NUM_OF_INDICES 36
extern VERTEX tList[];
extern short indexes[];
#endif
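models.cpp (or the equivalent implementation file):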
#include "models.h"
VERTEX tList[] = {
{ -0.1f, 0.1f, 0.2f, 0xFF0000FF }, // Cube Vertex Index 0
{ 0.1f, 0.1f, 0.2f, 0xFF0000FF }, // 1
{ -0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 2
{ 0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 3
{ -0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 4
{ 0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 5
{ -0.1f, -0.1f, -0.0f, 0x00FF00FF }, // 6
{ 0.1f, -0.1f, -0.0f, 0x00FF00FF }, // 7
{ 0.1f, 0.1f, 0.2f, 0x0000FFFF }, // 8
{ 0.1f, 0.1f, -0.0f, 0x0000FFFF }, // 9
{ 0.08f, -0.1f, 0.2f, 0x0000FFFF }, // 10 Distorted in prep to test Depth
{ 0.08f, -0.1f, -0.0f, 0x0000FFFF }, // 11 Distorted in prep to test Depth
{ -0.1f, 0.1f, 0.2f, 0xFF0000FF }, // 12
{ -0.1f, 0.1f, -0.0f, 0xFF0000FF }, // 13
{ -0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 14
{ -0.1f, -0.1f, -0.0f, 0xFF0000FF }, // 15
{ -0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 16
{ 0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 17
{ -0.1f, 0.1f, 0.2f, 0x00FF00FF }, // 18
{ 0.1f, 0.1f, 0.2f, 0x00FF00FF }, // 19
{ -0.1f, -0.1f, -0.0f, 0x00FFFFFF }, // 20
{ 0.1f, -0.1f, -0.0f, 0x00FFFFFF }, // 21
{ -0.1f, -0.1f, 0.2f, 0x00FFFFFF }, // 22
{ 0.1f, -0.1f, 0.2f, 0x00FFFFFF }, // 23
};
short indexes[] = {
0, 1, 2, // FRONT QUAD
2, 1, 3,
5, 4, 6, // BACK QUAD
5, 6, 7,
8, 9, 11, // RIGHT QUAD
8, 11, 10,
13, 12,14, // LEFT QUAD
13, 14, 15,
18,16, 17, // TOP QUAD
18, 17, 19,
22, 23, 21, // BOTTOM QUAD
22, 21, 20
};
Answer 0 (Score: 0)
After a great deal of research, I found the solution myself.
The line
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], 0, 1, ovrProjection_None);
creates the projection matrix. It was simply a case of setting the near and far projection planes correctly:
float zNear = 0.1f;
float zFar = 1000.0f;
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], zNear, zFar, ovrProjection_None);
Astonishing that after what is now seven weeks of investigation, the bug turned out to be a simple omission!
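For anyone who hits the same symptom, here is why a zero near plane produces it. The sketch below is my own reasoning based on the standard left-handed perspective depth mapping; ovrMatrix4f_Projection may differ in detail, and the helper function name is mine, so treat it as an illustration rather than the SDK's exact maths.

float DepthAfterPerspectiveDivide(float zNear, float zFar, float z)
{
    // A standard LH projection writes z' = a*z + b and w' = z, with:
    float a = zFar / (zFar - zNear);
    float b = -zFar * zNear / (zFar - zNear);
    // After the perspective divide, this is the value that gets depth-tested:
    return (a * z + b) / z;
}

With zNear == 0, b collapses to 0 and a to 1, so every fragment lands at depth 1.0. That ties with the 1.0 the buffer is cleared to and fails the default LESS comparison, which is exactly why only the background colour survived.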