如何在WebGL中在3D视图和2D视图之间切换?
我有一个场景的3D视图,我也想显示一个2D视图,就像一个地图视图。如何在两种类型的视图之间切换?
答案 0 :(得分:4)
通常要从3d切换到2d,您只需使用orthographic projection而不是perspective projection。
如果你想在这两种投影之间做动画过渡,对两个投影矩阵逐元素插值似乎可以正常工作:
// Build the projection as an element-wise blend of the orthographic and
// perspective matrices. mixAmount = 0 → pure ortho (2D-ish view),
// mixAmount = 1 → pure perspective (3D view).
// Fix: the original passed `orthZFar`, inconsistent with `orthoZNear`
// (an undefined name); it should be `orthoZFar`.
const ortho = someOrthoFunc(left, right, top, bottom, orthoZNear, orthoZFar);
const persp = somePerspFunc(fov, aspect, perspZNear, perspZFar);
const projection = [];
for (let i = 0; i < 16; ++i) {
  projection[i] = lerp(ortho[i], persp[i], mixAmount);
}
/**
 * Linear interpolation between two numbers.
 * @param {number} a - value returned when l is 0
 * @param {number} b - value returned when l is 1
 * @param {number} l - blend factor, normally in [0, 1]
 * @returns {number} a blended toward b by l
 */
function lerp(a, b, l) {
  const delta = b - a;
  return a + delta * l;
}
当你想要正交视图(2D 风格)时 mixAmount 为 0,当你想要透视视图(3D)时 mixAmount 为 1,你可以在 0 和 1 之间为其设置动画。
请注意,如果您希望正交视图与透视视图匹配,则需要选择与您的应用相配的 top、bottom、left、right 值。对于在 2 个不同视图之间转换(比如地面上的第一人称视角与直接向下俯视),您可以选择任何您想要的设置。但假设你正向下俯视,只是想在保持相同视野的情况下从 3D 切换到 2D。在这种情况下,您需要选择与给定距离处的透视视图相匹配的 left、right、top、bottom。对于 top 和 bottom,这取决于在"相机到地面的距离"处,垂直方向能容纳多少个单位。
参见 this answer:其中 distance 是相机到地面的距离,该公式将给出在该距离处垂直可见单位数量的一半,然后您可以将其代入 top 和 bottom。对于 left 和 right,只需再乘以画布的显示宽高比即可。
另一个需要改变的是相机。定位相机的常用方法是使用 lookAt 函数,该函数会生成视图矩阵或相机矩阵。
向下俯视时:

const cameraPosition = [x, groundHeight + distanceAboveGround, z];
const target = [x, groundHeight, z];
const up = [0, 0, 1];
const camera = someLookAtFunction(cameraPosition, target, up);

您为 3D 相机设置了另一组不同的 cameraPosition、target、up。您可以通过对这 3 个变量进行插值来为两种相机之间的过渡设置动画。
&#13;
// Vertex shader: passes the texture coordinate through to the fragment
// shader and transforms the position by a combined world-view-projection
// matrix supplied as a single uniform.
const vs = `
uniform mat4 u_worldViewProjection;
attribute vec4 a_position;
attribute vec2 a_texcoord;
varying vec4 v_position;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = u_worldViewProjection * a_position;
}
`;
// Fragment shader: samples a single texture at the interpolated texcoord.
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_texture;
void main() {
gl_FragColor = texture2D(u_texture, v_texcoord);
}
`;
"use strict";
twgl.setDefaults({attribPrefix: "a_"});
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.getElementById("c").getContext("webgl");
// compiles shaders, links program, looks up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for positions, texcoords
const bufferInfo = twgl.primitives.createCubeBufferInfo(gl);
// calls gl.createTexture, gl.bindTexture, gl.texImage2D, gl.texParameteri
const tex = twgl.createTexture(gl, {
min: gl.NEAREST,
mag: gl.NEAREST,
src: [
255, 0, 0, 255,
0, 192, 0, 255,
0, 0, 255, 255,
255, 224, 0, 255,
],
});
const settings = {
projectionMode: 2,
cameraMode: 2,
fov: 30,
};
// Draws one frame: blends element-wise between an orthographic and a
// perspective projection (perspMixAmount) and between a straight-down
// and an angled camera (camMixAmount), both driven by `settings`.
function render(time) {
  time *= 0.001;  // ms → seconds
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.enable(gl.DEPTH_TEST);
  gl.enable(gl.CULL_FACE);
  gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
  const fov = settings.fov * Math.PI / 180;  // degrees → radians
  const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const perspZNear = 0.5;
  const perspZFar = 10;
  const persp = m4.perspective(fov, aspect, perspZNear, perspZFar);
  // the size to make the orthographic view is arbitrary.
  // here we're choosing the number of units at ground level
  // away from the top perspective camera
  const heightAboveGroundInTopView = 7;
  // Half the vertical extent visible at ground level for the chosen fov,
  // so the ortho view frames the same area as the top-down perspective view.
  const halfSizeToFitOnScreen = heightAboveGroundInTopView * Math.tan(fov / 2);
  const top = -halfSizeToFitOnScreen;
  const bottom = +halfSizeToFitOnScreen;
  const left = top * aspect;
  const right = bottom * aspect;
  const orthoZNear = 0.5;
  const orthoZFar = 10;
  const ortho = m4.ortho(left, right, top, bottom, orthoZNear, orthoZFar);
  // Blend factors: 0 = fully ortho / top-down, 1 = fully perspective / angled.
  let perspMixAmount;
  let camMixAmount;
  switch (settings.projectionMode) {
    case 0: // 2d
      perspMixAmount = 0;
      break;
    case 1: // 3d
      perspMixAmount = 1;
      break;
    case 2: // animated
      perspMixAmount = Math.sin(time) * .5 + .5;  // oscillates in [0, 1]
      break;
  }
  switch (settings.cameraMode) {
    case 0: // top
      camMixAmount = 0;
      break;
    case 1: // angle
      camMixAmount = 1;
      break;
    case 2: // animated
      camMixAmount = Math.sin(time) * .5 + .5;  // oscillates in [0, 1]
      break;
  }
  // Element-wise blend of the two 4x4 projection matrices.
  const projection = [];
  for (let i = 0; i < 16; ++i) {
    projection[i] = lerp(ortho[i], persp[i], perspMixAmount);
  }
  // Blend the camera parameters (eye/target/up) rather than the matrices,
  // then build a single lookAt from the blended values.
  const perspEye = [1, 4, -6];
  const perspTarget = [0, 0, 0];
  const perspUp = [0, 1, 0];
  const orthoEye = [0, heightAboveGroundInTopView, 0];
  const orthoTarget = [0, 0, 0];
  const orthoUp = [0, 0, 1];  // looking straight down, so "up" points along -z on screen
  const eye = v3.lerp(orthoEye, perspEye, camMixAmount);
  const target = v3.lerp(orthoTarget, perspTarget, camMixAmount);
  const up = v3.lerp(orthoUp, perspUp, camMixAmount);
  const camera = m4.lookAt(eye, target, up);
  const view = m4.inverse(camera);  // view matrix = inverse of the camera matrix
  const viewProjection = m4.multiply(projection, view);
  gl.useProgram(programInfo.program);
  // calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  // Draw a 3x3 grid of slowly spinning textured cubes.
  const t = time * .1;
  for (let z = -1; z <= 1; ++z) {
    for (let x = -1; x <= 1; ++x) {
      const world = m4.translation([x * 1.4, 0, z * 1.4]);
      m4.rotateY(world, t + z + x, world);
      // calls gl.uniformXXX
      twgl.setUniforms(programInfo, {
        u_texture: tex,
        u_worldViewProjection: m4.multiply(viewProjection, world),
      });
      // calls gl.drawArrays or gl.drawElements
      twgl.drawBufferInfo(gl, bufferInfo);
    }
  }
  requestAnimationFrame(render);
}
// Kick off the render loop and wire the UI controls to `settings`.
requestAnimationFrame(render);
setupRadioButtons("proj", "projectionMode");
setupRadioButtons("cam", "cameraMode");
setupSlider("#fovSlider", "#fov", "fov");
/**
 * Binds a range input to a numeric property on `settings` and mirrors the
 * current value into a label element.
 * @param {string} sliderId - CSS selector for the <input type="range">
 * @param {string} labelId - CSS selector for the element showing the value
 * @param {string} property - key on `settings` to read/write
 */
function setupSlider(sliderId, labelId, property) {
  const slider = document.querySelector(sliderId);
  const label = document.querySelector(labelId);
  function updateLabel() {
    label.textContent = settings[property];
  }
  slider.addEventListener('input', () => {
    // Fix: always pass a radix to parseInt.
    settings[property] = parseInt(slider.value, 10);
    updateLabel();
  });
  // Fix: sync the slider to the initial setting before painting the label,
  // so both reflect `settings` from the start.
  slider.value = settings[property];
  updateLabel();
}
/**
 * Binds a radio-button group to a numeric property on `settings`.
 * @param {string} name - the radio group's `name` attribute
 * @param {string} property - key on `settings` to write the checked value to
 */
function setupRadioButtons(name, property) {
  document.querySelectorAll(`input[name=${name}]`).forEach(elem => {
    elem.addEventListener('change', e => {
      if (e.target.checked) {
        // Fix: always pass a radix to parseInt.
        settings[property] = parseInt(e.target.value, 10);
      }
    });
  });
}
/**
 * Linearly interpolates from a to b by factor l (0 → a, 1 → b).
 * @param {number} a
 * @param {number} b
 * @param {number} l
 * @returns {number}
 */
function lerp(a, b, l) {
  const span = b - a;
  return a + span * l;
}
&#13;
/* Make the canvas fill the window and float the UI panel over it. */
body { margin: 0; }
canvas { display: block; width: 100vw; height: 100vh; }
#ui {
  position: absolute;
  left: 10px;
  top: 10px;
  z-index: 2; /* keep the controls above the canvas */
  background: rgba(255, 255, 255, 0.9);
  padding: .5em;
}
&#13;