I'm working on a web-server-based visualization project. I have it working and looking the way I want, but it runs much slower than I'd like. Basically, there is a large grid modeling a space, and individual cubes in the grid are displayed in different colors (or, currently, simply removed if there's nothing there, though it would be easy to use a transparent material instead) to represent the likelihood that an object exists in that region of the grid. The colors need to change dynamically as more data is received (not implemented yet, but easy to do at this point). Here is the code I'm currently using (named VFF.js):
//dimensions in feet
var xFeet = 20;
var yFeet = 10;
var zFeet = 15;
var certaintyGrid = [];
var gridSize = 6; //6 inch squares (higher number = lower resolution)
var objectThreshhold = 5;
//change the dimensions to blocks/grid spaces
var xDim = Math.ceil(xFeet * 12 / gridSize);
var yDim = Math.ceil(yFeet * 12 / gridSize);
var zDim = Math.ceil(zFeet * 12 / gridSize);
//parrot ar.drone is 22.4 x 22.4 x 5.4 inches
var droneWidth = 22.4 / gridSize;
var droneLength = 22.4 / gridSize;
var droneHeight = 5.4 / gridSize;
//get the canvas and set its background
var container = document.getElementById("VFFCanvas");
container.style.background = '#cccccc';
//create the scene, renderer, and camera and then put it in the VFFCanvas
var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(45, container.width/container.height, 0.1, 1000);
var renderer = new THREE.WebGLRenderer( { antialias: false, canvas: container, alpha: true} );
renderer.setClearColor(new THREE.Color().setRGB( 0.95, 0.95, 0.95 ));
renderer.setSize(container.width, container.height);
renderer.enableScissorTest ( true );
document.body.appendChild(renderer.domElement);
//create the light source
var directionalLight = new THREE.DirectionalLight(0xffffff);
scene.add(directionalLight);
//create the drone object
var droneGeo = new THREE.CubeGeometry(droneWidth, droneHeight, droneLength);
var droneMat = new THREE.MeshLambertMaterial({color: 0x888888});
var drone = new THREE.Mesh(droneGeo, droneMat);
//drone.position.set(15,4,10);
drone.position.set(xDim / 2 - 1, 2, zDim / 2 - 1); //start the drone in the center of the grid close to the ground
scene.add(drone);
//maybe do a small far clipping for the top down so its easier to see things around you
//set up the camera and views
var chaseDistance = droneWidth * 8; //bigger = farther away
var chaseClipping = 1.2; //bigger = more of the nearby scene clipped away
var topDownDroneScaledWidth = droneWidth * 15; //bigger # = farther away
var topDownHeightMargin = 4; //how many drone-heights above or below the drone to keep before cubes get clipped away
//so they don't block the view of where we are
var views = [{ //top down cam
left: 0.505,
bottom: 0.01,
width: 0.485,
height: 0.485,
fov: 45,
closeClip: topDownDroneScaledWidth - droneHeight * topDownHeightMargin,
farClip: topDownDroneScaledWidth + droneHeight * topDownHeightMargin,
//background: new THREE.Color().setRGB( 0.6, 0.2, 0.2 ),
setup: function (camera) {
camera.rotation.x = -Math.PI/2;
},
updateCamera: function (camera, scene) {
//position it above the drone (need to be careful if we are taking ceiling measurements or else our view will be blocked)
camera.position.x = drone.position.x;
camera.position.z = drone.position.z;
camera.position.y = drone.position.y + topDownDroneScaledWidth; //this height shows a decent view based on the drones size
camera.rotation.z = drone.rotation.y; //use the z because we are looking straight down
}
},{ //chase cam
left: 0.01,
bottom: 0.01,
width: 0.485,
height: 0.98,
fov: 45,
closeClip: chaseDistance * chaseClipping, //chaseDistance * sqrt(2) is our distance to the center of the drone
farClip: 10000,
//background: new THREE.Color().setRGB( 0.5, 0.5, 0.7 ),
setup: function (camera) {},
updateCamera: function (camera, scene) {
//figure out what's behind the drone
camera.position.x = drone.position.x + chaseDistance * Math.sin(drone.rotation.y);
camera.position.z = drone.position.z + chaseDistance * Math.cos(drone.rotation.y);
camera.position.y = drone.position.y + chaseDistance;
//focus on the drone
camera.lookAt(drone.position);
}
},{ //cockpit cam
left: 0.505,
bottom: 0.505,
width: 0.485,
height: 0.485,
fov: 45,
closeClip: 0.1,
farClip: 10000,
//background: new THREE.Color().setRGB( 0.3, 0.7, 0.3 ),
setup: function (camera) {
drone.add(camera);
camera.position.z = -droneLength / 2; //position it where the camera is on the ar drone
},
updateCamera: function (camera, scene) {}
}];
//initialize the views' cameras
for (var ii = 0; ii < views.length; ++ii ) {
var view = views[ii];
camera = new THREE.PerspectiveCamera( view.fov, container.width / container.height, view.closeClip, view.farClip );
view.camera = camera;
view.setup(camera);
view.left = Math.floor( container.width * view.left );
view.bottom = Math.floor( container.height * view.bottom );
view.width = Math.floor( container.width * view.width );
view.height = Math.floor( container.height * view.height );
}
//create the grid objects
var geometry = new THREE.CubeGeometry(0.9, 0.9, 0.9);
//var material = new THREE.MeshLambertMaterial({color: 0x0000ff, transparent: false, opacity: 0 });
for(i = 0; i < xDim; i++) {
certaintyGrid[i] = [];
for(j = 0; j < zDim; j++) {
certaintyGrid[i][j] = [];
for(k = 0; k < yDim; k++) {
//start them as non-existent (no certainty) or else it could cause errors
var material = new THREE.MeshLambertMaterial({color: 0x0000ff, transparent: false, opacity: 0 });
var cube = new THREE.Mesh(geometry, material);
cube.position.set(i,k,j);
material.certaintyValue = 0;
//this is just for testing - creates a wall of squares along the edges of the grid
if(j == 0 || i == 0 || k == 0 || j == zDim - 1 || k == yDim - 1 || i == xDim -1) {
material.certaintyValue = Math.floor(Math.random() * 220);
}
//keep our pointer to our object so we can add it later if it gets any certainty
certaintyGrid[i][j][k] = cube;
}
}
}
/* Attempt to merge the meshes
var geo = new THREE.Geometry();
var meshTest = new THREE.Mesh(geo, material);
for (i = 0; i < xDim; i++) {
for(j = 0; j < zDim; j++) {
for(k = 0; k < yDim; k++) {
THREE.GeometryUtils.merge(geo, certaintyGrid[i][j][k]);
}
}
}
scene.add(meshTest);
*/
//this is where it loops and updates the camera and scene
var render = function () {
requestAnimationFrame(render);
//testing stuff
drone.rotation.y += 0.01;
//makes it so the light is always coming from behind where the drone is facing
directionalLight.position.x = Math.sin(drone.rotation.y);
directionalLight.position.z = Math.cos(drone.rotation.y);
//update the cubes based on their certainty values (maybe make this "smarter" later so it only updates the ones changed)
for(i = 0; i < xDim; i++) {
for(j = 0; j < zDim; j++) {
for(k = 0; k < yDim; k++) {
var currMater = certaintyGrid[i][j][k].material;
if(currMater.certaintyValue > objectThreshhold) {
if(currMater.opacity != 1) {
if (currMater.transparent == false)
scene.add(certaintyGrid[i][j][k]);
currMater.transparent = false;
currMater.opacity = 1;
}
var red = (currMater.certaintyValue - objectThreshhold)/255;
var blue = (255 - (currMater.certaintyValue - objectThreshhold))/255;
currMater.color.setRGB(red, .2, blue);
} else if (currMater.certaintyValue < 1) {
if(currMater.opacity != 0) {
currMater.transparent = false;
currMater.opacity = 0;
scene.remove(certaintyGrid[i][j][k]);
}
} else {
if(currMater.opacity == 0 || currMater.opacity == 1) {
currMater.color.setHex(0x0000ff);
currMater.transparent = true;
if(currMater.opacity == 0) //only add it if we are going from no certainty
scene.add(certaintyGrid[i][j][k]);
}
currMater.opacity = 0.05 * (currMater.certaintyValue + 1);
}
}
}
}
//update the views and cameras
for ( var ii = 0; ii < views.length; ++ii ) {
view = views[ii];
camera = view.camera;
view.updateCamera(camera, scene);
renderer.setScissor( view.left, view.bottom, view.width, view.height );
renderer.setViewport( view.left, view.bottom, view.width, view.height );
//renderer.setClearColor( view.background );
camera.aspect = view.width / view.height;
camera.updateProjectionMatrix();
renderer.render( scene, camera );
}
};
//now actually get us started (puts us in the infinite run loop)
render();
Also, I'm running it in this simple HTML file just for testing:
<html>
<head>
<title>VFF Visualization Test</title>
</head>
<body>
<canvas id="VFFCanvas" width="640" height="480"></canvas>
<script src="three.min.js"></script>
<script src="VFF.js"></script>
</body>
</html>
I've done some searching and tried a few different things to speed it up, including using fewer materials, but if anything it didn't help the frame rate (I could easily get away with only a few different colors if that would help performance; it doesn't need a lot of detail as far as color goes). I also tried merging the meshes of the cubes, and that sped it up considerably, but having only one material for all of the cubes won't work for this. I've seen some things about MeshFaceMaterial and think it could be useful, but I've only seen it used on single, non-merged meshes (usually cubes), and I'm not sure whether it can be used for this application or how it works as far as assigning materials.
I'm open to any ideas that might help (not just MeshFaceMaterial), and thanks in advance for the help!
Answer 0 (score: 1)
I've spotted one specific performance killer: you are modifying the scene by adding and removing objects inside your render() function. Instead, you should create all the objects you will ever need up front, and switch the ones you don't need between invisible and visible. Use visibility rather than transparency to hide things.
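For example, a minimal sketch of that approach using the question's grid (assuming the cubes only ever need to be hidden, never actually removed):
//add every cube to the scene exactly once, outside of render()
for(i = 0; i < xDim; i++) {
    for(j = 0; j < zDim; j++) {
        for(k = 0; k < yDim; k++) {
            certaintyGrid[i][j][k].visible = false; //hidden until it gains certainty
            scene.add(certaintyGrid[i][j][k]);
        }
    }
}
//then inside render(), instead of scene.add()/scene.remove():
//certaintyGrid[i][j][k].visible = currMater.certaintyValue > 0;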
Another issue (unrelated to performance) is that you are moving the directional light around... directional lights don't really have a position, but they do have a direction.
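In three.js a DirectionalLight's .position is only used, together with its .target, to derive that direction, so it normally only needs to be set once rather than repositioned every frame (the values below are just an arbitrary example):
//light shines from this direction toward its target (the origin by default)
directionalLight.position.set(1, 1, 0.5);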
Finally, as the comment in your code already says, you really should optimize that loop and only change what actually needs changing. Since any geometry change has to be pushed to the GPU, which is expensive to do every frame, you really don't want to make changes you don't need to make, and you really want to keep your render() loop as fast as possible.
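One way to do that (sketched here with made-up helper names, not part of the original code) is to keep a list of cells whose certainty changed since the last frame and only touch those in render():
//hypothetical helper: record a new certainty value and remember that the cube changed
var dirtyCubes = [];
function setCertainty(i, j, k, value) {
    var cube = certaintyGrid[i][j][k];
    if (cube.material.certaintyValue !== value) {
        cube.material.certaintyValue = value;
        dirtyCubes.push(cube);
    }
}
//inside render(), update only the changed cubes instead of looping over the whole grid:
//while (dirtyCubes.length) { updateCubeAppearance(dirtyCubes.pop()); }
//where updateCubeAppearance() would hold the color/visibility logic currently in the triple loop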
One particular change I'd probably also make is to add a function that builds materials by color: if you already have a material of that color it returns it, otherwise it creates one. It may not help much, but every little bit helps.
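A rough sketch of that idea (materialCache and getMaterialByColor are names made up for this example); note that certaintyValue would then have to live on the cube itself rather than on a shared material:
var materialCache = {};
function getMaterialByColor(hexColor) {
    //reuse an existing material for this color, or create and cache a new one
    if (!materialCache[hexColor]) {
        materialCache[hexColor] = new THREE.MeshLambertMaterial({ color: hexColor });
    }
    return materialCache[hexColor];
}
//e.g. quantize the certainty down to a handful of colors and share the materials:
//cube.material = getMaterialByColor(0x0000ff);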
Answer 1 (score: 0)
In case anyone is looking for how to merge meshes with multiple materials, here is the function I use for that. It merges the geometries and re-indexes the materials, so everything should work correctly (tested on r71).
function mergeMeshes (meshArr) {
var geometry = new THREE.Geometry(),
materials = [],
m,
materialPointer = 0,
reindex = 0;
for (var i = 0; i < meshArr.length; i++) {
m = meshArr[i];
if (m.material.materials) {
for (var j = 0; j < m.material.materials.length; j++) {
materials[materialPointer++] = m.material.materials[j];
}
} else if (m.material) {
materials[materialPointer++] = m.material;
}
geometry.merge(m.geometry, m.matrix, reindex);
reindex = materialPointer;
}
return new THREE.Mesh(geometry, new THREE.MeshFaceMaterial(materials));
};
This definitely speeds up scenes with thousands of static objects. Once the objects are merged, they can only be transformed as one big mesh.
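A possible usage with the question's grid (assuming the cubes really are static; updateMatrix() is called so geometry.merge() picks up each cube's position):
var meshes = [];
for (i = 0; i < xDim; i++) {
    for (j = 0; j < zDim; j++) {
        for (k = 0; k < yDim; k++) {
            certaintyGrid[i][j][k].updateMatrix();
            meshes.push(certaintyGrid[i][j][k]);
        }
    }
}
scene.add(mergeMeshes(meshes));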