Finding the distance the mouse moves along a vector in Three.js

Date: 2015-05-14 16:47:45

Tags: javascript three.js

I am trying to find the distance the mouse travels along a face's normal vector.

The idea is to move a set of vertices inside an object along the normal vector of the intersected face.

Currently I have an onmousedown event handler that finds the intersected face, the adjacent faces that share the same normal, and the vertices associated with those faces. I also have an onmousemove event handler that moves the vertices along that normal.

Right now, onmousemove just moves the vertices 1 unit along the face normal every time the event fires. I want them to move with the mouse.

The code I am working with is mostly taken from the three.js editor. Any help is greatly appreciated, thanks!

var object; // Set outside this code
var camera; // Set outside this code
var viewport; // Set outside this code
var raycaster = new THREE.Raycaster();
var point = new THREE.Vector2();
var mouse = new THREE.Vector2();
var _dragging = false;

var faces = [];
var vertices = [];

function onMouseDown(event) {

    if (object === undefined || _dragging === true) {
        return;
    }

    event.preventDefault();
    event.stopPropagation();

    var intersect = getIntersects(event, object)[0];

    if (intersect && intersect.face) {

        faces = getAdjacentNormalFaces(intersect.object.geometry, intersect.face);
        vertices = getFaceVertices(intersect.object.geometry, faces);

    }

    _dragging = true;

}

function onMouseMove(event) {

    if (object === undefined || vertices.length === 0 || _dragging === false) {
        return;
    }

    event.preventDefault();
    event.stopPropagation();

    var normal = faces[0].normal;

    /*
     * Get the distance to move the vertices
     */
    var distance = 1;

    var i;
    for (i = 0; i < vertices.length; i++) {

        vertices[i].x += (normal.x * distance);
        vertices[i].y += (normal.y * distance);
        vertices[i].z += (normal.z * distance);

    }

    object.geometry.verticesNeedUpdate = true;
    object.geometry.computeBoundingBox();
    object.geometry.computeBoundingSphere();

}

var getIntersects = function (event, object) {

    var rect = viewport.getBoundingClientRect();
    point.fromArray([
        ( event.clientX - rect.left ) / rect.width,
        ( event.clientY - rect.top ) / rect.height
    ]);

    mouse.set(( point.x * 2 ) - 1, -( point.y * 2 ) + 1);

    raycaster.setFromCamera(mouse, camera);
    if (object instanceof Array) {
        return raycaster.intersectObjects(object);
    }

    return raycaster.intersectObject(object);

};

var getAdjacentNormalFaces = function (geometry, face) {
    // Returns an array of all faces that are adjacent and share the same normal vector
};

var getFaceVertices = function (geometry, faces) {
    // Returns an array of vertices that belong to the array of faces
};
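
For context, the two helpers above could look roughly like the sketch below, assuming the old THREE.Geometry API (geometry.faces / geometry.vertices) that the rest of this code uses. This is only an illustration, not the implementation referred to in the question; comparing normals exactly with equals() is a simplification.

var getAdjacentNormalFaces = function (geometry, face) {
    // Sketch: flood-fill over faces that share a vertex index with a face
    // already collected and whose normal matches the picked face.
    var result = [face];
    var queue = [face];
    while (queue.length > 0) {
        var current = queue.shift();
        geometry.faces.forEach(function (f) {
            if (result.indexOf(f) !== -1 || !f.normal.equals(face.normal)) {
                return;
            }
            var sharesVertex = [f.a, f.b, f.c].some(function (idx) {
                return idx === current.a || idx === current.b || idx === current.c;
            });
            if (sharesVertex) {
                result.push(f);
                queue.push(f);
            }
        });
    }
    return result;
};

var getFaceVertices = function (geometry, faces) {
    // Sketch: collect the unique vertex objects referenced by the faces.
    var seen = {};
    var verts = [];
    faces.forEach(function (f) {
        [f.a, f.b, f.c].forEach(function (idx) {
            if (!seen[idx]) {
                seen[idx] = true;
                verts.push(geometry.vertices[idx]);
            }
        });
    });
    return verts;
};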

UPDATE: As a summary... I have the event handlers, the set of vertices that need to move, and the normal vector the vertices should move along. What I need is the offset distance to move the vertices by, based on the mouse position.

My first thought was to create a plane perpendicular to the normal vector and track the mouse position on that plane. However, I am not sure 1. how to orient that perpendicular plane so that its largest side is visible to the camera, and 2. how to translate the mouse's x/y coordinates on the plane into the distance the vertices should move.
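
For reference, one way to carry out that idea is to build a drag plane that contains the face normal while facing the camera as much as possible, raycast the mouse onto it, and project the hit point onto the normal. The sketch below only illustrates that approach: beginDrag, dragDistance and getMouseNDC are hypothetical helpers (getMouseNDC would do the same NDC math as getIntersects above), and it assumes the object has no rotation or scale so the face normal can be used as a world-space direction.

var dragPlane = new THREE.Plane();
var dragOrigin = new THREE.Vector3(); // world-space point where the drag started
var hitPoint = new THREE.Vector3();

function beginDrag(intersect) {

    dragOrigin.copy(intersect.point);

    // Plane normal = camera direction minus its component along the face normal,
    // so the plane contains the normal and still faces the camera.
    // (Degenerate if the camera looks straight down the normal.)
    var camDir = camera.getWorldDirection(new THREE.Vector3());
    var n = intersect.face.normal.clone().normalize();
    var planeNormal = camDir.sub(n.clone().multiplyScalar(camDir.dot(n))).normalize();

    dragPlane.setFromNormalAndCoplanarPoint(planeNormal, dragOrigin);

}

function dragDistance(event) {

    // Raycast the current mouse position onto the drag plane...
    raycaster.setFromCamera(getMouseNDC(event), camera);
    if (raycaster.ray.intersectPlane(dragPlane, hitPoint) === null) {
        return 0;
    }

    // ...and project the movement onto the face normal.
    return hitPoint.clone().sub(dragOrigin).dot(faces[0].normal);

}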

2 Answers:

Answer 0 (score: 2)

The way I solved this was to plot the zero point and the normal point on a 2D plane, then use the inverse slope to find the perpendicular line that intersects the normal line. I could then use the starting point and the intersection point to find the distance the mouse moved. I also had to scale the final distance by the camera.

Quick reference:

// linear slope/intercept:  y = mx + b
// solve for b:             b = y - mx
// solve for m:             (y2 - y1) / (x2 - x1)
// get inverse slope:       -1 / m
// get intersect point:     (b2 - b1) / (m1 - m2)

There may be a simpler way, but this is what I did; hopefully it helps someone else:
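
To make the reference concrete, here is a small worked example with arbitrary numbers (not taken from the code below):

// Example: zero2D = (100, 100), normal2D = (300, 200), drag starts at (400, 150)
// slope          m  = (200 - 100) / (300 - 100) = 0.5
// inverse slope  mi = -1 / 0.5 = -2
// starting y-intercept:       b1 = 150 - 0.5 * 400 = -50
// mouse moves to (500, 250):  b2 = 250 - (-2 * 500) = 1250
// intersection:  x = (b2 - b1) / (m - mi) = 1300 / 2.5 = 520,  y = 0.5 * 520 - 50 = 210
// screen-space distance moved along the normal: |(520, 210) - (400, 150)| ≈ 134.2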

On Mousedown

  1. Project the center (0, 0, 0) vector, the face normal vector, and an arbitrary 1-unit vector (1, 0, 0) through the camera and get the screen positions of the three points.

    var zero2D = toScreenPosition(0, 0, 0);
    var one2D = toScreenPosition(1, 0, 0);
    var normal2D = toScreenPosition(intersect.face.normal.x, intersect.face.normal.y, intersect.face.normal.z);
    
    /*****/
    // Reusable vector for projecting world coordinates onto the screen
    var screenPositionVector = new THREE.Vector3();

    var toScreenPosition = function (x, y, z) {

        var rect = viewport.getBoundingClientRect();
        var point = new THREE.Vector2();

        screenPositionVector.set(x || 0, y || 0, z || 0);
    
        screenPositionVector.project(camera);
        point.set((screenPositionVector.x + 1) / 2 * rect.width, -(screenPositionVector.y - 1) / 2 * rect.height);
    
        return point;
    
    };
    
  2. Store the mouse starting point and the x direction (1 or -1) of the normal.

    start2D.set(event.clientX, event.clientY);
    normalDir = zero2D.x < normal2D.x ? 1 : -1;
    
  3. Store the slope and the inverse slope of the zero/normal line.

    slope = (normal2D.y - zero2D.y) / (normal2D.x - zero2D.x); // TODO: Handle zero slope
    inverseSlope = -1 / slope; // TODO: If slope is 0, inverse is infinity
    
  4. Store the y-intercept of the normal line based on the mouse coordinates.

    startingYIntercept = event.clientY - (slope * event.clientX);
    
  5. Use the zero2D and one2D points to find the camera scale. The camera scale is the distance between the two 2D points divided by the distance between the two 3D points (which is 1).

    cameraScale = one2D.distanceTo(zero2D);
    
  6. For better accuracy, we will move the vertices based on the total movement rather than the delta between event-handler calls, so we need to keep track of the starting position of every vertex.

    startingVertices = [];
    var i;
    for (i = 0; i < vertices.length; i++) {
        startingVertices.push({x: vertices[i].x, y: vertices[i].y, z: vertices[i].z});
    }
    
  7. On Mousemove

    1. Using the mouse position and the inverse slope, find the y-intercept of the perpendicular line.

      var endingYIntercept = event.clientY - (inverseSlope * event.clientX);
      
    2. Using the intersection formula, find the x position where the normal line and the perpendicular line cross.

      var endingX = (endingYIntercept - startingYIntercept) / (slope - inverseSlope); // x = (b2 - b1) / (m1 - m2)
      
    3. Plug x back in to find the y point. Since the lines cross at this x, either the normal line or the perpendicular line can be used. Set the end point from this.

      var endingY = (slope * endingX) + startingYIntercept;
      end2D.set(endingX, endingY);
      
    4. Find the distance between the points and divide it by the camera scale.

      var distance = end2D.distanceTo(start2D) / cameraScale;
      
    5. If the normal points in the opposite direction of the mouse movement, multiply the distance by -1.

      if ((normalDir > 0 && endingX < start2D.x) || (normalDir < 0 && endingX > start2D.x)) {
          distance = distance * -1;
      }
      
    6. Since we move the vertices by the total distance rather than by the delta between event handlers, the vertex update code is slightly different.

      var i;
      for (i = 0; i < vertices.length; i++) {
      
          vertices[i].x = startingVertices[i].x + (normal.x * distance);
          vertices[i].y = startingVertices[i].y + (normal.y * distance);
          vertices[i].z = startingVertices[i].z + (normal.z * distance);
      
      }
      
    7. Extra credit: on Mouseup

      1. When the vertices move, the center of the geometry does not change and needs to be updated. To update the center I could call geometry.center(); however, in Three.js the position of a geometry is based on its center, so this would effectively move the geometry in the opposite direction by half the distance the vertices traveled. I don't want that; I want the geometry to stay in the same place while its vertices move. To do this, I take the end position of the first vertex minus its starting position, divide that vector by 2, and add it to the geometry's position. Then I re-center the geometry.

        if (_dragging && vertices.length > 0) {
            offset.set(vertices[0].x - startingVertices[0].x, vertices[0].y - startingVertices[0].y, vertices[0].z - startingVertices[0].z);
            offset.divideScalar(2);
        
            object.position.add(offset);
            object.geometry.center();
        }
        
      2. All together

        var object; // Set outside this code
        var camera; // Set outside this code
        var viewport; // Set outside this code
        var raycaster = new THREE.Raycaster();
        var point = new THREE.Vector2();
        var mouse = new THREE.Vector2();
        var _dragging = false;
        
        var faces = [];
        var vertices = [];
        var startingVertices = [];
        
        var slope = 0;
        var inverseSlope;
        var startingYIntercept = 0;
        var normalDir = 1;
        var cameraScale = 1;
        
        var start2D = new THREE.Vector2();
        var end2D = new THREE.Vector2();
        
        var offset = new THREE.Vector3();
        var screenPositionVector = new THREE.Vector3(); // reusable vector for toScreenPosition
        
        var onMouseDown = function (event) {
        
            if (object === undefined || _dragging === true) {
                return;
            }
        
            event.preventDefault();
            event.stopPropagation();
        
            var intersect = getIntersects(event, object)[0];
        
            if (intersect && intersect.face) {
        
                var zero2D = toScreenPosition(0, 0, 0);
                var one2D = toScreenPosition(1, 0, 0);
                var normal2D = toScreenPosition(intersect.face.normal.x, intersect.face.normal.y, intersect.face.normal.z);
        
                start2D.set(event.clientX, event.clientY);
                normalDir = zero2D.x < normal2D.x ? 1 : -1;
                slope = (normal2D.y - zero2D.y) / (normal2D.x - zero2D.x); // TODO: Handle zero slope
                inverseSlope = -1 / slope; // TODO: If slope is 0, inverse is infinity
        
                startingYIntercept = event.clientY - (slope * event.clientX);
                cameraScale = one2D.distanceTo(zero2D);
        
                faces = getAdjacentNormalFaces(intersect.object.geometry, intersect.face);
                vertices = getFaceVertices(intersect.object.geometry, faces);
        
                startingVertices = [];
                var i;
                for (i = 0; i < vertices.length; i++) {
                    startingVertices.push({x: vertices[i].x, y: vertices[i].y, z: vertices[i].z});
                }
        
            }
        
            _dragging = true;
        
        }
        
        var onMouseMove = function (event) {
        
            if (object === undefined || vertices.length === 0 || _dragging === false) {
                return;
            }
        
            event.preventDefault();
            event.stopPropagation();
        
            var normal = faces[0].normal;
        
            var endingYIntercept = event.clientY - (inverseSlope * event.clientX);
            var endingX = (endingYIntercept - startingYIntercept) / (slope - inverseSlope); // x = (b2 - b1) / (m1 - m2)
            var endingY = (slope * endingX) + startingYIntercept;
            end2D.set(endingX, endingY);
        
            var distance = end2D.distanceTo(start2D) / cameraScale;
        
            if ((normalDir > 0 && endingX < start2D.x) || (normalDir < 0 && endingX > start2D.x)) {
                distance = distance * -1;
            }
        
            var i;
            for (i = 0; i < vertices.length; i++) {
        
                vertices[i].x = startingVertices[i].x + (normal.x * distance);
                vertices[i].y = startingVertices[i].y + (normal.y * distance);
                vertices[i].z = startingVertices[i].z + (normal.z * distance);
        
            }
        
            object.geometry.verticesNeedUpdate = true;
            object.geometry.computeBoundingBox();
            object.geometry.computeBoundingSphere();
        
        }
        
        var onMouseUp = function (event) {
        
            if (_dragging && vertices.length > 0) {
                offset.set(vertices[0].x - startingVertices[0].x, vertices[0].y - startingVertices[0].y, vertices[0].z - startingVertices[0].z);
                offset.divideScalar(2);
        
                object.position.add(offset);
                object.geometry.center();
            }
        
        }
        
        var getIntersects = function (event, object) {
        
            var rect = viewport.getBoundingClientRect();
            point.fromArray([
                ( event.clientX - rect.left ) / rect.width,
                ( event.clientY - rect.top ) / rect.height
            ]);
        
            mouse.set(( point.x * 2 ) - 1, -( point.y * 2 ) + 1);
        
            raycaster.setFromCamera(mouse, camera);
            if (object instanceof Array) {
                return raycaster.intersectObjects(object);
            }
        
            return raycaster.intersectObject(object);
        
        };
        
        var toScreenPosition = function (x, y, z) {
        
            var rect = viewport.getBoundingClientRect();
            var point = new THREE.Vector2();
        
            screenPositionVector.set(x || 0, y || 0, z || 0);
        
            screenPositionVector.project(camera);
            point.set((screenPositionVector.x + 1) / 2 * rect.width, -(screenPositionVector.y - 1) / 2 * rect.height);
        
            return point;
        
        };
        
        var getAdjacentNormalFaces = function (geometry, face) {
            // Returns an array of all faces that are adjacent and share the same normal vector
        };
        
        var getFaceVertices = function (geometry, faces) {
            // Returns an array of vertices that belong to the array of faces
        };
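
To actually use these handlers they still have to be attached to the viewport. A minimal wiring sketch (assuming viewport is the renderer's DOM element; the code above never resets _dragging, so that is done here on mouseup):

    viewport.addEventListener('mousedown', onMouseDown, false);
    viewport.addEventListener('mousemove', onMouseMove, false);
    viewport.addEventListener('mouseup', function (event) {
        onMouseUp(event);
        _dragging = false; // end the drag so the next mousedown starts a new one
    }, false);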
        

Answer 1 (score: 0)

You can implement this in two ways, either on mouse move or on the animation frame.


Or update these values in your animation loop, which I find to be more accurate:

function onMouseMove( event ) {

    // windowHalfX / windowHalfY and resistanceh / resistancew are assumed to be
    // defined elsewhere (half the window size and a damping factor).
    mouseX = ( event.clientX - windowHalfX ) / resistanceh;
    mouseY = ( event.clientY - windowHalfY ) / resistancew;

    var raycaster = new THREE.Raycaster();
    raycaster.setFromCamera( mouse, camera );

    var intersects = raycaster.intersectObjects( objects );

    if ( intersects.length > 0 ) {

        if ( mousedown ) {

            // do your thing

        }
    }
}
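
As a sketch of the animation-frame variant mentioned above (assuming mouse already holds the normalized device coordinates recorded in a mousemove handler, and that mousedown, objects, renderer, scene and camera exist elsewhere):

var raycaster = new THREE.Raycaster();

function animate() {

    requestAnimationFrame( animate );

    // Reuse the mouse coordinates stored by the mousemove handler and do the
    // raycast once per frame instead of once per mousemove event.
    raycaster.setFromCamera( mouse, camera );
    var intersects = raycaster.intersectObjects( objects );

    if ( mousedown && intersects.length > 0 ) {
        // do your thing
    }

    renderer.render( scene, camera );
}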