How to visualize a ribbon projected from a 3D line so that it always "faces forward"

Asked: 2016-06-02 13:57:00

Tags: windows 3d visualization openinventor

We currently have a (curved) line made up of points in 3D space (in Open Inventor, though the solution to this problem is probably not product-specific).

We want to draw a surface describing a ribbon projected orthogonally from that line, which always faces the camera as the scene is rotated.

(For example, the user would see a ribbon tracing the curve of the line, with the line itself as one of its edges; as we rotate around the scene, the ribbon would "rotate" about the line so that it always faces the user.)

We have:

  • the camera's position and rotation

  • the position of each point along the line

The approach we thought would get close: for each pair of adjacent points, find the line where a plane perpendicular to the camera intersects a plane perpendicular to the line between those points, then project a set distance along that intersection line. This did not get us anywhere near the right result, though. (Failing code below, after a short sketch of the plane-intersection idea.)

Is this approach correct, and if so, what should the code below actually look like?

If not, how can we achieve the core goal (a ribbon that always faces the user)?
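
For reference, the intersection line of two planes is parallel to the cross product of the planes' normals, so the offset point can in principle be computed directly rather than by eliminating variables. A minimal sketch of that identity (the helper name is illustrative only, not part of the failing code):

// Sketch: the line where two planes through p1 intersect runs parallel to
// the cross product of the planes' normals, so projecting a set distance
// along it needs no equation solving.
SbVec3f getOuterPointDirect(const SbVec3f &p1,
                            SbVec3f camera_axis, // normal of the camera-facing plane
                            SbVec3f well_axis,   // normal of the plane perpendicular to the segment
                            float ribbon_width)
{
    SbVec3f dir = camera_axis.cross(well_axis); // direction of the intersection line
    dir.normalize();
    return p1 + ribbon_width * dir;
}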

// (The first two getOuterPoint overloads are an earlier 2D attempt operating in
// the XY plane only; setVertices below uses only the camera-aware overload.)
SbVec3f getOuterPoint(SbVec3f p3, const float ribbon_width, float cosine, float sine)
{
    return SbVec3f(p3[0] + ribbon_width*cosine, p3[1] - ribbon_width*sine, p3[2]);
}

SbVec3f getOuterPoint(SbVec3f old_p3, SbVec3f other_point, const float ribbon_width)
{
    float oangle = atan2(old_p3[1] - other_point[1], old_p3[0] - other_point[0]);
    float ocosine = cosf(oangle);
    float osine = sinf(oangle);
    return getOuterPoint(old_p3, ribbon_width, ocosine, osine);
}

SbVec3f getOuterPoint(SbVec3f p0, SbVec3f p1, const float ribbon_width, SoCamera* camera)
{
    SbVec3f axis;
    float angle;
    camera->orientation.getValue().getValue(axis, angle); // extract the camera rotation's axis and angle
    //std::cout << axis[0] << " " << axis[1] << " " << axis[2] << ":" << angle << std::endl;

    const SbVec3f &camera_position = camera->position.getValue();

    SbVec3f camera_axis = axis; // note: this is the rotation *axis*, not the camera's viewing direction

    SbVec3f well_axis = p1 - p0;

    well_axis.normalize();
    camera_axis.normalize();

    float cam_constant[3] = { -p1[0], -p1[1], -p1[2] };   // unused
    float well_constant[3] = { -p1[0], -p1[1], -p1[2] };  // unused

    /*

    //float p1_constant = camera_axis[0] * p1[0] + camera_axis[1] * p1[1] + camera_axis[2] * p1[2]
    //  - (camera_axis[0] * camera_position[0] + camera_axis[1] * camera_position[1] + camera_axis[2] * camera_position[2]);

    //// X, Y, Z are unknown
    //float line_unknown = camera_axis[0] * X + camera_axis[1] * Y + camera_axis[2] * Z;

    // 
    //
    // camera_axis.x * (x - p1[0]) + camera_axis.y * (y - p1[1]) + camera_axis.z * (z - p1[2]) = 0      (1)
    // well_axis.x   * (x - p1[0]) + well_axis.y   * (y - p1[1]) + well_axis.z   * (z - p1[2]) = 0      (2)

    // let z become free variable t

    // camera_axis.x * (x - p1[0]) + camera_axis.y * (y - p1[1]) = - camera_axis.z * (t - p1[2]) 
    // well_axis.x   * (x - p1[0]) + well_axis.y   * (y - p1[1]) = - well_axis.z   * (t - p1[2]) 

    // NOTE: sign slip here: expanding -camera_axis.z * (t - p1[2]) gives
    // -camera_axis.z * t + camera_axis.z * p1[2], not "- camera_axis.z * p1[2]".
    // The slip carries into the constants a and b below (the direction (m, n, 1)
    // still comes out proportional to the cross product of the two normals,
    // but the resulting line no longer passes through p1).
    // camera_axis.x * (x - p1[0]) + camera_axis.y * (y - p1[1]) = - camera_axis.z * t - camera_axis.z * p1[2] 
    // well_axis.x * (x - p1[0]) + well_axis.y * (y - p1[1]) = - well_axis.z * t - well_axis.z * p1[2] 

    // camera_axis.x * x  + camera_axis.y * y  = - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1] (3)
    // well_axis.x * x  + well_axis.y * y  = - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]               (4)

    (3) * well_axis.x:

    well_axis.x * camera_axis.x * x  + well_axis.x * camera_axis.y * y  = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1])

    (4) * camera_axis.x
    camera_axis.x * well_axis.x * x  + camera_axis.x * well_axis.y * y  = camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])

    Subtracting
    well_axis.x * camera_axis.y * y - camera_axis.x * well_axis.y * y  = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])

    (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y) * y = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])
    y = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)


    (3) * well_axis.y
    well_axis.y * camera_axis.x * x  + well_axis.y * camera_axis.y * y  = well_axis.y * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1])
    (4) * camera_axis.y
    camera_axis.y * well_axis.x * x  + camera_axis.y * well_axis.y * y  = camera_axis.y * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1])

    Subtracting
    x = well_axis.y * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.y * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / well_axis.y * camera_axis.x  - camera_axis.y * well_axis.x


    So:
    x = well_axis.y * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.y * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / (well_axis.y * camera_axis.x - camera_axis.y * well_axis.x)
    y = well_axis.x * ( - camera_axis.z * t - camera_axis.z * p1[2] + camera_axis.x  *p1[0] + camera_axis.y  * p1[1]) - camera_axis.x * ( - well_axis.z * t - well_axis.z * p1[2] + well_axis.x  *p1[0] + well_axis.y  * p1[1]) / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)
    z = t


    x = ((well_axis.z * camera_axis.y - camera_axis.z * well_axis.y) * t
    - camera_axis.z * well_axis.y * p1[2]
    + camera_axis.x  * well_axis.y * p1[0]
    + well_axis.z * camera_axis.y * p1[2]
    - well_axis.x * camera_axis.y * p1[0] ) 
    / (well_axis.y * camera_axis.x - camera_axis.y * well_axis.x)

    y =  ( - camera_axis.z * well_axis.x * t - camera_axis.z * well_axis.x * p1[2] + camera_axis.x  * well_axis.x * p1[0] + camera_axis.y  * well_axis.x * p1[1] + well_axis.z * camera_axis.x * t + well_axis.z * camera_axis.x * p1[2] - well_axis.x  * camera_axis.x * p1[0] - well_axis.y * camera_axis.x * p1[1]) / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)


    y =  ((well_axis.z * camera_axis.x - camera_axis.z * well_axis.x) * t 
    - camera_axis.z * well_axis.x * p1[2]
    + camera_axis.y  * well_axis.x * p1[1] 
    + well_axis.z * camera_axis.x * p1[2]
    - well_axis.y * camera_axis.x * p1[1])
    / (well_axis.x * camera_axis.y - camera_axis.x * well_axis.y)


    // Given these two equations, we now have a parameterised equation

    // (x,y,z) = (mt + a, nt + b, t) = (m, n, 1)t + (a, b, 0)
    // 
    // m = ((well_axis[2] * camera_axis[1] - camera_axis[2] * well_axis[1])) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0])
    // 
    // n = ((well_axis[2] * camera_axis[0] - camera_axis[2] * well_axis[0])) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1])
    // 
    // a = (- camera_axis[2] * well_axis[1] * p1[2] + camera_axis[0] * well_axis[1] * p1[0] + well_axis[2] * camera_axis[1] * p1[2] - well_axis[0] * camera_axis[1] * p1[0]) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0])
    // 
    // b = (- camera_axis[2] * well_axis[0] * p1[2] +camera_axis[1] * well_axis[0] * p1[1]  + well_axis[2] * camera_axis[0] * p1[2] - well_axis[1] * camera_axis[0] * p1[1]) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1])
*/

    // Note: each denominator below is (up to sign) the z-component of the cross
    // product of the two plane normals; it vanishes whenever that cross product
    // has no z-component, i.e. whenever z cannot serve as the free parameter.
    float m = (well_axis[2] * camera_axis[1] - camera_axis[2] * well_axis[1]) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0]);
    float n = (well_axis[2] * camera_axis[0] - camera_axis[2] * well_axis[0]) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1]);
    float a = (-camera_axis[2] * well_axis[1] * p1[2] + camera_axis[0] * well_axis[1] * p1[0] + well_axis[2] * camera_axis[1] * p1[2] - well_axis[0] * camera_axis[1] * p1[0]) / (well_axis[1] * camera_axis[0] - camera_axis[1] * well_axis[0]);
    float b = (-camera_axis[2] * well_axis[0] * p1[2] + camera_axis[1] * well_axis[0] * p1[1] + well_axis[2] * camera_axis[0] * p1[2] - well_axis[1] * camera_axis[0] * p1[1]) / (well_axis[0] * camera_axis[1] - camera_axis[0] * well_axis[1]);

    float t = 2; // an arbitrary parameter value along the line, not a point at distance ribbon_width from p1

    return SbVec3f(m * t + a, n * t + b, t);
}

void setVertices(WellBore * pWell, SoVertexProperty * vertex_property, SoCamera* camera)
{
    int nPoints = pWell->nPoints;

    const float ribbon_width = 50.0f;

    int vertex_index = 0;
    int face_index = 0;

    int max_to_draw = nPoints;
    vertex_property->vertex.deleteValues(max_to_draw); // delete stale values from index max_to_draw onward; set1Value below regrows the array

    SbVec3f on_well0x = pWell->points[1];
    SbVec3f in_space0x = getOuterPoint(pWell->points[0], on_well0x, ribbon_width, camera);

    for (int i = 0; i < max_to_draw - 1; ++i)
    {
        SbVec3f on_well0 = pWell->points[i];
        SbVec3f on_well1 = pWell->points[i + 1];

        SbVec3f in_space1 = getOuterPoint(on_well0, on_well1, ribbon_width, camera);

        vertex_property->vertex.set1Value(vertex_index + 0, in_space0x);
        vertex_property->vertex.set1Value(vertex_index + 1, on_well0x);
        vertex_property->vertex.set1Value(vertex_index + 2, on_well1);
        vertex_property->vertex.set1Value(vertex_index + 3, in_space0x);
        vertex_property->vertex.set1Value(vertex_index + 4, in_space1);

        vertex_index += 5;

        on_well0x = on_well1;
        in_space0x = in_space1;
    }
}

void cameraDebug(SoXtViewer * myViewer, WellBore* pWell)
{
    SoCamera* camera = myViewer->getCamera();

    SbVec3f camera_position = camera->position.getValue();
    //std::cout << camera_position[0] << " " << camera_position[1] << " " << camera_position[2] << std::endl;

    SbVec3f axis;
    float angle;
    camera->orientation.getValue().getValue(axis, angle);
    //std::cout << axis[0] << " " << axis[1] << " " << axis[2] << ":" << angle << std::endl;

    SoNode* node = SoNode::getByName(SbName("points"));
    SbString str;
    SoVertexProperty* vertices = static_cast<SoVertexProperty*>(static_cast<SoVertexShape*>(node)->vertexProperty.getValue());
    //std::cout << vertices->vertex.getNum() << str << std::endl;

    setVertices(pWell, vertices, camera);
}

1 Answer:

Answer 0: (score: 0)

A narrowly specific answer to your question is relatively easy. The approach stated in your question sounds correct, but the code you provided seems much too complicated... In short: you have one vector defined by two consecutive points along the ribbon path, and another vector defined by the camera direction. The vector you need is simply the cross product of those two vectors. The camera position is irrelevant; only the orientation matters. With Open Inventor you have to compute the camera direction from the default direction vector and the current camera orientation, like this:

// Camera (reverse) direction vector
SbVec3f defVec(0,0,1), camVec;
const SbRotation& camRot = camera->orientation.getValue();
camRot.multVec( defVec, camVec );
camVec.normalize();

If 'verts' is the ribbon path, then for each segment of the path you have two coordinates and can compute two additional offset coordinates, such that the four coordinates define a rectangular polygon facing the user, like:

SbVec3f ribVec, orthoVec;
for (int i = 0; i < numSegs; ++i) {
  ribVec = verts[i+1] - verts[i];
  ribVec.normalize();
  orthoVec = camVec.cross(ribVec);
  orthoVec.normalize();
  verts2[i*4  ] = verts[i];  // i*4 because 4 verts per segment
  verts2[i*4+1] = verts[i+1];
  verts2[i*4+2] = verts[i+1] + ribbonWidth * orthoVec;
  verts2[i*4+3] = verts[i  ] + ribbonWidth * orthoVec;
}

Now you can work on the harder part: how to handle the "joints" between these polygons so the ribbon looks good...
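
One simple way to handle those joints, sketched under assumptions (offsetDirs is a hypothetical array holding the orthoVec computed for each segment in the loop above; verts, numSegs and ribbonWidth as before): average the offset directions of the two segments that meet at each interior point, so neighbouring quads share their outer vertices.

// Sketch: "mitered" joints, assuming offsetDirs[i] holds segment i's orthoVec.
std::vector<SbVec3f> pointDirs(numSegs + 1);
pointDirs[0] = offsetDirs[0];                 // end points keep their segment's direction
pointDirs[numSegs] = offsetDirs[numSegs - 1];
for (int i = 1; i < numSegs; ++i) {
    SbVec3f avg = offsetDirs[i - 1] + offsetDirs[i]; // halfway between the two segment offsets
    avg.normalize();
    pointDirs[i] = avg;
}
// The outer edge vertex at point i is then verts[i] + ribbonWidth * pointDirs[i],
// so adjacent quads share their offset vertices and the ribbon shows no gaps.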