我想通过以下三个函数实现颜色拾取/选择(color picking):
// Encodes a 32-bit selection index as an RGBA vertex color, so the index can
// later be recovered from the framebuffer with glReadPixels.
// NOTE(review): viewing the uint as 4 bytes assumes the platform byte order
// matches the R,G,B,A readback order — confirm on big-endian targets.
void ColorIndex(uint colorIndex)
{
    const GLubyte *asBytes = reinterpret_cast<const GLubyte *>(&colorIndex);
    glColor4ubv(asBytes);
}
// Supplies one packed 32-bit index per vertex as an RGBA color array for the
// color-picking draw pass.
// colorIndices: one index per vertex; the array must stay alive (and
// unmodified) for as long as the client-state color pointer is in use.
void ColorIndices(vector<uint> &colorIndices)
{
    // Each uint is handed to GL as its four constituent bytes (R, G, B, A).
    GLubyte *firstByte = reinterpret_cast<GLubyte *>(&colorIndices[0]);
    glColorPointer(4, GL_UNSIGNED_BYTE, 0, firstByte);
}
void ReadSelectedIndices(int x, int y, int width, int height, uint *selectedIndices)
{
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, selectedIndices);
}
它们基本上将无符号整数(32位)打包成4个字节R,G,B,A。与GL_SELECT相比,它简单而快速。
问题是它有时不起作用。在装有 NVIDIA 9800 显卡的 Windows 上它无法正常工作;而在 Mac/Windows 上的 ATI Radeon 4670 上则工作正常。
我认为问题出在alpha通道中,所以我尝试了这个实现:
// Maps a 24-bit selection index onto three color bytes, avoiding the alpha
// channel entirely (destination alpha is unreliable on some GPU/driver
// combinations).
// NOTE(review): all three members share the same storage; writing colorIndex
// and then reading components (or vice versa) is union type punning — widely
// supported, but technically implementation-defined in C++. The decoding also
// assumes little-endian byte order, i.e. components[0] holds the lowest 8
// bits of colorIndex — TODO confirm on big-endian targets. The anonymous
// struct member is a compiler extension, not standard C++.
union RgbColor
{
GLuint colorIndex : 24; // packed index; at most 2^24 distinct selectable IDs
GLubyte components[3]; // the same 3 bytes viewed as an R,G,B triple
struct
{
GLubyte r;
GLubyte g;
GLubyte b;
};
};
// Emits a 24-bit selection index as an RGB vertex color (alpha untouched).
void ColorIndex(uint colorIndex)
{
    RgbColor packed;
    packed.colorIndex = colorIndex;
    glColor3ubv(packed.components);
}
// Scratch buffer reused across calls (3 bytes — R,G,B — per index) so that
// repeated picking passes do not reallocate each time.
vector<GLubyte> colorComponents;

// Converts every 32-bit index into a 24-bit RGB triple and hands the packed
// byte array to glColorPointer for the color-picking draw pass.
// colorIndices: one selection index per vertex.
// NOTE: the pointer given to glColorPointer stays valid only until
// colorComponents is next modified.
void ColorIndices(vector<uint> &colorIndices)
{
    colorComponents.clear();
    // Guard: &colorComponents[0] below would be undefined on an empty vector.
    if (colorIndices.empty())
        return;
    // Reserve up front to avoid repeated reallocation during push_back.
    colorComponents.reserve(colorIndices.size() * 3);
    for (uint i = 0; i < colorIndices.size(); i++)
    {
        RgbColor color;
        color.colorIndex = colorIndices[i];
        colorComponents.push_back(color.components[0]);
        colorComponents.push_back(color.components[1]);
        colorComponents.push_back(color.components[2]);
    }
    GLubyte *colorPtr = (GLubyte *)&colorComponents[0];
    glColorPointer(3, GL_UNSIGNED_BYTE, 0, colorPtr);
}
const uint kMaxSelectedIndicesCount = 2000 * 2000; // max width * max height resolution
GLubyte colorBuffer[kMaxSelectedIndicesCount * 3];
void ReadSelectedIndices(int x, int y, int width, int height, uint *selectedIndices)
{
uint count = (uint)width * (uint)height;
memset(colorBuffer, 0, count * 3);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glReadPixels(x, y, width, height, GL_RGB, GL_UNSIGNED_BYTE, colorBuffer);
GetGLError();
for (uint i = 0; i < count; i++)
{
RgbColor color;
color.components[0] = colorBuffer[i * 3 + 0];
color.components[1] = colorBuffer[i * 3 + 1];
color.components[2] = colorBuffer[i * 3 + 2];
selectedIndices[i] = color.colorIndex;
}
}
现在,选择在各种显卡和操作系统上都有效,但有时会选中错误的顶点(位于选择矩形之外)。
这怎么可能发生?有没有更好的方法来实现颜色选择,这种方法在各种图形卡上都很强大并且总是正确的?