How to detect more than 10 faces with the Google Vision API

Asked: 2017-09-07 11:35:59

Tags: node.js google-cloud-platform face-detection google-cloud-vision

Hi, I'm new to the Google Vision APIs. I want to detect faces in an image using Node.js. The local image contains more than 10 faces, but the Vision API returns only 10 face detections. Is there a way to detect all the faces with this Vision API? Please refer to the Vision Node API. You can use this image as a reference (image not shown).

Here is my code:

// Assumed setup, not shown in the original snippet: the 2017-era
// @google-cloud/vision client, node-canvas, and the fs module.
var Vision = require('@google-cloud/vision');
var Canvas = require('canvas');
var fs = require('fs');

function findFaceontheImage(req, res, next) {
    var vision = Vision();
    var inputfile = 'NASA_Astronaut_Group_15.jpg';
    var outputFile = 'out.png';
    vision.faceDetection({source: {filename: inputfile}})
        .then(function (results) {
            const faces = results[0].faceAnnotations;
            console.log('Faces:');

            req.body['faces'] = results;
            var numFaces = faces.length;
            console.log('Found ' + numFaces + (numFaces === 1 ? ' face' : ' faces'));

            // Draw a box around each detected face and write the result to outputFile.
            highlightFaces(inputfile, faces, outputFile, Canvas, function (err) {
                if (err) {
                    return next(err);
                }
                console.log("Finished!");
                next();
            });
        })
        .catch(function (err) {
            console.error('ERROR:', err);
        });
}

function highlightFaces(inputFile, faces, outputFile, Canvas, callback) {
    fs.readFile(inputFile, function (err, image) {
        if (err) {
            return callback(err);
        }

        var Image = Canvas.Image;
        // Open the original image into a canvas
        var img = new Image();
        img.src = image;
        var canvas = new Canvas(img.width, img.height);
        var context = canvas.getContext("2d");
        context.drawImage(img, 0, 0, img.width, img.height);

        // Now draw boxes around all the faces
        context.strokeStyle = "rgba(0,255,0,0.8)";
        context.lineWidth = "5";

        faces.forEach(function (face) {
            context.beginPath();
            var origX = 0;
            var origY = 0;
            face.boundingPoly.vertices.forEach(function (bounds, i) {
                if (i === 0) {
                    origX = bounds.x;
                    origY = bounds.y;
                }
                context.lineTo(bounds.x, bounds.y);
            });
            context.lineTo(origX, origY);
            context.stroke();
        });

        // Write the result to a file
        console.log("Writing to file " + outputFile);
        var writeStream = fs.createWriteStream(outputFile);
        var pngStream = canvas.pngStream();

        pngStream.on("data", function (chunk) {
            writeStream.write(chunk);
        });
        pngStream.on("error", console.log);
        pngStream.on("end", callback);
    });
}

2 Answers

Answer 0 (score: 1)

In case anyone else is still struggling with this topic.

With the Node.js client library, you can pass an ImprovedRequest object to the client.faceDetection(..) method instead of just a file path or image URI.

For example, in my case I wanted the API to process an image stored in my GCS bucket. So instead of passing the image URI as a plain string, I do the following.

import { protos } from '@google-cloud/vision';

// BEFORE
const [result] = await CLIENT.faceDetection(`gs://${bucketName}/${filePath}`);

// AFTER
const [result] = await CLIENT.faceDetection({
  image: { 
    source: { imageUri: `gs://${bucketName}/${filePath}` } 
  },
  features: [
    {
      maxResults: 100,
      type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
    },
  ],
});
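The question was about a local file rather than a GCS object, and the same maxResults override applies there as well. Below is a minimal sketch, assuming a current @google-cloud/vision client; the helper name detectAllFaces and the value 100 are illustrative and not part of the original answer.

const vision = require('@google-cloud/vision');
const { protos } = vision;

const client = new vision.ImageAnnotatorClient();

async function detectAllFaces(inputFile) {
  const [result] = await client.faceDetection({
    // A local file path instead of a gs:// URI; the library reads and encodes it.
    image: { source: { filename: inputFile } },
    features: [
      {
        maxResults: 100, // raise the default cap of 10 faces
        type: protos.google.cloud.vision.v1.Feature.Type.FACE_DETECTION,
      },
    ],
  });
  return result.faceAnnotations || [];
}

// Usage:
// detectAllFaces('NASA_Astronaut_Group_15.jpg')
//   .then((faces) => console.log('Found ' + faces.length + ' faces'));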

Answer 1 (score: 0)

Just in case nobody comes up with a way to force the API to return more results, here is a workaround in pseudocode:

def detect_faces(image)
  faces = call_vision_api(image)
  # fewer than 10 faces means the API limit was not hit, so nothing was cut off
  return faces if faces.size < 10
  # split the image into two slightly overlapping halves;
  # the overlap ensures a face lying on the split line is not cut in two
  half1, half2 = split_with_overlap(image)
  a = detect_faces(half1)
  b = detect_faces(half2)
  return a + b - intersection(a, b)

The intersection function should discard faces whose coordinates are the same (allowing for an error of a few pixels), after adjusting for the offset between half1 and half2 of the image.
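As a concrete illustration of that merge step, here is a rough Node.js sketch. It assumes the image was split horizontally, with half2 starting offsetY pixels down in the original image, and that each face carries the boundingPoly.vertices the Vision API returns; the TOLERANCE value and function names are made up for the example.

var TOLERANCE = 8; // pixels of slack when deciding two detections are the same face

function topLeft(face) {
  // The first vertex of boundingPoly is the face's top-left corner.
  return face.boundingPoly.vertices[0];
}

function sameFace(a, b, offsetY) {
  var pa = topLeft(a);
  var pb = topLeft(b);
  // Shift half2's coordinates back into the original image's frame before comparing.
  return Math.abs(pa.x - pb.x) <= TOLERANCE &&
         Math.abs(pa.y - (pb.y + offsetY)) <= TOLERANCE;
}

function mergeFaces(facesHalf1, facesHalf2, offsetY) {
  // Keep every face from half1, plus the faces from half2 that were not
  // already seen in half1 (i.e. drop duplicates from the overlap region).
  var uniqueFromHalf2 = facesHalf2.filter(function (b) {
    return !facesHalf1.some(function (a) {
      return sameFace(a, b, offsetY);
    });
  });
  return facesHalf1.concat(uniqueFromHalf2);
}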