I am going to port some screenshot-grabbing code (C++) from Linux to OS X. The current solution runs the graphical application inside Xvfb and then uses Xlib to grab screenshots from the display. (Running without Xvfb is also supported.)
As I understand it, OS X is moving away from X11, so my question is: what should I use instead of Xlib to achieve this? I have found Quartz Display Services. Does it make sense to use that now? Will it work with Xvfb?
Answer 0 (score: 4)
Yes, you can call functions such as CGDisplayCreateImage (documentation linked for you) by linking the Application Services framework into your C++ tool.
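For example, here is a minimal sketch (the file name and output path are my own placeholders, not from the original answer): it links against ApplicationServices, grabs the main display with CGDisplayCreateImage, and writes the result out as a PNG through ImageIO, so no manual pixel handling is needed.

// Build with:
//   clang++ grab.cpp -o grab -framework ApplicationServices -framework ImageIO
#include <ApplicationServices/ApplicationServices.h>
#include <ImageIO/ImageIO.h>

int main()
{
    // Grab the main display into a CGImage.
    CGImageRef image = CGDisplayCreateImage(CGMainDisplayID());
    if (image == NULL)
        return 1;

    // Write the image out as PNG ("public.png" is the UTI for PNG).
    CFURLRef url = CFURLCreateWithFileSystemPath(
        kCFAllocatorDefault, CFSTR("screen.png"), kCFURLPOSIXPathStyle, false);
    CGImageDestinationRef dest =
        CGImageDestinationCreateWithURL(url, CFSTR("public.png"), 1, NULL);
    CGImageDestinationAddImage(dest, image, NULL);
    bool ok = CGImageDestinationFinalize(dest);

    CFRelease(dest);
    CFRelease(url);
    CGImageRelease(image);
    return ok ? 0 : 1;
}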
Answer 1 (score: 0)
// Requires linking the ApplicationServices framework.
#include <ApplicationServices/ApplicationServices.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>

void captureScreen()
{
    // Grab the main display and copy out its pixel data.
    CGImageRef image_ref = CGDisplayCreateImage(CGMainDisplayID());
    CGDataProviderRef provider = CGImageGetDataProvider(image_ref);
    CFDataRef dataref = CGDataProviderCopyData(provider);
    size_t width = CGImageGetWidth(image_ref);
    size_t height = CGImageGetHeight(image_ref);
    size_t bpp = CGImageGetBitsPerPixel(image_ref) / 8;

    // Copy row by row: CGImageGetBytesPerRow() may be larger than
    // width * bpp because of row padding.
    size_t bytes_per_row = CGImageGetBytesPerRow(image_ref);
    const uint8_t *src = CFDataGetBytePtr(dataref);
    uint8_t *pixels = (uint8_t *)malloc(width * height * bpp);
    for (size_t y = 0; y < height; ++y)
        memcpy(pixels + y * width * bpp, src + y * bytes_per_row, width * bpp);
    CFRelease(dataref);
    CGImageRelease(image_ref);

    // Write the raw, headerless pixel dump; open in binary mode.
    FILE *stream = fopen("/Users/username/Desktop/screencap.raw", "wb");
    fwrite(pixels, bpp, width * height, stream);
    fclose(stream);
    free(pixels);
}
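A usage note on the dump above: the .raw file has no header, so a viewer needs the dimensions and pixel format supplied externally. Assuming the capture is 32-bit BGRA (typical for CGDisplayCreateImage; verify with CGImageGetBitmapInfo), ImageMagick can convert it, substituting your real capture resolution for the placeholder:

convert -size 2560x1600 -depth 8 bgra:screencap.raw screencap.png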
Or in C#:
// https://stackoverflow.com/questions/1537587/capture-screen-image-in-c-on-osx
// https://github.com/Acollie/C-Screenshot-OSX/blob/master/C%2B%2B-screenshot/C%2B%2B-screenshot/main.cpp
// https://github.com/ScreenshotMonitor/ScreenshotCapture/blob/master/src/Pranas.ScreenshotCapture/ScreenshotCapture.cs
// https://screenshotmonitor.com/blog/capturing-screenshots-in-net-and-mono/
namespace rtaStreamingServer
{
    // https://github.com/xamarin/xamarin-macios
    // https://qiita.com/shimshimkaz/items/18bcf4767143ea5897c7
    public static class OSxScreenshot
    {
        private const string LIBCOREGRAPHICS = "/System/Library/Frameworks/CoreGraphics.framework/CoreGraphics";

        [System.Runtime.InteropServices.DllImport(LIBCOREGRAPHICS)]
        private static extern System.IntPtr CGDisplayCreateImage(System.UInt32 displayId);

        [System.Runtime.InteropServices.DllImport(LIBCOREGRAPHICS)]
        private static extern void CFRelease(System.IntPtr handle);

        public static void TestCapture()
        {
            Foundation.NSNumber mainScreen = (Foundation.NSNumber)AppKit.NSScreen.MainScreen.DeviceDescription["NSScreenNumber"];

            using (CoreGraphics.CGImage cgImage = CreateImage(mainScreen.UInt32Value))
            {
                // https://stackoverflow.com/questions/17334786/get-pixel-from-the-screen-screenshot-in-max-osx/17343305#17343305
                // Get byte-array from CGImage
                // https://gist.github.com/zhangao0086/5fafb1e1c0b5d629eb76
                AppKit.NSBitmapImageRep bitmapRep = new AppKit.NSBitmapImageRep(cgImage);

                // var imageData = bitmapRep.representationUsingType(NSBitmapImageFileType.NSPNGFileType, properties: [:])
                Foundation.NSData imageData = bitmapRep.RepresentationUsingTypeProperties(AppKit.NSBitmapImageFileType.Png);

                // Copy the PNG bytes into a managed array via a pinned handle.
                long len = imageData.Length;
                byte[] bytes = new byte[len];
                System.Runtime.InteropServices.GCHandle pinnedArray = System.Runtime.InteropServices.GCHandle.Alloc(bytes, System.Runtime.InteropServices.GCHandleType.Pinned);
                System.IntPtr pointer = pinnedArray.AddrOfPinnedObject();
                // Do your stuff...
                imageData.GetBytes(pointer, new System.IntPtr(len));
                pinnedArray.Free();

                using (AppKit.NSImage nsImage = new AppKit.NSImage(cgImage, new System.Drawing.SizeF(cgImage.Width, cgImage.Height)))
                {
                    // ImageView.Image = nsImage;
                    // And now? How to get the image bytes?
                    // https://theconfuzedsourcecode.wordpress.com/2016/02/24/convert-android-bitmap-image-and-ios-uiimage-to-byte-array-in-xamarin/
                    // https://stackoverflow.com/questions/5645157/nsimage-from-byte-array
                    // https://stackoverflow.com/questions/53060723/nsimage-source-from-byte-array-cocoa-app-xamarin-c-sharp
                    // https://gist.github.com/zhangao0086/5fafb1e1c0b5d629eb76
                    // https://www.quora.com/What-is-a-way-to-convert-UIImage-to-a-byte-array-in-Swift?share=1
                    // https://stackoverflow.com/questions/17112314/converting-uiimage-to-byte-array
                } // End Using nsImage
            } // End Using cgImage
        } // End Sub TestCapture

        public static CoreGraphics.CGImage CreateImage(System.UInt32 displayId)
        {
            System.IntPtr handle = System.IntPtr.Zero;
            try
            {
                handle = CGDisplayCreateImage(displayId);
                return new CoreGraphics.CGImage(handle);
            }
            finally
            {
                // Balances the +1 reference returned by CGDisplayCreateImage;
                // the managed CGImage wrapper retains its own reference.
                if (handle != System.IntPtr.Zero)
                {
                    CFRelease(handle);
                }
            }
        } // End Sub CreateImage
    } // End Class OSxScreenshot
} // End Namespace rtaStreamingServer
Answer 2 (score: 0)
I wrote an example that captures the display and converts it into an OpenCV Mat.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <unistd.h>
#include <stdio.h>
#include <ApplicationServices/ApplicationServices.h>

using namespace std;
using namespace cv;

int main(int argc, char * const argv[])
{
    size_t width = CGDisplayPixelsWide(CGMainDisplayID());
    size_t height = CGDisplayPixelsHigh(CGMainDisplayID());

    Mat im(cv::Size(width, height), CV_8UC4);
    Mat bgrim(cv::Size(width, height), CV_8UC3);
    Mat resizedim(cv::Size(width, height), CV_8UC3);

    // Wrap the Mat's buffer in a bitmap context so CoreGraphics draws
    // straight into OpenCV-owned memory.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef contextRef = CGBitmapContextCreate(
        im.data, im.cols, im.rows,
        8, im.step[0],
        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault);

    while (true)
    {
        // Grab the display, draw it into the Mat, then convert and show it.
        CGImageRef imageRef = CGDisplayCreateImage(CGMainDisplayID());
        CGContextDrawImage(contextRef,
                           CGRectMake(0, 0, width, height),
                           imageRef);
        cvtColor(im, bgrim, cv::COLOR_RGBA2BGR);
        resize(bgrim, resizedim, cv::Size(), 0.5, 0.5);
        imshow("test", resizedim);
        waitKey(10);
        CGImageRelease(imageRef);
    }

    // Unreachable while the loop runs forever:
    // CGContextRelease(contextRef);
    // CGColorSpaceRelease(colorSpace);
    return 0;
}
I had expected this to capture the current contents of the display, but in fact only the desktop wallpaper was captured. What CGMainDisplayID() refers to may be a hint about this issue.
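If the wrong screen (or only the wallpaper) shows up, it is worth checking which display ID you pass. As a sketch of one way to inspect the options (not part of the original answer), you can enumerate the active displays and their bounds, then hand the ID you want to CGDisplayCreateImage instead of CGMainDisplayID():

#include <ApplicationServices/ApplicationServices.h>
#include <cstdio>

int main()
{
    // CGMainDisplayID() is only the primary display; list all active
    // displays and pick the one you actually want to capture.
    CGDirectDisplayID displays[16];
    uint32_t count = 0;
    if (CGGetActiveDisplayList(16, displays, &count) != kCGErrorSuccess)
        return 1;

    for (uint32_t i = 0; i < count; ++i)
    {
        CGRect bounds = CGDisplayBounds(displays[i]);
        printf("display %u: id=%u origin=(%g,%g) size=%gx%g\n",
               i, displays[i],
               bounds.origin.x, bounds.origin.y,
               bounds.size.width, bounds.size.height);
        // CGDisplayCreateImage(displays[i]) would capture this display.
    }
    return 0;
}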
Anyway, I hope this gets you closer to your goal.