简介和背景:
我一直在做一个项目,让用户可以通过相机进行一些自定义操作(实时反馈)
目前,我以下列方式启动捕获会话:
from PyQt4 import QtGui
from PyQt4 import QtCore
import ui_test #Gui File
import sys
import pyqtgraph as pg
class Gui(QtGui.QMainWindow, ui_test.Ui_MainWindow):
    """Main window: loads the Qt Designer UI and embeds a pyqtgraph ViewBox."""

    def __init__(self):
        # NOTE: super(self.__class__, self) recurses infinitely if Gui is ever
        # subclassed, so name the class explicitly.
        super(Gui, self).__init__()
        self.setupUi(self)  # Generated by pyuic4 from the Designer file (ui_test module)
        self.plot()

    def plot(self):
        """Install an empty pyqtgraph ViewBox as the graphics view's central item."""
        vb = pg.ViewBox()
        self.graphicsView.setCentralItem(vb)
def main():
    """Create the QApplication, show the main window, and run the event loop."""
    app = QtGui.QApplication(sys.argv)  # A new instance of QApplication
    form = Gui()                        # Instantiate the main window
    form.show()                         # Show the form
    # Propagate Qt's exit status to the shell instead of silently discarding it.
    sys.exit(app.exec_())


if __name__ == '__main__':  # Running the file directly, not importing it
    main()
其中
var session: AVCaptureSession?
// Still-photo output attached to the session in viewWillAppear.
// NOTE(review): AVCaptureStillImageOutput is deprecated since iOS 10;
// AVCapturePhotoOutput is the modern replacement — confirm deployment target.
var stillImageOutput: AVCaptureStillImageOutput?
// Live camera preview layer, added as a sublayer of CameraView.
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    // Size the preview to the container every time the view (re)appears.
    // Optional chaining instead of force unwrap: the layer is nil whenever
    // session setup in viewWillAppear failed (e.g. no camera on the simulator).
    videoPreviewLayer?.frame = CameraView.bounds
}
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)

    // Build a fresh capture session each time the view is about to appear.
    let captureSession = AVCaptureSession()
    captureSession.sessionPreset = AVCaptureSessionPresetPhoto
    session = captureSession

    // Bail out gracefully instead of crashing when no back camera exists
    // (e.g. running on the simulator) or the device input cannot be created.
    guard let backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo),
          let input = try? AVCaptureDeviceInput(device: backCamera),
          captureSession.canAddInput(input) else {
        return
    }
    captureSession.addInput(input)

    // NOTE(review): AVCaptureStillImageOutput is deprecated since iOS 10;
    // AVCapturePhotoOutput is the modern replacement — confirm deployment target.
    let output = AVCaptureStillImageOutput()
    output.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
    guard captureSession.canAddOutput(output) else { return }
    captureSession.addOutput(output)
    stillImageOutput = output

    // Attach the live preview layer to the container view.
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
    videoPreviewLayer?.connection?.videoOrientation = .portrait
    if let previewLayer = videoPreviewLayer {
        CameraView.layer.addSublayer(previewLayer)
    }

    captureSession.startRunning()
}
}
其中 CameraView 是我的 viewcontroller 中的一个 UIView。我现在想获取捕获的每一帧,处理它,然后显示到 CameraView 中(也许我应该改用 UIImageView?)……
研究
我看过这里和这里(原帖中的链接)以及许多其他资料来了解如何获取相机的画面,但这些并没有完全解决我的问题。在我提供的第一个链接中,有趣的是他们的答案里有:
singleTapped()
哪个确实从相机获得了UIImage ,但这是30fps的可行方法吗?
理性和约束:
我之所以需要UIImage的原因是因为我正在利用其他人编写的库来快速自定义地转换UIImage。我想将这种转变呈现给用户" live"。
总结
如果我遗漏了某些内容,或者我应该换一种方式提问,请告诉我。如上所述,这是我的第一篇帖子,所以我对这里的惯例还不太熟悉。谢谢!
答案 0 :(得分:0)