我正在进行光线跟踪,并决定使用边界框(轴对齐的bbox)作为对象(立方体),然后对它们进行着色。我能够找到正确的
import UIKit
import MobileCoreServices
import AVFoundation
import AVKit
/// Camera screen backed by AVFoundation: shows a live preview of the back
/// camera, records video to a temporary file while the capture button is
/// long-pressed, and plays the clip back once recording finishes.
///
/// NOTE(review): written against the pre-iOS-10 / Swift 2 AVFoundation API
/// (AVCaptureStillImageOutput, `.Back`, NSURL, ...). `configureDevice()` is
/// called but not defined in this file — presumably defined elsewhere in the
/// project; confirm before building.
class ViewControllerPhoto: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIPickerViewDelegate, UIGestureRecognizerDelegate, ACEDrawingViewDelegate, UITextViewDelegate, AVCaptureFileOutputRecordingDelegate, UITableViewDelegate, UITableViewDataSource {

    @IBOutlet weak var captureButton: UIButton!

    /// True while a long-press-initiated video recording is in progress.
    var videoCheck: Bool = false

    let captureSession = AVCaptureSession()
    // FIX: `stillImageOutput` was referenced in beginSession() but never
    // declared anywhere in the class.
    let stillImageOutput = AVCaptureStillImageOutput()
    var previewLayer : AVCaptureVideoPreviewLayer?
    var captureDevice : AVCaptureDevice?
    var movieFileOutput = AVCaptureMovieFileOutput()
    var imageData: NSData!
    // Path string of the temporary movie file (set in captureVideo()).
    var outputPath: NSString!
    // File URL of the temporary movie file (set in captureVideo()).
    var outputURL: NSURL!

    override func viewDidLoad() {
        super.viewDidLoad()
        if captureSession.canSetSessionPreset(AVCaptureSessionPresetMedium) {
            captureSession.sessionPreset = AVCaptureSessionPresetMedium
        }
        // Loop through all capture devices, looking for a back-facing video
        // camera; when found, remember it and start the session.
        let devices = AVCaptureDevice.devices()
        for device in devices {
            if (device.hasMediaType(AVMediaTypeVideo)) {
                if (device.position == AVCaptureDevicePosition.Back) {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        print("Capture device found")
                        beginSession()
                    }
                }
            }
        }
        self.videoCheck = false
    }

    /// Wires the still/movie outputs and the camera/microphone inputs into
    /// the capture session, attaches the preview layer, and starts running.
    func beginSession() {
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
        configureDevice()
        do {
            try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
        }
        catch let error as NSError {
            // FIX: previously printed a stale, always-nil `err` variable
            // instead of the error actually thrown by the input creation.
            print("error: \(error.localizedDescription)")
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.view.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.view.layer.frame
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        if captureSession.canAddOutput(movieFileOutput) {
            self.captureSession.addOutput(movieFileOutput)
        }
        // Record video in portrait orientation when the connection allows it.
        let captureConnection = movieFileOutput.connectionWithMediaType(AVMediaTypeVideo)
        if captureConnection.supportsVideoOrientation {
            captureConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
        }
        // FIX: guard against an empty audio-device list (e.g. hardware with
        // no microphone) instead of force-indexing [0] and crashing.
        if let audioDevice = AVCaptureDevice.devicesWithMediaType(AVMediaTypeAudio).first as? AVCaptureDevice {
            do {
                let audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
                if captureSession.canAddInput(audioDeviceInput) {
                    captureSession.addInput(audioDeviceInput)
                }
            }
            catch {
                print("error")
            }
        }
        captureSession.startRunning()
    }

    /// Starts recording to <tmp>/movie.mov, deleting any previous take first
    /// so AVCaptureMovieFileOutput does not fail on an existing file.
    func captureVideo() {
        outputPath = (NSURL(fileURLWithPath: NSTemporaryDirectory())).URLByAppendingPathComponent("movie.mov").absoluteString as NSString
        outputURL = NSURL(fileURLWithPath: outputPath as String)
        let fileManager = NSFileManager.defaultManager()
        if let path = outputURL.path {
            if fileManager.fileExistsAtPath(path) {
                do {
                    try fileManager.removeItemAtPath(outputPath as String)
                }
                catch {
                    print(error)
                }
            }
        }
        self.movieFileOutput.startRecordingToOutputFileURL(outputURL, recordingDelegate: self)
    }

    /// Returns the first capture device at `position`.
    /// NOTE(review): falls back to a bare AVCaptureDevice() sentinel when no
    /// device matches — callers receive a device with no media support.
    func cameraWithPosition(position: AVCaptureDevicePosition) -> AVCaptureDevice {
        let devices: NSArray = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
        for device in devices {
            if (device.position == position) {
                return device as! AVCaptureDevice
            }
        }
        return AVCaptureDevice()
    }

    /// Long-press on the capture button: begin recording on press, stop on
    /// release (the file-output delegate callback then plays the clip back).
    @IBAction func captureButtonIsLongPressed(sender: UILongPressGestureRecognizer) {
        if sender.state == UIGestureRecognizerState.Began {
            videoCheck = true
            captureVideo()
        }
        else if sender.state == UIGestureRecognizerState.Ended {
            self.movieFileOutput.stopRecording()
        }
    }

    /// AVCaptureFileOutputRecordingDelegate: recording finished — play it.
    func captureOutput(captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAtURL outputFileURL: NSURL!, fromConnections connections: [AnyObject]!, error: NSError!) {
        print("Output")
        playVideo()
    }

    /// Plays the recorded movie full-screen over the current view.
    func playVideo() {
        let player = AVPlayer(URL: outputURL)
        let playerLayer = AVPlayerLayer(player: player)
        playerLayer.frame = self.view.bounds
        // FIX: the player layer was created but never added to the view
        // hierarchy, so playback was audible but invisible.
        self.view.layer.addSublayer(playerLayer)
        player.play()
    }
}
值和交点;但是,我找不到计算表面法线的方法,因为我只有 ray direction(光线方向)、ray origin(光线原点)、intersection point(交点)、t value(t 值)和 bbox(包围盒)。
有没有办法用我的信息计算交叉点处的法线(或决定立方体光线的哪个面相交)?
我正在使用Williams等人的“高效且稳健的Ray-Box交叉算法”
答案 0(得分:0)
如果您有交叉点和AABB(BoundingBox)中心,您可以快速计算以获得与您所击中的面相对应的索引。
然后使用存储法线的数组,您可以获取数据。
// Computes the outward surface normal of an axis-aligned box at hit point
// `inter`, given the box centre `aabbCenter`: choose the axis of the face
// that was struck, then orient the unit normal by the sign of the hit's
// offset from the centre.
// NOTE(review): the fixed <1,-1> bounds in isBetween* suggest this assumes
// a box with unit half-extents (or pre-normalised coordinates) — confirm.
// NOTE(review): divides by interRelative.x and interRelative.z; a hit lying
// exactly on the x- or z-centre plane divides by zero — verify upstream
// guards before relying on this.
Vector3 ComputeNormal(Vector3 inter, Vector3 aabbCenter)
{
static const Vector3 normals[] = { // A cube has 3 possible orientations
Vector3(1,0,0),
Vector3(0,1,0),
Vector3(0,0,1)
};
// Hit position relative to the box centre; its dominant component
// identifies which pair of faces was struck.
const Vector3 interRelative = inter - aabbCenter;
const float xyCoef = interRelative.y / interRelative.x;
const float zyCoef = interRelative.y / interRelative.z;
// Select the axis index into `normals`: 1 (y) when the y/x ratio falls in
// [-1,1], else 2 (z) when the y/z ratio falls in (-1,1), else 0 (x).
const int coef = (isBetweenInclusive<1,-1>(xyCoef) ? 1 :
(isBetweenExclusive<1,-1>(zyCoef) ? 2 : 0));
// Here it's exclusive to avoid coef to be 3
return normals[coef] * SIGN(interRelative); // The sign orients the normal outward (toward the hit side)
}
我没有对它进行过测试,所以如果它不能直接起作用也不要感到惊讶;)但它应该可以解决这个问题。