I have the following database table:
```php
Schema::create('x_poll_questions', function (Blueprint $table) {
    $table->engine = 'InnoDB';
    $table->increments('id');
    $table->string('question');
    $table->text('batch_ids');
    $table->timestamps();
});
```
It contains data like this:

```
id | question        | batch_ids
---+-----------------+--------------
 1 | test question 1 | ["1","4","2"]
 2 | test question 2 | ["1","5","3"]
 3 | test question 3 | ["1"]
 4 | test question 4 | ["3", "2"]
```
In the `batch_ids` column the values look like `["1","4","2"]`; this is the JSON-encoded value of a PHP array.
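For reference, such a value is produced along these lines (a minimal sketch; `$batchIds` is a hypothetical input array):

```php
$batchIds = ['1', '4', '2'];
// json_encode() turns the PHP array into the string stored in batch_ids.
$stored = json_encode($batchIds); // '["1","4","2"]'
```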
Now, how can I write a query that checks whether any of the specified values (for example, 1 and 2) exists in the `batch_ids` column and, if so, returns those rows?
The required output is:

```
id | question        | batch_ids
---+-----------------+--------------
 1 | test question 1 | ["1","4","2"]
 4 | test question 4 | ["3", "2"]
```
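Worth noting: since each stored value is itself valid JSON, on MySQL 5.7+ the same match can also be expressed with MySQL's JSON functions instead of `LIKE`. A minimal sketch, assuming a Laravel version with `whereJsonContains` (5.6+) and that `Poll` is the model behind `x_poll_questions` (the model name is taken from the note below):

```php
$ids = ['1', '2'];

// json_contains() parses the TEXT column as JSON on every row, so this
// errors out if any row holds invalid JSON; the LIKE approach below does not.
$rows = Poll::where(function ($query) use ($ids) {
    foreach ($ids as $id) {
        // OR semantics: a row matches if it contains any of the ids.
        $query->orWhereJsonContains('batch_ids', $id);
    }
})->get();
```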
Updated answer
Adding the following scope to the Poll model worked for me.
```php
public function scopeFilterByBatchIds($q, $batchIds)
{
    if ($batchIds) {
        $batches = explode(',', $batchIds);
        // Group the conditions with orWhere so a row matches when it contains
        // ANY of the given ids; a plain where() per id would require ALL of
        // them and would exclude row 4 in the expected output above.
        $q->where(function ($query) use ($batches) {
            foreach ($batches as $batchId) {
                // The quotes in the LIKE pattern stop "1" from matching "11".
                $query->orWhere('batch_ids', 'like', '%"' . trim($batchId) . '"%');
            }
        });
    }
    return $q;
}
```
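Calling the scope then looks like this (the comma-separated input format matches the `explode(',')` above; the variable name is illustrative):

```php
// With the sample data this returns rows 1 and 4: each of them
// contains "1" or "2" in its batch_ids array.
$questions = Poll::filterByBatchIds('1,2')->get();
```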
Answer 0 (score: 0)
Find the database query in it; you will need to adjust it accordingly.
```swift
import UIKit
import AVFoundation
import RxCocoa
import RxSwift
import SnapKit // snp.makeConstraints below is SnapKit, so the import is required

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    private let disposeBag = DisposeBag()
    let camPreview = UIView()
    let imageView = UIImageView()
    var camera: AVCaptureDevice!
    var videoInput: AVCaptureDeviceInput!
    let captureSession = AVCaptureSession()
    private lazy var videoOutput: AVCaptureVideoDataOutput! = {
        let videoOutput = AVCaptureVideoDataOutput()
        // Frames are delivered on the main queue so display and writing happen
        // in one place, at the cost of some throughput.
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        videoOutput.alwaysDiscardsLateVideoFrames = true
        return videoOutput
    }()
    var assetWriter: AVAssetWriter!
    var videoAssetInput: AVAssetWriterInput!
    var pixelBuffer: AVAssetWriterInputPixelBufferAdaptor!
    var startTime: CMTime!
    var endTime: CMTime!
    var frameNumber: Int64 = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        self.view.backgroundColor = UIColor.white
        imageView.backgroundColor = UIColor.groupTableViewBackground
        imageView.contentMode = .scaleAspectFill
        imageView.layer.masksToBounds = true
        self.view.addSubview(imageView)
        imageView.snp.makeConstraints { make in
            make.edges.equalToSuperview()
        }
        self.view.layoutIfNeeded()
        if setupSession() {
            startSession()
        }
        let startButton = UIButton()
        startButton.setTitle("startButton", for: .normal)
        startButton.rx.controlEvent(.touchUpInside)
            .asDriver()
            .drive(onNext: { self.startRecording() })
            .disposed(by: disposeBag)
        self.view.addSubview(startButton)
        startButton.snp.makeConstraints { make in
            make.center.equalTo(self.view.snp.center)
        }
        let stopButton = UIButton()
        stopButton.setTitle("stopButton", for: .normal)
        stopButton.rx.controlEvent(.touchUpInside)
            .asDriver()
            .drive(onNext: { self.stopRecording() })
            .disposed(by: disposeBag)
        self.view.addSubview(stopButton)
        stopButton.snp.makeConstraints { make in
            make.centerX.equalTo(self.view.snp.centerX)
            make.top.equalTo(startButton.snp.bottom).offset(30.0)
        }
    }

    private func startRecording() {
        let documentPath = NSHomeDirectory() + "/Documents/"
        let filePath = documentPath + "video.mp4"
        let fileURL = URL(fileURLWithPath: filePath)
        // AVAssetWriter fails to start if the output file already exists,
        // so remove any leftover recording first.
        try? FileManager.default.removeItem(at: fileURL)
        let videoSettings = [
            AVVideoWidthKey: 480,
            AVVideoHeightKey: 640,
            AVVideoCodecKey: AVVideoCodecType.h264
        ] as [String: Any]
        videoAssetInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
        pixelBuffer = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoAssetInput, sourcePixelBufferAttributes: [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)])
        frameNumber = 0
        do {
            assetWriter = try AVAssetWriter(outputURL: fileURL, fileType: .mp4)
            videoAssetInput.expectsMediaDataInRealTime = true
            assetWriter.add(videoAssetInput)
            assetWriter.startWriting()
            assetWriter.startSession(atSourceTime: CMTime.zero)
            print(#function)
        } catch {
            print(error)
        }
    }

    private func stopRecording() {
        // Nothing to finish if recording never started or no frame arrived yet
        // (endTime is only set once a frame has been appended).
        guard videoAssetInput != nil, endTime != nil else { return }
        videoAssetInput.markAsFinished()
        assetWriter.endSession(atSourceTime: endTime)
        assetWriter.finishWriting {
            self.videoAssetInput = nil
        }
    }

    func setupSession() -> Bool {
        captureSession.sessionPreset = AVCaptureSession.Preset.vga640x480
        let camera = AVCaptureDevice.default(for: .video)
        do {
            let input = try AVCaptureDeviceInput(device: camera!)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
            }
        } catch {
            print(error)
            return false
        }
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
        return true
    }

    func startSession() {
        if !captureSession.isRunning {
            DispatchQueue.main.async {
                self.captureSession.startRunning()
                print(#function)
            }
        }
    }

    func stopSession() {
        if captureSession.isRunning {
            DispatchQueue.main.async {
                self.captureSession.stopRunning()
            }
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        autoreleasepool {
            connection.videoOrientation = AVCaptureVideoOrientation.portrait
            guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
                return
            }
            let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
            let context = CIContext(options: nil)
            let imageRef = context.createCGImage(cameraImage, from: cameraImage.extent)
            let image = UIImage(cgImage: imageRef!)
            DispatchQueue.main.async {
                // filter(_image:) is not a UIKit API; it is a custom UIImageView
                // extension that applies the desired effect and sets the result
                // as the image (see the sketch below the class).
                self.imageView.filter(_image: image)
                guard
                    let videoAssetInput = self.videoAssetInput,
                    let displayedImage = self.imageView.image
                else {
                    return
                }
                if !CMSampleBufferDataIsReady(sampleBuffer) {
                    return
                }
                if self.frameNumber == 0 {
                    self.startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
                }
                let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
                // Rebase timestamps so the written file starts at zero, matching
                // startSession(atSourceTime: .zero) in startRecording().
                let frameTime = CMTimeSubtract(timestamp, self.startTime)
                if videoAssetInput.isReadyForMoreMediaData {
                    if let pxBuffer: CVPixelBuffer = self.buffer(from: displayedImage) {
                        self.pixelBuffer.append(pxBuffer, withPresentationTime: frameTime)
                    }
                    self.frameNumber += 1
                }
                self.endTime = frameTime
            }
        }
    }

    // Renders a UIImage into a fresh CVPixelBuffer so it can be appended
    // through the AVAssetWriterInputPixelBufferAdaptor.
    func buffer(from image: UIImage) -> CVPixelBuffer? {
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer: CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard status == kCVReturnSuccess else {
            return nil
        }
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
        // Flip the coordinate system: UIKit draws top-down, CoreGraphics bottom-up.
        context?.translateBy(x: 0, y: image.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        UIGraphicsPushContext(context!)
        image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        return pixelBuffer
    }
}
```
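One gap worth flagging: the `imageView.filter(_image:)` call in `captureOutput` is not a UIKit API, so the class will not compile without a custom extension providing it. A minimal sketch of such an extension, assuming it simply applies a Core Image filter and displays the result (the sepia effect here is an arbitrary placeholder, not the original author's filter):

```swift
import UIKit
import CoreImage

extension UIImageView {
    // Hypothetical stand-in for the filter(_image:) call used above:
    // applies a Core Image filter to the frame and shows the result.
    func filter(_image: UIImage) {
        guard let ciImage = CIImage(image: _image) else {
            image = _image
            return
        }
        let filter = CIFilter(name: "CISepiaTone")!
        filter.setValue(ciImage, forKey: kCIInputImageKey)
        filter.setValue(0.8, forKey: kCIInputIntensityKey)
        let context = CIContext(options: nil)
        guard
            let output = filter.outputImage,
            let cgImage = context.createCGImage(output, from: output.extent)
        else {
            image = _image // fall back to the unfiltered frame
            return
        }
        image = UIImage(cgImage: cgImage)
    }
}
```

Because `captureOutput` reads `self.imageView.image` back for encoding, whatever this method assigns to `image` is what ends up in the recorded file.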