In my application I need to capture video and add a watermark to that video. The watermark should be text (a timestamp and a note). I saw some code that uses the "QTKit" framework. However, I have read that this framework is not available on the iPhone.
Thanks in advance.
Posted on 2020-06-30 15:21:29
Starting from the swift sample code on mikitamanko's blog for adding a CALayer to a video, I made a few small changes to fix the following error:
Error Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedFailureReason=The video could not be composed., NSLocalizedDescription=Operation Stopped, NSUnderlyingError=0x2830559b0 {Error Domain=NSOSStatusErrorDomain Code=-17390 "(null)"}}
The solution is to use the composition's video track instead of the original asset's video track when setting up the layer instruction, as in the following swift 5 code:
import AVFoundation
import UIKit

static func addSketchLayer(url: URL, sketchLayer: CALayer, block: @escaping (Result<URL, VideoExportError>) -> Void) {
    let composition = AVMutableComposition()
    let vidAsset = AVURLAsset(url: url)

    let videoTrack = vidAsset.tracks(withMediaType: AVMediaType.video)[0]
    let duration = vidAsset.duration
    let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: duration)

    // Apply the preferred transform so the render size matches the displayed orientation
    let videoRect = CGRect(origin: .zero, size: videoTrack.naturalSize)
    let transformedVideoRect = videoRect.applying(videoTrack.preferredTransform)
    let size = transformedVideoRect.size

    // Insert the source video track into the composition
    let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))!
    try? compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
    compositionvideoTrack.preferredTransform = videoTrack.preferredTransform

    // Layer tree: the video layer is filled with the rendered video frames,
    // and the sketch layer is drawn on top of it
    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    videolayer.opacity = 1.0
    sketchLayer.contentsScale = 1

    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    sketchLayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(sketchLayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    layercomposition.renderScale = 1.0
    layercomposition.renderSize = CGSize(width: size.width, height: size.height)
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: [videolayer], in: parentlayer)

    // Use the composition's video track (not the original asset track) for the layer instruction,
    // otherwise the export fails with AVFoundationErrorDomain -11841
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: composition.duration)
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionvideoTrack)
    layerinstruction.setTransform(compositionvideoTrack.preferredTransform, at: CMTime.zero)
    instruction.layerInstructions = [layerinstruction] as [AVVideoCompositionLayerInstruction]
    layercomposition.instructions = [instruction] as [AVVideoCompositionInstructionProtocol]

    // Copy the audio tracks into the composition unchanged
    let compositionAudioTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
    let audioTracks = vidAsset.tracks(withMediaType: AVMediaType.audio)
    for audioTrack in audioTracks {
        try? compositionAudioTrack?.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: CMTime.zero)
    }

    // Export to a temporary file, removing any previous result first
    let movieDestinationUrl = URL(fileURLWithPath: NSTemporaryDirectory() + "/exported.mp4")
    try? FileManager().removeItem(at: movieDestinationUrl)

    let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
    assetExport.outputFileType = AVFileType.mp4
    assetExport.outputURL = movieDestinationUrl
    assetExport.videoComposition = layercomposition

    assetExport.exportAsynchronously(completionHandler: {
        switch assetExport.status {
        case .failed:
            print(assetExport.error ?? "unknown error")
            block(.failure(.failed))
        case .cancelled:
            print(assetExport.error ?? "unknown error")
            block(.failure(.canceled))
        default:
            block(.success(movieDestinationUrl))
        }
    })
}

enum VideoExportError: Error {
    case failed
    case canceled
}
Note that, according to AVFoundation Crash on Exporting Video With Text Layer, this code crashes only on the simulator but works on a real device.
Also note that the width and height are used after applying the preferred video transform.
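To get the text watermark (timestamp plus note) asked about in the question, a CATextLayer can be passed in as sketchLayer. Below is a minimal sketch; the helper name makeWatermarkLayer, the VideoExporter type hosting addSketchLayer, and the videoURL value are all placeholders, not part of the original answer:

import UIKit

// Hypothetical helper: builds a text layer with the current time and a note,
// sized to the video's transformed dimensions.
func makeWatermarkLayer(note: String, videoSize: CGSize) -> CALayer {
    let textLayer = CATextLayer()
    let timestamp = DateFormatter.localizedString(from: Date(), dateStyle: .short, timeStyle: .medium)
    textLayer.string = "\(timestamp)  \(note)"
    textLayer.font = UIFont.boldSystemFont(ofSize: 24)
    textLayer.fontSize = 24
    textLayer.foregroundColor = UIColor.white.cgColor
    textLayer.alignmentMode = .right
    // Core Animation's origin is at the bottom left, so a small y offset places the text near the bottom edge
    textLayer.frame = CGRect(x: 0, y: 16, width: videoSize.width - 16, height: 36)
    return textLayer
}

// Usage sketch (VideoExporter stands in for whatever type declares addSketchLayer):
// let watermark = makeWatermarkLayer(note: "Site inspection", videoSize: CGSize(width: 1920, height: 1080))
// VideoExporter.addSketchLayer(url: videoURL, sketchLayer: watermark) { result in
//     switch result {
//     case .success(let exportedURL): print("exported to \(exportedURL)")
//     case .failure(let error): print("export failed: \(error)")
//     }
// }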
https://stackoverflow.com/questions/7205820