swift - How to merge a *single image* with a video

I am trying to combine a single video with a single image. This is not an attempt to combine many images into a single video, as in other, similar questions.

I am using an AVMutableComposition to combine the tracks. My app can already combine a video with another video (that part of the merge works fine!). To handle the image, I try to use an AVAssetWriter to turn the single image into a video (I believe this is where my problem is, but I'm not 100% sure). I then save that file to the app (its documents directory). From there, I access it inside my merge and combine the original video with the image-turned-video.

The flow (a minimal sketch of this wiring in code follows the list):

The user picks an image ->

The image is converted into a video with AVAssetWriter ->

The video I already have on hand is merged with that new video ->

Result: 1 video made from the picked image and the preset video.
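
For reference, here is roughly how those steps are presumably wired together, using the names from the code in this question (MyConverter, merge(), self.asset); the imagePicked(_:) callback itself is hypothetical:

func imagePicked(image: UIImage) {
    // 1. Turn the chosen image into a short video file on disk...
    let converter = MyConverter(image: image)
    converter.build {
        // 2. ...then load that file as an asset and hand it to the merge step.
        self.asset = AVAsset(URL: converter.outputURL)
        self.merge()
    }
}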

The problem I am having: my code produces a blank space where the image should appear in the video. That is, the ImageConverter file I have does convert the image into a video, but I only see the image on the very last frame, while every other frame is transparent, as if the picture were not there at all. So if I convert the image into a 5-second video (at, say, 30 frames/second), I see blank space for (30*5)-1 frames and then, on the last frame, the picture finally shows up. I am just looking for guidance on how to turn a single image into a video OR how to combine a video and an image without converting the image into a video. Thanks!

Merging the files here:

func merge() {
    if let firstAsset = controller.firstAsset, secondAsset = self.asset {

        // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
        let mixComposition = AVMutableComposition()

        let firstTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo,
            preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        do {
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, CMTime(seconds: 8, preferredTimescale: 600)),
                ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                atTime: kCMTimeZero)
        } catch _ {
            print("Failed to load first track")
        }

        do {
            // HERE THE TIME IS 0.666667, BUT SHOULD BE 0
            print(CMTimeGetSeconds(secondAsset.duration), CMTimeGetSeconds(firstTrack.timeRange.duration))
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAsset.duration),
                ofTrack: secondAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                atTime: firstTrack.timeRange.duration)
        } catch _ {
            print("Failed to load second track")
        }
        do {
            try firstTrack.insertTimeRange(CMTimeRangeMake(CMTime(seconds: 8+CMTimeGetSeconds(secondAsset.duration), preferredTimescale: 600), firstAsset.duration),
                ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                atTime: firstTrack.timeRange.duration)
        } catch _ {
            print("failed")
        }

        // 3 - Audio track
        if let loadedAudioAsset = controller.audioAsset {
            let audioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: 0)
            do {
                try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration),
                    ofTrack: loadedAudioAsset.tracksWithMediaType(AVMediaTypeAudio)[0],
                    atTime: kCMTimeZero)
            } catch _ {
                print("Failed to load Audio track")
            }
        }

        // 4 - Get path
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
        let dateFormatter = NSDateFormatter()
        dateFormatter.dateStyle = .LongStyle
        dateFormatter.timeStyle = .ShortStyle
        let date = dateFormatter.stringFromDate(NSDate())
        let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo.mov")
        let url = NSURL(fileURLWithPath: savePath)
        _ = try? NSFileManager().removeItemAtURL(url)

        // 5 - Create Exporter
        print("exporting")
        guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
        exporter.outputURL = url
        exporter.outputFileType = AVFileTypeQuickTimeMovie
        exporter.shouldOptimizeForNetworkUse = false
        exporter.videoComposition = mainComposition

        // 6 - Perform the Export
        controller.currentlyEditing = true
        exporter.exportAsynchronouslyWithCompletionHandler() {
            dispatch_async(dispatch_get_main_queue()) {
                print("done")
                self.controller.currentlyEditing = false
                self.controller.merged = true
                self.button.blurView.superview?.hidden = true
                self.controller.player.replaceCurrentItemWithPlayerItem(AVPlayerItem(URL: url))
                self.controller.firstAsset = AVAsset(URL: url)
            }
        }
    }
}
func exportDidFinish(session: AVAssetExportSession) {
    if session.status == AVAssetExportSessionStatus.Failed {
        print(session.error)
    }
    if session.status == AVAssetExportSessionStatus.Completed {
        print("succeeded")
    }
}
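
(Note that nothing in merge() above calls exportDidFinish(_:); presumably it is meant to be invoked from the export completion handler, along these lines:)

exporter.exportAsynchronouslyWithCompletionHandler() {
    dispatch_async(dispatch_get_main_queue()) {
        self.exportDidFinish(exporter)
    }
}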

Converting the image here:

class MyConverter: NSObject {

    var image: UIImage!

    convenience init(image: UIImage) {
        self.init()
        self.image = image
    }

    var outputURL: NSURL {
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
        let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo-pic.mov")
        return getURL(savePath)
    }

    func getURL(path: String) -> NSURL {
        let movieDestinationUrl = NSURL(fileURLWithPath: path)
        _ = try? NSFileManager().removeItemAtURL(movieDestinationUrl)
        let url = NSURL(fileURLWithPath: path)
        return url
    }

    func build(completion: () -> Void) {
        guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else {
            fatalError("AVAssetWriter error")
        }
        let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(image.size.width)), AVVideoHeightKey : NSNumber(float: Float(image.size.height))]

        guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
            fatalError("Negative : Can't apply the Output settings...")
        }

        let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
        let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(image.size.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(image.size.height))]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

        if videoWriter.canAddInput(videoWriterInput) {
            videoWriter.addInput(videoWriterInput)
        }

        if videoWriter.startWriting() {
            videoWriter.startSessionAtSourceTime(kCMTimeZero)
            assert(pixelBufferAdaptor.pixelBufferPool != nil)
        }

        let media_queue = dispatch_queue_create("mediaInputQueue", nil)

        videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
            var appendSucceeded = true
            // Time HERE IS ZERO, but in Merge file, it is 0.66667
            let presentationTime = CMTimeMake(0, 600)

            var pixelBuffer: CVPixelBuffer? = nil
            let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)

            if let pixelBuffer = pixelBuffer where status == 0 {
                let managedPixelBuffer = pixelBuffer
                CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)

                let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                let context = CGBitmapContextCreate(data, Int(self.image.size.width), Int(self.image.size.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

                CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.image.size.width), CGFloat(self.image.size.height)))

                CGContextDrawImage(context, CGRectMake(0, 0, self.image.size.width, self.image.size.height), self.image.CGImage)

                CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)

                appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
            } else {
                print("Failed to allocate pixel buffer")
                appendSucceeded = false
            }
            if !appendSucceeded {
                print("append failed")
            }
            videoWriterInput.markAsFinished()
            videoWriter.finishWritingWithCompletionHandler { () -> Void in
                print("FINISHED!!!!!")
                completion()
            }
        })
    }
}

Note: I have found that if I do print(presentationTime) inside the ImageConverter it prints 0, yet when I then print the duration of that track in the merge, I get 0.666667.

Note: There are no answers yet, but I will keep a bounty on this question until I find one or someone helps me! Thanks!

Best Answer

OK, so I did deal with this problem a while back. The issue is indeed in how you create the video from the picture. What you need to do is append the pixel buffer at time zero, and then append it AGAIN at the end; otherwise you end up with an empty video until the very last frame, which is exactly what you are experiencing.
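
In other words, the essential fix is just two appends of the same pixel buffer plus an explicit end-of-session time. A distilled sketch (pixelBufferAdaptor, pixelBuffer, and videoWriter are the objects created in the full code below):

// Show the single frame starting at t = 0...
pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: kCMTimeZero)
// ...and append the SAME buffer again at the desired end time, which pins the video's length.
pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: CMTimeMake(5, 1)) // 5 seconds
videoWriter.endSessionAtSourceTime(CMTimeMake(5, 1))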

The code below is my best attempt at updating your code. At the very end I also post my own solution, which is in Objective-C, in case it helps anyone else.

func build(completion: () -> Void) {
    guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else {
        fatalError("AVAssetWriter error")
    }

    // This might not be a problem for you but width HAS to be divisible by 16 or the movie will come out distorted... don't ask me why. So this is a safeguard
    let pixelsToRemove = fmod(Double(image.size.width), 16)
    let pixelsToAdd = 16 - pixelsToRemove
    let size = CGSizeMake(image.size.width + CGFloat(pixelsToAdd), image.size.height)
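    // For example, a 1080-px-wide image: fmod(1080, 16) = 8, so 16 - 8 = 8 px of padding yields a width of 1088, which divides evenly by 16.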

    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(size.width)), AVVideoHeightKey : NSNumber(float: Float(size.height))]

    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
        fatalError("Negative : Can't apply the Output settings...")
    }

    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
    let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(size.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(size.height))]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    if videoWriter.canAddInput(videoWriterInput) {
        videoWriter.addInput(videoWriterInput)
    }

    if videoWriter.startWriting() {
        videoWriter.startSessionAtSourceTime(kCMTimeZero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
    }

    // For simplicity, I'm going to remove the media queue you created and instead explicitly wait until I can append, since I am only writing one pixel buffer at two different times

    var pixelBufferCreated = true
    var pixelBuffer: CVPixelBuffer? = nil
    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)

    if let pixelBuffer = pixelBuffer where status == 0 {
        let managedPixelBuffer = pixelBuffer
        CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)

        let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(data, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

        CGContextClearRect(context, CGRectMake(0, 0, CGFloat(size.width), CGFloat(size.height)))

        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.image.CGImage)

        CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
    } else {
        print("Failed to allocate pixel buffer")
        pixelBufferCreated = false
    }

    if (pixelBufferCreated) {
        // Here is where the magic happens, we have our pixelBuffer and it's time to start writing

        // FIRST - add at time zero
        var appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: kCMTimeZero)
        if (!appendSucceeded) {
            // something went wrong, up to you to handle. Should probably return so the rest of the code is not executed though
        }
        // SECOND - wait until the writer is ready for more data with an empty while
        while !videoWriterInput.readyForMoreMediaData {}

        // THIRD - make a CMTime with the desired length of your picture-video. I am going to arbitrarily make it 5 seconds here
        let frameTime: CMTime = CMTimeMake(5, 1) // 5 seconds
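        // CMTimeMake(value, timescale) represents value/timescale seconds: CMTimeMake(5, 1) is 5 s, and CMTimeMake(150, 30) would express the same 5 s at 1/30-s granularity.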

        // FOURTH - add the same exact pixel buffer to the end of the video you are creating
        appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer!, withPresentationTime: frameTime)
        if (!appendSucceeded) {
            // something went wrong, up to you to handle. Should probably return so the rest of the code is not executed though
        }

        videoWriterInput.markAsFinished()
        videoWriter.endSessionAtSourceTime(frameTime)
        videoWriter.finishWritingWithCompletionHandler { () -> Void in
            if videoWriter.status != .Completed {
                // Error writing the video... handle appropriately
            } else {
                print("FINISHED!!!!!")
                completion()
            }
        }
    }
}

How I did it in Obj-C

Note: I had to make some edits to make this self-contained, so this method returns a string holding the path the video will be written to. It returns before the video writing has finished, so it may be possible to access the file before it is ready if you are not careful.

-(NSString *)makeMovieFromImageData:(NSData *)imageData {
    NSError *error;
    UIImage *image = [UIImage imageWithData:imageData];

    // width has to be divisible by 16 or the movie comes out distorted... don't ask me why
    double pixelsToRemove = fmod(image.size.width, 16);
    double pixelsToAdd = 16 - pixelsToRemove;
    CGSize size = CGSizeMake(image.size.width+pixelsToAdd, image.size.height);

    BOOL hasFoundValidPath = NO;
    NSURL *tempFileURL;
    NSString *outputFile;

    while (!hasFoundValidPath) {
        NSString *guid = [[NSUUID new] UUIDString];
        outputFile = [NSString stringWithFormat:@"picture_%@.mp4", guid];

        NSString *outputDirectory = NSTemporaryDirectory();

        NSString *tempPath = [outputDirectory stringByAppendingPathComponent:outputFile];

        // Will fail if destination already has a file
        if ([[NSFileManager defaultManager] fileExistsAtPath:tempPath]) {
            continue;
        } else {
            hasFoundValidPath = YES;
        }
        tempFileURL = [NSURL fileURLWithPath:tempPath];
    }


    // Start writing
    AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:tempFileURL
                                                           fileType:AVFileTypeQuickTimeMovie
                                                              error:&error];

    if (error) {
        // handle error
    }

    NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                   AVVideoCodecH264, AVVideoCodecKey,
                                   [NSNumber numberWithInt:size.width], AVVideoWidthKey,
                                   [NSNumber numberWithInt:size.height], AVVideoHeightKey,
                                   nil];

    AVAssetWriterInput *writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                                         outputSettings:videoSettings];

    NSDictionary *bufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
                                      [NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, nil];

    AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
                                                                                                                     sourcePixelBufferAttributes:bufferAttributes];
    if ([videoWriter canAddInput:writerInput]) {
        [videoWriter addInput:writerInput];
    } else {
        // handle error
    }

    [videoWriter startWriting];

    [videoWriter startSessionAtSourceTime:kCMTimeZero];

    CGImageRef img = [image CGImage];

    // Now I am going to create the pixel buffer
    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey,
                             nil];
    CVPixelBufferRef buffer = NULL;

    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width,
                                          size.height, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef) options,
                                          &buffer);

    if ( !(status == kCVReturnSuccess && buffer != NULL) ) {
        NSLog(@"There be some issue. We didn't get a buffer from the image");
    }

    CVPixelBufferLockBaseAddress(buffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(buffer);

    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();

    CGContextRef context = CGBitmapContextCreate(pxdata, size.width,
                                                 size.height, 8, 4*size.width, rgbColorSpace,
                                                 (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
    CGContextSetRGBFillColor(context, 0, 0, 0, 0);

    CGContextConcatCTM(context, CGAffineTransformIdentity);

    CGContextDrawImage(context, CGRectMake(0, 0, size.width,
                                           size.height), img);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(buffer, 0);

    // At this point we have our buffer so we are going to start by adding to time zero
    [adaptor appendPixelBuffer:buffer withPresentationTime:kCMTimeZero];

    while (!writerInput.readyForMoreMediaData) {} // wait until ready

    CMTime frameTime = CMTimeMake(5, 1); // 5 second frame

    [adaptor appendPixelBuffer:buffer withPresentationTime:frameTime];
    CFRelease(buffer);

    [writerInput markAsFinished];

    [videoWriter endSessionAtSourceTime:frameTime];

    [videoWriter finishWritingWithCompletionHandler:^{
        if (videoWriter.status != AVAssetWriterStatusCompleted) {
            // Error
        }
    }]; // end videoWriter finishWriting Block

    // NOTE: the URL is actually being returned before the videoWriter finishes writing so be careful to not access it until it's ready
    return outputFile;
}

Regarding "swift - How to merge a *single image* with a video", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/38046306/
