Reputation: 680
I have a SwiftUI view on top of a video view. The video view has a fixed size: full width and almost full height, always portrait, while the video's aspect ratio varies. The SwiftUI view contains a Text that the user can drag around and pinch to scale its font.
import SwiftUI
import UIKit

struct EditPostLyricsTextView: View {
    @EnvironmentObject private var viewModel: EditPostView.ViewModel

    var body: some View {
        TitleView(
            text: viewModel.currentLyricsText,
            font: UIFont(name: viewModel.lyricsMetadata.fontName, size: 17)!,
            fontSize: 25 * viewModel.lyricsMetadata.fontScale,
            alignment: viewModel.lyricsMetadata.alignment,
            textColor: viewModel.lyricsMetadata.color
        )
        .padding(50)
        .frame(maxWidth: .infinity)
        .padding(.horizontal, 20)
        .overlay {
            Color.responsiveClear
        }
        .position(viewModel.lyricsMetadata.coordinates)
        .gesture(
            MagnificationGesture()
                .onChanged { value in
                    let delta = value / viewModel.lyricsMetadata.fontScale
                    let newScale = viewModel.lyricsMetadata.fontScale * delta
                    viewModel.lyricsMetadata.fontScale = min(2, max(1, newScale))
                }
                .simultaneously(with: DragGesture()
                    .onChanged { value in
                        viewModel.lyricsMetadata.coordinates = value.location
                    })
                .simultaneously(with: TapGesture()
                    .onEnded(viewModel.presentEditLyricsView))
        )
        .clipped()
    }
}
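For context, the originalCanvasSize used below is meant to be the size of the container this overlay is laid out in. A simplified sketch of how I record it (the canvasSize property name is illustrative, not the exact code):

struct EditPostLyricsCanvasView: View {
    @EnvironmentObject private var viewModel: EditPostView.ViewModel

    var body: some View {
        GeometryReader { proxy in
            EditPostLyricsTextView()
                // Illustrative: remember the size the overlay is laid out in;
                // this is what later gets passed to the exporter as originalCanvasSize.
                .onAppear { viewModel.lyricsMetadata.canvasSize = proxy.size }
        }
    }
}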
Then I need to export the video with the text overlays, ensuring that their position and size match the on-screen layout at every video resolution.
import AVFoundation
import UIKit

struct TextSegment {
    let text: String
    let startTime: Double
    let endTime: Double
    let originalCanvasSize: CGSize
    let padding: CGFloat
    var coordinates: CGPoint
    let color: UIColor
    let fontName: String
    let fontSize: CGFloat
    let alignment: NSTextAlignment
}
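For reference, each segment is populated from the same metadata the SwiftUI view reads; roughly like this (hypothetical, simplified wiring; properties not shown in the view above, such as canvasSize, are assumptions):

// Hypothetical wiring (simplified): the font size mirrors the
// 25 * fontScale passed to TitleView, and the coordinates / canvas size
// are the values recorded by the SwiftUI overlay.
let segment = TextSegment(
    text: viewModel.currentLyricsText,
    startTime: 0,                                          // illustrative timing
    endTime: 5,
    originalCanvasSize: viewModel.lyricsMetadata.canvasSize,
    padding: 50,                                           // mirrors .padding(50) above
    coordinates: viewModel.lyricsMetadata.coordinates,
    color: viewModel.lyricsMetadata.color,
    fontName: viewModel.lyricsMetadata.fontName,
    fontSize: 25 * viewModel.lyricsMetadata.fontScale,
    alignment: viewModel.lyricsMetadata.alignment
)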
struct VideoEditorService {
    func addTextOverlaysToVideo(videoURL: URL, textSegments: [TextSegment], completion: @escaping (URL?) -> Void) {
        let videoAsset = AVURLAsset(url: videoURL)
        let composition = AVMutableComposition()
        let timeRange = CMTimeRange(start: .zero, duration: videoAsset.duration)

        guard let videoTrack = videoAsset.tracks(withMediaType: .video).first,
              let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid) else {
            completion(nil)
            return
        }

        do {
            if let audioAssetTrack = videoAsset.tracks(withMediaType: .audio).first,
               let compositionAudioTrack = composition.addMutableTrack(
                   withMediaType: .audio,
                   preferredTrackID: kCMPersistentTrackID_Invalid) {
                try compositionAudioTrack.insertTimeRange(
                    timeRange,
                    of: audioAssetTrack,
                    at: .zero)
            }
            try compositionTrack.insertTimeRange(timeRange, of: videoTrack, at: .zero)

            let videoComposition = AVMutableVideoComposition(propertiesOf: videoAsset)
            let overlayLayer = CALayer()
            overlayLayer.frame = CGRect(origin: .zero, size: videoComposition.renderSize)
            textSegments.forEach { segment in
                addText(
                    textSegment: segment,
                    to: overlayLayer,
                    videoSize: videoComposition.renderSize
                )
            }

            // Apply the overlay layer with animations
            let parentLayer = CALayer()
            parentLayer.frame = CGRect(x: 0, y: 0, width: videoComposition.renderSize.width, height: videoComposition.renderSize.height)
            parentLayer.addSublayer(overlayLayer)
            videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: overlayLayer, in: parentLayer)

            // Export the video
            exportVideo(
                originalVideoURL: videoURL,
                composition: composition,
                videoComposition: videoComposition,
                completion: completion
            )
        } catch {
            print("Failed to add text overlay: \(error)")
            completion(nil)
        }
    }

    private func addText(textSegment: TextSegment, to layer: CALayer, videoSize: CGSize) {
        let originalWidth = textSegment.originalCanvasSize.width - textSegment.padding
        let maxWidth = (originalWidth * videoSize.width) / textSegment.originalCanvasSize.width
        let fontSize = min((videoSize.width * textSegment.fontSize) / textSegment.originalCanvasSize.width,
                           (videoSize.height * textSegment.fontSize) / textSegment.originalCanvasSize.height)
        let standardRatio = (textSegment.originalCanvasSize.width / textSegment.fontSize) * (textSegment.originalCanvasSize.height / textSegment.fontSize)

        let attributedText = NSAttributedString(
            string: textSegment.text,
            attributes: [
                .font: UIFont(name: textSegment.fontName, size: fontSize) ?? .defaultHeavy(size: fontSize),
                .foregroundColor: textSegment.color,
            ]
        )

        let textLayer = CATextLayer()
        textLayer.string = attributedText
        // textLayer.backgroundColor = UIColor.red.cgColor
        textLayer.alignmentMode = textSegment.alignment == .center ? .center : textSegment.alignment == .left ? .left : .right
        textLayer.isWrapped = true
        textLayer.truncationMode = .end
        textLayer.contentsScale = UIScreen.main.scale
        textLayer.shouldRasterize = true
        textLayer.rasterizationScale = UIScreen.main.scale
        textLayer.opacity = 0

        let textSize = attributedText.boundingRect(with: CGSize(width: maxWidth, height: .infinity),
                                                   options: [.usesLineFragmentOrigin, .usesFontLeading],
                                                   context: nil).size
        let height = ceil(textSize.height)
print("maxWidth", maxWidth)
print("textSize", ceil(textSize.height))
print("textSize", textSegment.coordinates)
print("textSize", videoSize)
        let normalisedX = ((videoSize.width * textSegment.coordinates.x) / textSegment.originalCanvasSize.width) - (maxWidth / 2)
        let normalisedY = (videoSize.height - (videoSize.height * textSegment.coordinates.y) / textSegment.originalCanvasSize.height) - (height / 2)
        textLayer.frame = CGRect(
            x: normalisedX,
            y: normalisedY,
            width: maxWidth,
            height: height
        )
        textLayer.displayIfNeeded()

        let startVisible = CABasicAnimation(keyPath: "opacity")
        startVisible.fromValue = 0.0
        startVisible.toValue = 1.0
        startVisible.duration = 0.05 // for appearing in duration
        startVisible.beginTime = AVCoreAnimationBeginTimeAtZero + textSegment.startTime
        startVisible.isRemovedOnCompletion = false
        startVisible.fillMode = CAMediaTimingFillMode.forwards
        textLayer.add(startVisible, forKey: "startAnimation")

        let endVisible = CABasicAnimation(keyPath: "opacity")
        endVisible.fromValue = 1.0
        endVisible.toValue = 0.0
        endVisible.duration = 0.05 // for disappearing in duration
        endVisible.beginTime = AVCoreAnimationBeginTimeAtZero + textSegment.endTime
        endVisible.fillMode = CAMediaTimingFillMode.forwards
        endVisible.isRemovedOnCompletion = false
        textLayer.add(endVisible, forKey: "endAnimation")

        layer.addSublayer(textLayer)
    }
}
I've tried to make everything proportional, from the font size to the coordinates. Although the result sometimes looks identical to the SwiftUI view, at other times the exported video wraps the text onto an extra line.
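Written out on its own, the mapping I'm aiming for is essentially this (a sketch of the same maths that addText folds into the frame calculation):

// Sketch: scale the font and coordinates by the ratio between the export
// render size and the SwiftUI canvas size, flipping Y because the video
// layer's origin is at the bottom-left.
func exportPlacement(for segment: TextSegment, in videoSize: CGSize) -> (fontSize: CGFloat, center: CGPoint) {
    let xScale = videoSize.width / segment.originalCanvasSize.width
    let yScale = videoSize.height / segment.originalCanvasSize.height
    let fontSize = segment.fontSize * min(xScale, yScale)
    let center = CGPoint(
        x: segment.coordinates.x * xScale,
        y: videoSize.height - segment.coordinates.y * yScale // flipped Y
    )
    return (fontSize, center)
}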
Is there anything I'm doing wrong?
Upvotes: 0
Views: 46