Support added/removed audio track between MediaItems
- Add silent audio when the output contains an audio track but the current MediaItem doesn't have any audio.
- Add an audio track when generateSilentAudio is set to true.

PiperOrigin-RevId: 511005887

This commit is contained in:
parent 9fa8aba32e
commit 79d32c2488
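
For context, the following sketch shows the client-side usage this commit enables. It is illustrative only: the names follow the released media3 Transformer API, which may differ slightly from the exoplayer2-namespace code in this diff, and experimentalSetForceAudioTrack is assumed to be the builder method behind the Composition.experimentalForceAudioTrack flag used below.

// Hedged sketch, not part of the commit.
EditedMediaItem audioAndVideo =
    new EditedMediaItem.Builder(MediaItem.fromUri("file:///audio_and_video.mp4")).build();
EditedMediaItem videoOnly =
    new EditedMediaItem.Builder(MediaItem.fromUri("file:///video_only.mp4")).build();
EditedMediaItemSequence sequence =
    new EditedMediaItemSequence(ImmutableList.of(audioAndVideo, videoOnly));
Composition composition =
    new Composition.Builder(ImmutableList.of(sequence))
        // Generate silent audio where a MediaItem has no audio track, so that the
        // output contains one continuous audio track across both items.
        .experimentalSetForceAudioTrack(true)
        .build();
transformer.start(composition, outputPath);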
AudioSamplePipeline.java

@@ -35,7 +35,6 @@ import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedDeque;
-import org.checkerframework.checker.nullness.qual.EnsuresNonNullIf;
 import org.checkerframework.dataflow.qual.Pure;
 
 /** Pipeline to process, re-encode and mux raw audio samples. */
@@ -44,7 +43,7 @@ import org.checkerframework.dataflow.qual.Pure;
   private static final int MAX_INPUT_BUFFER_COUNT = 10;
   private static final int DEFAULT_ENCODER_BITRATE = 128 * 1024;
 
-  @Nullable private final SilentAudioGenerator silentAudioGenerator;
+  private final SilentAudioGenerator silentAudioGenerator;
   private final Queue<DecoderInputBuffer> availableInputBuffers;
   private final Queue<DecoderInputBuffer> pendingInputBuffers;
   private final AudioProcessingPipeline audioProcessingPipeline;
@@ -56,7 +55,7 @@ import org.checkerframework.dataflow.qual.Pure;
   private long nextEncoderInputBufferTimeUs;
   private long encoderBufferDurationRemainder;
 
-  private volatile long mediaItemOffsetUs;
+  private volatile boolean queueEndOfStreamAfterSilence;
 
   // TODO(b/260618558): Move silent audio generation upstream of this component.
   public AudioSamplePipeline(
@@ -66,19 +65,13 @@ import org.checkerframework.dataflow.qual.Pure;
       TransformationRequest transformationRequest,
       boolean flattenForSlowMotion,
       ImmutableList<AudioProcessor> audioProcessors,
-      long forceAudioTrackDurationUs,
       Codec.EncoderFactory encoderFactory,
       MuxerWrapper muxerWrapper,
       FallbackListener fallbackListener)
       throws ExportException {
     super(firstInputFormat, streamStartPositionUs, muxerWrapper);
 
-    if (forceAudioTrackDurationUs != C.TIME_UNSET) {
-      silentAudioGenerator = new SilentAudioGenerator(firstInputFormat, forceAudioTrackDurationUs);
-    } else {
-      silentAudioGenerator = null;
-    }
-
+    silentAudioGenerator = new SilentAudioGenerator(firstInputFormat);
     availableInputBuffers = new ConcurrentLinkedDeque<>();
     ByteBuffer emptyBuffer = ByteBuffer.allocateDirect(0).order(ByteOrder.nativeOrder());
     for (int i = 0; i < MAX_INPUT_BUFFER_COUNT; i++) {
@@ -150,20 +143,30 @@ import org.checkerframework.dataflow.qual.Pure;
 
   @Override
   public void onMediaItemChanged(
-      EditedMediaItem editedMediaItem, Format trackFormat, long mediaItemOffsetUs) {
-    this.mediaItemOffsetUs = mediaItemOffsetUs;
+      EditedMediaItem editedMediaItem,
+      long durationUs,
+      @Nullable Format trackFormat,
+      boolean isLast) {
+    if (trackFormat == null) {
+      silentAudioGenerator.addSilence(durationUs);
+      if (isLast) {
+        queueEndOfStreamAfterSilence = true;
+      }
+    }
   }
 
   @Override
   @Nullable
   public DecoderInputBuffer getInputBuffer() {
+    if (shouldGenerateSilence()) {
+      return null;
+    }
     return availableInputBuffers.peek();
   }
 
   @Override
   public void queueInputBuffer() {
     DecoderInputBuffer inputBuffer = availableInputBuffers.remove();
-    inputBuffer.timeUs += mediaItemOffsetUs;
     pendingInputBuffers.add(inputBuffer);
   }
 
@@ -220,16 +223,17 @@ import org.checkerframework.dataflow.qual.Pure;
       return false;
     }
 
-    if (isInputSilent()) {
-      if (silentAudioGenerator.isEnded()) {
-        queueEndOfStreamToEncoder();
-        return false;
-      }
+    if (shouldGenerateSilence()) {
       feedEncoder(silentAudioGenerator.getBuffer());
       return true;
     }
 
     if (pendingInputBuffers.isEmpty()) {
+      // Only read volatile variable queueEndOfStreamAfterSilence if there is a chance that end of
+      // stream should be queued.
+      if (!silentAudioGenerator.hasRemaining() && queueEndOfStreamAfterSilence) {
+        queueEndOfStreamToEncoder();
+      }
       return false;
     }
 
@@ -277,17 +281,18 @@ import org.checkerframework.dataflow.qual.Pure;
    * @return Whether it may be possible to feed more data immediately by calling this method again.
    */
   private boolean feedProcessingPipelineFromInput() {
-    if (isInputSilent()) {
-      if (silentAudioGenerator.isEnded()) {
-        audioProcessingPipeline.queueEndOfStream();
-        return false;
-      }
+    if (shouldGenerateSilence()) {
       ByteBuffer inputData = silentAudioGenerator.getBuffer();
       audioProcessingPipeline.queueInput(inputData);
       return !inputData.hasRemaining();
     }
 
     if (pendingInputBuffers.isEmpty()) {
+      // Only read volatile variable queueEndOfStreamAfterSilence if there is a chance that end of
+      // stream should be queued.
+      if (!silentAudioGenerator.hasRemaining() && queueEndOfStreamAfterSilence) {
+        audioProcessingPipeline.queueEndOfStream();
+      }
       return false;
     }
 
@@ -370,8 +375,7 @@ import org.checkerframework.dataflow.qual.Pure;
     nextEncoderInputBufferTimeUs += bufferDurationUs;
   }
 
-  @EnsuresNonNullIf(expression = "silentAudioGenerator", result = true)
-  private boolean isInputSilent() {
-    return silentAudioGenerator != null;
+  private boolean shouldGenerateSilence() {
+    return silentAudioGenerator.hasRemaining() && pendingInputBuffers.isEmpty();
   }
 }
CompositeAssetLoader.java

@@ -39,7 +39,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * An {@link AssetLoader} that is composed of a sequence of non-overlapping {@linkplain AssetLoader
@@ -49,13 +48,13 @@ import java.util.concurrent.atomic.AtomicLong;
 
   private final List<EditedMediaItem> editedMediaItems;
   private final AtomicInteger currentMediaItemIndex;
+  private final boolean forceAudioTrack;
   private final AssetLoader.Factory assetLoaderFactory;
   private final HandlerWrapper handler;
   private final Listener compositeAssetLoaderListener;
   private final Map<Integer, SampleConsumer> sampleConsumersByTrackType;
   private final Map<Integer, OnMediaItemChangedListener> mediaItemChangedListenersByTrackType;
   private final ImmutableList.Builder<ExportResult.ProcessedInput> processedInputsBuilder;
-  private final AtomicLong totalDurationUs;
   private final AtomicInteger nonEndedTracks;
 
   private AssetLoader currentAssetLoader;
@@ -65,11 +64,13 @@ import java.util.concurrent.atomic.AtomicLong;
 
   public CompositeAssetLoader(
       EditedMediaItemSequence sequence,
+      boolean forceAudioTrack,
       AssetLoader.Factory assetLoaderFactory,
       Looper looper,
       Listener listener,
       Clock clock) {
-    this.editedMediaItems = sequence.editedMediaItems;
+    editedMediaItems = sequence.editedMediaItems;
+    this.forceAudioTrack = forceAudioTrack;
     this.assetLoaderFactory = assetLoaderFactory;
     compositeAssetLoaderListener = listener;
     currentMediaItemIndex = new AtomicInteger();
@@ -77,7 +78,6 @@ import java.util.concurrent.atomic.AtomicLong;
     sampleConsumersByTrackType = new HashMap<>();
     mediaItemChangedListenersByTrackType = new HashMap<>();
     processedInputsBuilder = new ImmutableList.Builder<>();
-    totalDurationUs = new AtomicLong();
     nonEndedTracks = new AtomicInteger();
     // It's safe to use "this" because we don't start the AssetLoader before exiting the
    // constructor.
@@ -145,26 +145,24 @@ import java.util.concurrent.atomic.AtomicLong;
 
   @Override
   public void onDurationUs(long durationUs) {
+    int currentMediaItemIndex = this.currentMediaItemIndex.get();
+    checkArgument(
+        durationUs != C.TIME_UNSET || currentMediaItemIndex == editedMediaItems.size() - 1,
+        "Could not retrieve the duration for EditedMediaItem "
+            + currentMediaItemIndex
+            + ". An unset duration is only allowed for the last EditedMediaItem in the sequence.");
     currentDurationUs = durationUs;
     if (editedMediaItems.size() == 1) {
       compositeAssetLoaderListener.onDurationUs(durationUs);
-    } else if (currentMediaItemIndex.get() == 0) {
-      // TODO(b/252537210): support silent audio track for sequence of AssetLoaders (silent audio
-      // track is the only usage of the duration).
+    } else if (currentMediaItemIndex == 0) {
       compositeAssetLoaderListener.onDurationUs(C.TIME_UNSET);
     }
   }
 
   @Override
   public void onTrackCount(int trackCount) {
-    nonEndedTracks.set(trackCount);
     // TODO(b/252537210): support varying track count and track types between AssetLoaders.
     if (currentMediaItemIndex.get() == 0) {
       compositeAssetLoaderListener.onTrackCount(trackCount);
     } else if (trackCount != sampleConsumersByTrackType.size()) {
       throw new IllegalStateException(
           "The number of tracks is not allowed to change between MediaItems.");
     }
+    nonEndedTracks.set(trackCount);
   }
 
   @Override
@@ -177,25 +175,45 @@ import java.util.concurrent.atomic.AtomicLong;
     int trackType = MimeTypes.getTrackType(format.sampleMimeType);
     SampleConsumer sampleConsumer;
     if (currentMediaItemIndex.get() == 0) {
+      boolean addAudioTrack =
+          forceAudioTrack && nonEndedTracks.get() == 1 && trackType == C.TRACK_TYPE_VIDEO;
+      int trackCount = nonEndedTracks.get() + (addAudioTrack ? 1 : 0);
+      compositeAssetLoaderListener.onTrackCount(trackCount);
       sampleConsumer =
           new SampleConsumerWrapper(
               compositeAssetLoaderListener.onTrackAdded(
                   format, supportedOutputTypes, streamStartPositionUs, streamOffsetUs));
       sampleConsumersByTrackType.put(trackType, sampleConsumer);
+      if (addAudioTrack) {
+        Format firstAudioFormat =
+            new Format.Builder()
+                .setSampleMimeType(MimeTypes.AUDIO_AAC)
+                .setSampleRate(44100)
+                .setChannelCount(2)
+                .build();
+        SampleConsumer audioSampleConsumer =
+            new SampleConsumerWrapper(
+                compositeAssetLoaderListener.onTrackAdded(
+                    firstAudioFormat,
+                    SUPPORTED_OUTPUT_TYPE_DECODED,
+                    /* streamStartPositionUs= */ streamOffsetUs,
+                    streamOffsetUs));
+        sampleConsumersByTrackType.put(C.TRACK_TYPE_AUDIO, audioSampleConsumer);
+      }
     } else {
       sampleConsumer =
           checkStateNotNull(
               sampleConsumersByTrackType.get(trackType),
               "The preceding MediaItem does not contain any track of type " + trackType);
     }
-    @Nullable
-    OnMediaItemChangedListener onMediaItemChangedListener =
-        mediaItemChangedListenersByTrackType.get(trackType);
-    if (onMediaItemChangedListener != null) {
-      onMediaItemChangedListener.onMediaItemChanged(
-          editedMediaItems.get(currentMediaItemIndex.get()),
-          format,
-          /* mediaItemOffsetUs= */ totalDurationUs.get());
+    onMediaItemChanged(trackType, format);
+    if (nonEndedTracks.get() == 1 && sampleConsumersByTrackType.size() == 2) {
+      for (Map.Entry<Integer, SampleConsumer> entry : sampleConsumersByTrackType.entrySet()) {
+        int listenerTrackType = entry.getKey();
+        if (trackType != listenerTrackType) {
+          onMediaItemChanged(listenerTrackType, /* format= */ null);
+        }
+      }
     }
     return sampleConsumer;
   }
@@ -205,6 +223,20 @@ import java.util.concurrent.atomic.AtomicLong;
     compositeAssetLoaderListener.onError(exportException);
   }
 
+  private void onMediaItemChanged(int trackType, @Nullable Format format) {
+    @Nullable
+    OnMediaItemChangedListener onMediaItemChangedListener =
+        mediaItemChangedListenersByTrackType.get(trackType);
+    if (onMediaItemChangedListener == null) {
+      return;
+    }
+    onMediaItemChangedListener.onMediaItemChanged(
+        editedMediaItems.get(currentMediaItemIndex.get()),
+        currentDurationUs,
+        format,
+        /* isLast= */ currentMediaItemIndex.get() == editedMediaItems.size() - 1);
+  }
+
   private void addCurrentProcessedInput() {
     int currentMediaItemIndex = this.currentMediaItemIndex.get();
     if (currentMediaItemIndex >= processedInputsSize) {
@@ -258,8 +290,8 @@ import java.util.concurrent.atomic.AtomicLong;
       sampleConsumer.queueInputBuffer();
     }
 
-    // TODO(262693274): Test that concatenate 2 images or an image and a video works as expected
-    // once Image Asset Loader Implementation is complete.
+    // TODO(b/262693274): Test that concatenate 2 images or an image and a video works as expected
+    // once ImageAssetLoader implementation is complete.
     @Override
     public void queueInputBitmap(Bitmap inputBitmap, long durationUs, int frameRate) {
       sampleConsumer.queueInputBitmap(inputBitmap, durationUs, frameRate);
@@ -298,7 +330,6 @@ import java.util.concurrent.atomic.AtomicLong;
     }
 
     private void switchAssetLoader() {
-      totalDurationUs.addAndGet(currentDurationUs);
      handler.post(
          () -> {
            addCurrentProcessedInput();
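
The interplay between onTrackAdded and the private onMediaItemChanged helper above is easiest to trace on a concrete case (a hedged walkthrough, not part of the diff):

// Assume a sequence of two items and an output with audio + video consumers
// (sampleConsumersByTrackType.size() == 2).
//
// MediaItem 0 (audio + video): onTrackAdded runs once per track, and each track's
// OnMediaItemChangedListener receives the extracted Format.
//
// MediaItem 1 (video only): nonEndedTracks.get() == 1, so after dispatching the
// video listener with its Format, the loop notifies the remaining audio listener
// with format == null; AudioSamplePipeline.onMediaItemChanged reacts by calling
// silentAudioGenerator.addSilence(durationUs) for the missing audio.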

EncodedSamplePipeline.java

@@ -25,6 +25,7 @@ import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.atomic.AtomicLong;
 
 /** Pipeline that muxes encoded samples without any transcoding or transformation. */
 /* package */ final class EncodedSamplePipeline extends SamplePipeline {
@@ -32,6 +33,7 @@ import java.util.concurrent.ConcurrentLinkedDeque;
   private static final int MAX_INPUT_BUFFER_COUNT = 10;
 
   private final Format format;
+  private final AtomicLong nextMediaItemOffsetUs;
   private final Queue<DecoderInputBuffer> availableInputBuffers;
   private final Queue<DecoderInputBuffer> pendingInputBuffers;
 
@@ -46,6 +48,7 @@ import java.util.concurrent.ConcurrentLinkedDeque;
       FallbackListener fallbackListener) {
     super(format, streamStartPositionUs, muxerWrapper);
     this.format = format;
+    nextMediaItemOffsetUs = new AtomicLong();
     availableInputBuffers = new ConcurrentLinkedDeque<>();
     ByteBuffer emptyBuffer = ByteBuffer.allocateDirect(0).order(ByteOrder.nativeOrder());
     for (int i = 0; i < MAX_INPUT_BUFFER_COUNT; i++) {
@@ -59,8 +62,12 @@ import java.util.concurrent.ConcurrentLinkedDeque;
 
   @Override
   public void onMediaItemChanged(
-      EditedMediaItem editedMediaItem, Format trackFormat, long mediaItemOffsetUs) {
-    this.mediaItemOffsetUs = mediaItemOffsetUs;
+      EditedMediaItem editedMediaItem,
+      long durationUs,
+      @Nullable Format trackFormat,
+      boolean isLast) {
+    mediaItemOffsetUs = nextMediaItemOffsetUs.get();
+    nextMediaItemOffsetUs.addAndGet(durationUs);
   }
 
   @Override

OnMediaItemChangedListener.java

@@ -15,6 +15,7 @@
  */
 package com.google.android.exoplayer2.transformer;
 
+import androidx.annotation.Nullable;
 import com.google.android.exoplayer2.Format;
 import com.google.android.exoplayer2.MediaItem;
 
@@ -28,11 +29,15 @@ import com.google.android.exoplayer2.MediaItem;
    * <p>Can be called from any thread.
    *
    * @param editedMediaItem The {@link MediaItem} with the transformations to apply to it.
-   * @param trackFormat The {@link Format} of the {@link EditedMediaItem} track corresponding to the
-   *     {@link SamplePipeline}.
-   * @param mediaItemOffsetUs The offset to add to the presentation timestamps of the {@link
-   *     EditedMediaItem} samples received by the {@link SamplePipeline}, in microseconds.
+   * @param durationUs The duration of the {@link MediaItem}, in microseconds.
+   * @param trackFormat The {@link Format} of the {@link MediaItem} track corresponding to the
+   *     {@link SamplePipeline}, or {@code null} if no such track was extracted.
+   * @param isLast Whether the {@link MediaItem} is the last one passed to the {@link
+   *     SamplePipeline}.
    */
   void onMediaItemChanged(
-      EditedMediaItem editedMediaItem, Format trackFormat, long mediaItemOffsetUs);
+      EditedMediaItem editedMediaItem,
+      long durationUs,
+      @Nullable Format trackFormat,
+      boolean isLast);
 }
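
A minimal sketch of an implementation of the updated callback (the class itself is hypothetical, not part of the commit):

/* package */ final class LoggingMediaItemChangedListener implements OnMediaItemChangedListener {
  @Override
  public void onMediaItemChanged(
      EditedMediaItem editedMediaItem,
      long durationUs,
      @Nullable Format trackFormat,
      boolean isLast) {
    if (trackFormat == null) {
      // The new MediaItem has no track of this type; an audio pipeline would
      // generate durationUs of silence instead of consuming input buffers.
    }
    if (isLast) {
      // No further MediaItem will be passed to this listener.
    }
  }
}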

SilentAudioGenerator.java

@@ -21,28 +21,43 @@ import com.google.android.exoplayer2.Format;
 import com.google.android.exoplayer2.util.Util;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
+import java.util.concurrent.atomic.AtomicLong;
 
 /* package */ final class SilentAudioGenerator {
   private static final int DEFAULT_BUFFER_SIZE_FRAMES = 1024;
 
+  private final int sampleRate;
+  private final int frameSize;
   private final ByteBuffer internalBuffer;
+  private final AtomicLong remainingBytesToOutput;
 
-  private long remainingBytesToOutput;
-
-  public SilentAudioGenerator(Format format, long totalDurationUs) {
-    int frameSize =
+  public SilentAudioGenerator(Format format) {
+    sampleRate = format.sampleRate;
+    frameSize =
         Util.getPcmFrameSize(
             format.pcmEncoding == Format.NO_VALUE ? C.ENCODING_PCM_16BIT : format.pcmEncoding,
             format.channelCount);
-    long outputFrameCount = (format.sampleRate * totalDurationUs) / C.MICROS_PER_SECOND;
-    remainingBytesToOutput = frameSize * outputFrameCount;
     internalBuffer =
         ByteBuffer.allocateDirect(DEFAULT_BUFFER_SIZE_FRAMES * frameSize)
            .order(ByteOrder.nativeOrder());
     internalBuffer.flip();
+    remainingBytesToOutput = new AtomicLong();
   }
 
+  /**
+   * Adds a silence duration to generate.
+   *
+   * <p>Can be called from any thread.
+   *
+   * @param durationUs The duration of the additional silence to generate, in microseconds.
+   */
+  public void addSilence(long durationUs) {
+    long outputFrameCount = (sampleRate * durationUs) / C.MICROS_PER_SECOND;
+    remainingBytesToOutput.addAndGet(frameSize * outputFrameCount);
+  }
+
   public ByteBuffer getBuffer() {
+    long remainingBytesToOutput = this.remainingBytesToOutput.get();
     if (!internalBuffer.hasRemaining()) {
       // "next" buffer.
       internalBuffer.clear();
@@ -50,12 +65,12 @@ import java.nio.ByteOrder;
         internalBuffer.limit((int) remainingBytesToOutput);
       }
       // Only reduce remaining bytes when we "generate" a new one.
-      remainingBytesToOutput -= internalBuffer.remaining();
+      this.remainingBytesToOutput.addAndGet(-internalBuffer.remaining());
     }
     return internalBuffer;
   }
 
-  public boolean isEnded() {
-    return !internalBuffer.hasRemaining() && remainingBytesToOutput == 0;
+  public boolean hasRemaining() {
+    return internalBuffer.hasRemaining() || remainingBytesToOutput.get() > 0;
   }
 }
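
The byte arithmetic in addSilence and getBuffer is worth making concrete. A worked example using the figures from the test file below (88.2 kHz, 6 channels, 16-bit PCM, i.e. 2 bytes per sample):

// Worked example of the silence byte math; plain Java, not part of the commit.
int frameSize = 2 * 6;                                        // 12 bytes per PCM frame
long outputFrameCount = (88_200L * 3_000_000L) / 1_000_000L;  // 264_600 frames for 3 s
long bytes = frameSize * outputFrameCount;                    // 3_175_200 bytes
// getBuffer() hands this out in DEFAULT_BUFFER_SIZE_FRAMES chunks
// (1024 frames = 12_288 bytes), with a shorter final buffer for the remainder.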

TransformerInternal.java

@@ -100,7 +100,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
   private final ConditionVariable transformerConditionVariable;
   private final ExportResult.Builder exportResultBuilder;
 
-  private boolean forceAudioTrack;
   private boolean isDrainingPipelines;
   private @Transformer.ProgressState int progressState;
   private @MonotonicNonNull RuntimeException cancelException;
@@ -123,7 +122,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
       Clock clock) {
     this.context = context;
     this.transformationRequest = transformationRequest;
-    this.forceAudioTrack = composition.experimentalForceAudioTrack;
     this.encoderFactory = new CapturingEncoderFactory(encoderFactory);
     this.listener = listener;
     this.applicationHandler = applicationHandler;
@@ -137,7 +135,12 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
         new ComponentListener(sequence, transmux, fallbackListener);
     compositeAssetLoader =
         new CompositeAssetLoader(
-            sequence, assetLoaderFactory, internalLooper, componentListener, clock);
+            sequence,
+            composition.experimentalForceAudioTrack,
+            assetLoaderFactory,
+            internalLooper,
+            componentListener,
+            clock);
     samplePipelines = new ArrayList<>();
     muxerWrapper = new MuxerWrapper(outputPath, muxerFactory, componentListener);
     transformerConditionVariable = new ConditionVariable();
@@ -316,8 +319,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
 
     private boolean trackAdded;
 
-    private volatile long durationUs;
-
     public ComponentListener(
         EditedMediaItemSequence sequence, boolean transmux, FallbackListener fallbackListener) {
       firstEditedMediaItem = sequence.editedMediaItems.get(0);
@@ -325,7 +326,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
       this.transmux = transmux;
       this.fallbackListener = fallbackListener;
       trackCount = new AtomicInteger();
-      durationUs = C.TIME_UNSET;
     }
 
     // AssetLoader.Listener and MuxerWrapper.Listener implementation.
@@ -340,9 +340,7 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
     // AssetLoader.Listener implementation.
 
     @Override
-    public void onDurationUs(long durationUs) {
-      this.durationUs = durationUs;
-    }
+    public void onDurationUs(long durationUs) {}
 
     @Override
     public void onTrackCount(int trackCount) {
@@ -365,14 +363,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
         throws ExportException {
       int trackType = MimeTypes.getTrackType(firstInputFormat.sampleMimeType);
       if (!trackAdded) {
-        if (forceAudioTrack) {
-          if (trackCount.get() == 1 && trackType == C.TRACK_TYPE_VIDEO) {
-            trackCount.incrementAndGet();
-          } else {
-            forceAudioTrack = false;
-          }
-        }
-
         // Call setTrackCount() methods here so that they are called from the same thread as the
         // MuxerWrapper and FallbackListener methods called when building the sample pipelines.
         muxerWrapper.setTrackCount(trackCount.get());
@@ -386,25 +376,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
       compositeAssetLoader.addOnMediaItemChangedListener(samplePipeline, trackType);
       internalHandler.obtainMessage(MSG_REGISTER_SAMPLE_PIPELINE, samplePipeline).sendToTarget();
 
-      if (forceAudioTrack) {
-        Format silentAudioFormat =
-            new Format.Builder()
-                .setSampleMimeType(MimeTypes.AUDIO_AAC)
-                .setSampleRate(44100)
-                .setChannelCount(2)
-                .build();
-        SamplePipeline audioSamplePipeline =
-            getSamplePipeline(
-                silentAudioFormat,
-                SUPPORTED_OUTPUT_TYPE_DECODED,
-                streamStartPositionUs,
-                streamOffsetUs);
-        compositeAssetLoader.addOnMediaItemChangedListener(audioSamplePipeline, C.TRACK_TYPE_AUDIO);
-        internalHandler
-            .obtainMessage(MSG_REGISTER_SAMPLE_PIPELINE, audioSamplePipeline)
-            .sendToTarget();
-      }
-
       return samplePipeline;
     }
 
@@ -478,7 +449,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
             transformationRequest,
            firstEditedMediaItem.flattenForSlowMotion,
            firstEditedMediaItem.effects.audioProcessors,
-            forceAudioTrack ? durationUs : C.TIME_UNSET,
            encoderFactory,
            muxerWrapper,
            fallbackListener);
@@ -524,9 +494,6 @@ import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
      if (!firstEditedMediaItem.effects.audioProcessors.isEmpty()) {
        return true;
      }
-      if (forceAudioTrack) {
-        return true;
-      }
 
      return false;
    }

VideoSamplePipeline.java

@@ -47,6 +47,7 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.nio.ByteBuffer;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
 import org.checkerframework.checker.nullness.qual.MonotonicNonNull;
 import org.checkerframework.dataflow.qual.Pure;
 
@@ -56,10 +57,10 @@ import org.checkerframework.dataflow.qual.Pure;
   /** MIME type to use for output video if the input type is not a video. */
   private static final String DEFAULT_OUTPUT_MIME_TYPE = MimeTypes.VIDEO_H265;
 
+  private final AtomicLong mediaItemOffsetUs;
   private final VideoFrameProcessor videoFrameProcessor;
   private final ColorInfo videoFrameProcessorInputColor;
   private final FrameInfo firstFrameInfo;
 
   private final EncoderWrapper encoderWrapper;
   private final DecoderInputBuffer encoderOutputBuffer;
 
@@ -85,6 +86,7 @@ import org.checkerframework.dataflow.qual.Pure;
       throws ExportException {
     super(firstInputFormat, streamStartPositionUs, muxerWrapper);
 
+    mediaItemOffsetUs = new AtomicLong();
     finalFramePresentationTimeUs = C.TIME_UNSET;
 
     encoderOutputBuffer =
@@ -187,9 +189,13 @@ import org.checkerframework.dataflow.qual.Pure;
 
   @Override
   public void onMediaItemChanged(
-      EditedMediaItem editedMediaItem, Format trackFormat, long mediaItemOffsetUs) {
+      EditedMediaItem editedMediaItem,
+      long durationUs,
+      @Nullable Format trackFormat,
+      boolean isLast) {
     videoFrameProcessor.setInputFrameInfo(
-        new FrameInfo.Builder(firstFrameInfo).setOffsetToAddUs(mediaItemOffsetUs).build());
+        new FrameInfo.Builder(firstFrameInfo).setOffsetToAddUs(mediaItemOffsetUs.get()).build());
+    mediaItemOffsetUs.addAndGet(durationUs);
   }
 
   @Override

SilentAudioGeneratorTest.java

@@ -29,27 +29,41 @@ import org.junit.runner.RunWith;
 public class SilentAudioGeneratorTest {
 
   @Test
-  public void numberOfBytesProduced_isCorrect() {
+  public void addSilenceOnce_numberOfBytesProduced_isCorrect() {
     SilentAudioGenerator generator =
         new SilentAudioGenerator(
             new Format.Builder()
                 .setSampleRate(88_200)
                 .setPcmEncoding(C.ENCODING_PCM_16BIT)
                 .setChannelCount(6)
-                .build(),
-            /* totalDurationUs= */ 3_000_000);
-    int bytesOutput = 0;
-    while (!generator.isEnded()) {
-      ByteBuffer output = generator.getBuffer();
-      bytesOutput += output.remaining();
-      // "Consume" buffer.
-      output.position(output.limit());
-    }
+                .build());
+
+    generator.addSilence(/* durationUs= */ 3_000_000);
+    int bytesOutput = drainGenerator(generator);
+
     // 88_200 * 12 * 3s = 3175200
     assertThat(bytesOutput).isEqualTo(3_175_200);
   }
 
+  @Test
+  public void addSilenceTwice_numberOfBytesProduced_isCorrect() {
+    SilentAudioGenerator generator =
+        new SilentAudioGenerator(
+            new Format.Builder()
+                .setSampleRate(88_200)
+                .setPcmEncoding(C.ENCODING_PCM_16BIT)
+                .setChannelCount(6)
+                .build());
+
+    generator.addSilence(/* durationUs= */ 3_000_000);
+    int bytesOutput = drainGenerator(generator);
+    generator.addSilence(/* durationUs= */ 1_500_000);
+    bytesOutput += drainGenerator(generator);
+
+    // 88_200 * 12 * 4.5s = 4_762_800
+    assertThat(bytesOutput).isEqualTo(4_762_800);
+  }
+
   @Test
   public void lastBufferProduced_isCorrectSize() {
     SilentAudioGenerator generator =
@@ -58,11 +72,11 @@ public class SilentAudioGeneratorTest {
             .setSampleRate(44_100)
             .setPcmEncoding(C.ENCODING_PCM_16BIT)
             .setChannelCount(2)
-            .build(),
-        /* totalDurationUs= */ 1_000_000);
+            .build());
+    generator.addSilence(/* durationUs= */ 1_000_000);
 
     int currentBufferSize = 0;
-    while (!generator.isEnded()) {
+    while (generator.hasRemaining()) {
       ByteBuffer output = generator.getBuffer();
       currentBufferSize = output.remaining();
       // "Consume" buffer.
@@ -82,9 +96,23 @@ public class SilentAudioGeneratorTest {
             .setSampleRate(48_000)
             .setPcmEncoding(C.ENCODING_PCM_16BIT)
             .setChannelCount(2)
-            .build(),
-        /* totalDurationUs= */ 5_000);
+            .build());
+
+    generator.addSilence(/* durationUs= */ 5_000);
 
     // 5_000 * 48_000 * 4 / 1_000_000 = 960
     assertThat(generator.getBuffer().remaining()).isEqualTo(960);
   }
+
+  /** Drains the generator and returns the number of bytes output. */
+  private static int drainGenerator(SilentAudioGenerator generator) {
+    int bytesOutput = 0;
+    while (generator.hasRemaining()) {
+      ByteBuffer output = generator.getBuffer();
+      bytesOutput += output.remaining();
+      // "Consume" buffer.
+      output.position(output.limit());
+    }
+    return bytesOutput;
+  }
 }