Wrap AudioSamplePipeline input and processing within AudioGraph.

This is a no-op refactor.

PiperOrigin-RevId: 545421300
samrobinson 2023-07-04 12:40:34 +00:00 committed by microkatz
parent 8ea79a13f9
commit c33a17d89c
5 changed files with 397 additions and 278 deletions
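
In outline, the refactor extracts input buffering, silence generation, and AudioProcessingPipeline management from AudioSamplePipeline into the new package-private AudioGraph, leaving the sample pipeline responsible only for feeding the encoder and muxer. The following is a condensed, hypothetical miniature of the resulting shape; the names mirror the diffs below, but the stand-in classes and their trivial bodies are illustrative only:

import java.nio.ByteBuffer;

// Stand-in for AudioGraph: owns all audio processing and produces output buffers.
final class AudioGraphMini {
  private final ByteBuffer output = ByteBuffer.allocate(0);

  ByteBuffer getOutput() { // the real AudioGraph drains silence, input buffers, or the APP
    return output;
  }

  boolean isEnded() { // the real check also covers pending input and media item changes
    return !output.hasRemaining();
  }
}

// Stand-in for AudioSamplePipeline: no longer owns buffer queues or audio processors.
final class AudioSamplePipelineMini {
  private final AudioGraphMini audioGraph = new AudioGraphMini();

  boolean processDataUpToMuxer() {
    ByteBuffer buffer = audioGraph.getOutput(); // the graph owns the processing
    return buffer.hasRemaining(); // more work is possible only if output was produced
  }
}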

AudioProcessor.java

@@ -114,6 +114,7 @@ public interface AudioProcessor {
/** Exception thrown when the given {@link AudioFormat} can not be handled. */
final class UnhandledAudioFormatException extends Exception {
public final AudioFormat inputAudioFormat;
public UnhandledAudioFormatException(AudioFormat inputAudioFormat) {
this("Unhandled input format:", inputAudioFormat);
@@ -121,6 +122,7 @@ public interface AudioProcessor {
public UnhandledAudioFormatException(String message, AudioFormat audioFormat) {
super(message + " " + audioFormat);
this.inputAudioFormat = audioFormat;
}
}

AudioGraph.java

@@ -0,0 +1,362 @@
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.media3.transformer;
import static androidx.media3.common.audio.AudioProcessor.EMPTY_BUFFER;
import static androidx.media3.common.util.Assertions.checkArgument;
import static androidx.media3.common.util.Assertions.checkNotNull;
import static androidx.media3.common.util.Assertions.checkState;
import static androidx.media3.common.util.Assertions.checkStateNotNull;
import static androidx.media3.decoder.DecoderInputBuffer.BUFFER_REPLACEMENT_MODE_DIRECT;
import android.util.Pair;
import androidx.annotation.Nullable;
import androidx.media3.common.C;
import androidx.media3.common.Format;
import androidx.media3.common.MimeTypes;
import androidx.media3.common.audio.AudioProcessingPipeline;
import androidx.media3.common.audio.AudioProcessor;
import androidx.media3.common.audio.AudioProcessor.AudioFormat;
import androidx.media3.common.audio.AudioProcessor.UnhandledAudioFormatException;
import androidx.media3.common.audio.ChannelMixingAudioProcessor;
import androidx.media3.common.audio.ChannelMixingMatrix;
import androidx.media3.common.audio.SonicAudioProcessor;
import androidx.media3.common.audio.SpeedChangingAudioProcessor;
import androidx.media3.common.util.NullableType;
import androidx.media3.decoder.DecoderInputBuffer;
import com.google.common.collect.ImmutableList;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.atomic.AtomicReference;
/** Processes raw audio samples. */
/* package */ final class AudioGraph implements SampleConsumer, OnMediaItemChangedListener {
private static final int MAX_INPUT_BUFFER_COUNT = 10;
private final AudioFormat outputAudioFormat;
private final SilentAudioGenerator silentAudioGenerator;
private final Queue<DecoderInputBuffer> availableInputBuffers;
private final Queue<DecoderInputBuffer> pendingInputBuffers;
private final AtomicReference<@NullableType Pair<EditedMediaItem, @NullableType Format>>
pendingMediaItem;
@Nullable private DecoderInputBuffer currentInputBufferBeingOutput;
private AudioProcessingPipeline audioProcessingPipeline;
private boolean receivedFirstMediaItemCallback;
private boolean receivedEndOfStreamFromInput;
private volatile boolean queueEndOfStreamAfterSilence;
// TODO(b/260618558): Move silent audio generation upstream of this component.
public AudioGraph(Format firstInputFormat, EditedMediaItem firstEditedMediaItem)
throws UnhandledAudioFormatException {
checkArgument(firstInputFormat.pcmEncoding != Format.NO_VALUE);
availableInputBuffers = new ConcurrentLinkedDeque<>();
ByteBuffer emptyBuffer = ByteBuffer.allocateDirect(0).order(ByteOrder.nativeOrder());
for (int i = 0; i < MAX_INPUT_BUFFER_COUNT; i++) {
DecoderInputBuffer inputBuffer = new DecoderInputBuffer(BUFFER_REPLACEMENT_MODE_DIRECT);
inputBuffer.data = emptyBuffer;
availableInputBuffers.add(inputBuffer);
}
pendingInputBuffers = new ConcurrentLinkedDeque<>();
pendingMediaItem = new AtomicReference<>();
AudioFormat inputAudioFormat = new AudioFormat(firstInputFormat);
silentAudioGenerator = new SilentAudioGenerator(inputAudioFormat);
audioProcessingPipeline =
configureProcessing(
/* editedMediaItem= */ firstEditedMediaItem,
/* trackFormat= */ firstInputFormat,
/* inputAudioFormat= */ inputAudioFormat,
/* requiredOutputAudioFormat= */ AudioFormat.NOT_SET);
outputAudioFormat = audioProcessingPipeline.getOutputAudioFormat();
}
public AudioFormat getOutputAudioFormat() {
return outputAudioFormat;
}
/**
* Returns a {@link ByteBuffer} of output.
*
* @throws ExportException If the configuration of underlying components fails as a result of
* upstream changes.
*/
public ByteBuffer getOutput() throws ExportException {
ByteBuffer outputBuffer = getOutputInternal();
if (outputBuffer.hasRemaining()) {
return outputBuffer;
}
if (!hasDataToOutput() && pendingMediaItem.get() != null) {
try {
reconfigureProcessingForPendingMediaItem();
} catch (AudioProcessor.UnhandledAudioFormatException e) {
throw ExportException.createForAudioProcessing(e, e.inputAudioFormat);
}
}
return EMPTY_BUFFER;
}
@Override
public void onMediaItemChanged(
EditedMediaItem editedMediaItem,
long durationUs,
@Nullable Format trackFormat,
boolean isLast) {
if (trackFormat == null) {
checkState(
durationUs != C.TIME_UNSET,
"Could not generate silent audio because duration is unknown.");
silentAudioGenerator.addSilence(durationUs);
if (isLast) {
queueEndOfStreamAfterSilence = true;
}
} else {
checkState(MimeTypes.isAudio(trackFormat.sampleMimeType));
checkState(trackFormat.pcmEncoding != Format.NO_VALUE);
}
if (!receivedFirstMediaItemCallback) {
receivedFirstMediaItemCallback = true;
return;
}
pendingMediaItem.set(Pair.create(editedMediaItem, trackFormat));
}
@Override
@Nullable
public DecoderInputBuffer getInputBuffer() {
if (shouldGenerateSilence() || pendingMediaItem.get() != null) {
return null;
}
return availableInputBuffers.peek();
}
@Override
public boolean queueInputBuffer() {
checkState(pendingMediaItem.get() == null);
DecoderInputBuffer inputBuffer = availableInputBuffers.remove();
pendingInputBuffers.add(inputBuffer);
return true;
}
public void release() {
audioProcessingPipeline.reset();
}
/** Returns whether the input has ended and all queued data has been output. */
public boolean isEnded() {
if (hasDataToOutput()) {
return false;
}
if (pendingMediaItem.get() != null) {
return false;
}
// Only read volatile variable queueEndOfStreamAfterSilence if there is a chance that the
// graph has ended.
return receivedEndOfStreamFromInput || queueEndOfStreamAfterSilence;
}
private ByteBuffer getOutputInternal() {
if (!audioProcessingPipeline.isOperational()) {
return feedOutputFromInput();
}
// Ensure APP progresses as much as possible.
while (feedProcessingPipelineFromInput()) {}
return audioProcessingPipeline.getOutput();
}
/**
* Attempts to feed input data to the {@link AudioProcessingPipeline}.
*
* @return Whether this {@code AudioGraph} may be able to continue processing data.
*/
private boolean feedProcessingPipelineFromInput() {
if (shouldGenerateSilence()) {
ByteBuffer inputData = silentAudioGenerator.getBuffer();
audioProcessingPipeline.queueInput(inputData);
return !inputData.hasRemaining();
}
@Nullable DecoderInputBuffer pendingInputBuffer = pendingInputBuffers.peek();
if (pendingInputBuffer == null) {
if (pendingMediaItem.get() != null) {
audioProcessingPipeline.queueEndOfStream();
}
return false;
}
if (pendingInputBuffer.isEndOfStream()) {
audioProcessingPipeline.queueEndOfStream();
receivedEndOfStreamFromInput = true;
clearAndAddToAvailableBuffers(pendingInputBuffers.remove());
return false;
}
ByteBuffer inputData = checkNotNull(pendingInputBuffer.data);
audioProcessingPipeline.queueInput(inputData);
if (inputData.hasRemaining()) {
// APP could not consume all input.
return false;
}
// All input consumed, remove from pending and make available.
clearAndAddToAvailableBuffers(pendingInputBuffers.remove());
return true;
}
private ByteBuffer feedOutputFromInput() {
if (shouldGenerateSilence()) {
return silentAudioGenerator.getBuffer();
}
// When output is fed directly from input, the output ByteBuffer is linked to a specific
// DecoderInputBuffer. Therefore it must be consumed by the downstream component before it can
// be used for fresh input.
@Nullable DecoderInputBuffer previousOutputBuffer = currentInputBufferBeingOutput;
if (previousOutputBuffer != null) {
ByteBuffer data = checkStateNotNull(previousOutputBuffer.data);
if (data.hasRemaining()) {
// The data currently being output has not been consumed yet, so return it.
return data;
}
clearAndAddToAvailableBuffers(previousOutputBuffer);
currentInputBufferBeingOutput = null;
}
@Nullable DecoderInputBuffer currentInputBuffer = pendingInputBuffers.poll();
if (currentInputBuffer == null) {
return EMPTY_BUFFER;
}
@Nullable ByteBuffer currentInputBufferData = currentInputBuffer.data;
receivedEndOfStreamFromInput = currentInputBuffer.isEndOfStream();
// If there is no input data, return the buffer to the available queue, ensuring the
// underlying data reference is not retained. Data associated with an EOS buffer is ignored.
if (currentInputBufferData == null
|| !currentInputBufferData.hasRemaining()
|| receivedEndOfStreamFromInput) {
clearAndAddToAvailableBuffers(currentInputBuffer);
return EMPTY_BUFFER;
}
currentInputBufferBeingOutput = currentInputBuffer;
return currentInputBufferData;
}
private boolean hasDataToOutput() {
if (currentInputBufferBeingOutput != null
&& currentInputBufferBeingOutput.data != null
&& currentInputBufferBeingOutput.data.hasRemaining()) {
return true;
}
if (silentAudioGenerator.hasRemaining()) {
return true;
}
if (!pendingInputBuffers.isEmpty()) {
return true;
}
if (audioProcessingPipeline.isOperational() && !audioProcessingPipeline.isEnded()) {
return true;
}
return false;
}
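// Silence is generated only while no queued input buffers are waiting to be processed.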
private boolean shouldGenerateSilence() {
return silentAudioGenerator.hasRemaining() && pendingInputBuffers.isEmpty();
}
private void clearAndAddToAvailableBuffers(DecoderInputBuffer inputBuffer) {
inputBuffer.clear();
inputBuffer.timeUs = 0;
availableInputBuffers.add(inputBuffer);
}
/**
* Reconfigures audio processing based on the pending {@linkplain #onMediaItemChanged media item
* change}.
*
* <p>Before reconfiguration, all {@linkplain #hasDataToOutput() pending data} must be consumed
* through {@link #getOutput()}.
*/
private void reconfigureProcessingForPendingMediaItem() throws UnhandledAudioFormatException {
checkState(!hasDataToOutput());
Pair<EditedMediaItem, @NullableType Format> pendingChange =
checkStateNotNull(pendingMediaItem.get());
AudioFormat pendingAudioFormat =
pendingChange.second != null
? new AudioFormat(pendingChange.second)
: silentAudioGenerator.audioFormat;
audioProcessingPipeline =
configureProcessing(
/* editedMediaItem= */ pendingChange.first,
/* trackFormat= */ pendingChange.second,
/* inputAudioFormat= */ pendingAudioFormat,
/* requiredOutputAudioFormat= */ outputAudioFormat);
pendingMediaItem.set(null);
receivedEndOfStreamFromInput = false;
}
private static AudioProcessingPipeline configureProcessing(
EditedMediaItem editedMediaItem,
@Nullable Format trackFormat,
AudioFormat inputAudioFormat,
AudioFormat requiredOutputAudioFormat)
throws UnhandledAudioFormatException {
ImmutableList.Builder<AudioProcessor> audioProcessors = new ImmutableList.Builder<>();
if (editedMediaItem.flattenForSlowMotion
&& trackFormat != null
&& trackFormat.metadata != null) {
audioProcessors.add(
new SpeedChangingAudioProcessor(new SegmentSpeedProvider(trackFormat.metadata)));
}
audioProcessors.addAll(editedMediaItem.effects.audioProcessors);
// Ensure the output from APP matches what the encoder is configured to receive.
if (!requiredOutputAudioFormat.equals(AudioFormat.NOT_SET)) {
SonicAudioProcessor sampleRateChanger = new SonicAudioProcessor();
sampleRateChanger.setOutputSampleRateHz(requiredOutputAudioFormat.sampleRate);
audioProcessors.add(sampleRateChanger);
// TODO(b/262706549): Handle channel mixing with AudioMixer.
if (requiredOutputAudioFormat.channelCount <= 2) {
// ChannelMixingMatrix.create only has defaults for mono/stereo input/output.
ChannelMixingAudioProcessor channelCountChanger = new ChannelMixingAudioProcessor();
channelCountChanger.putChannelMixingMatrix(
ChannelMixingMatrix.create(
/* inputChannelCount= */ 1, requiredOutputAudioFormat.channelCount));
channelCountChanger.putChannelMixingMatrix(
ChannelMixingMatrix.create(
/* inputChannelCount= */ 2, requiredOutputAudioFormat.channelCount));
audioProcessors.add(channelCountChanger);
}
}
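// Illustrative chain for an assumed requiredOutputAudioFormat of 48_000 Hz stereo:
// [item audio processors..., SonicAudioProcessor -> 48_000 Hz,
// ChannelMixingAudioProcessor -> 2 channels].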
AudioProcessingPipeline audioProcessingPipeline =
new AudioProcessingPipeline(audioProcessors.build());
AudioFormat outputAudioFormat = audioProcessingPipeline.configure(inputAudioFormat);
if (!requiredOutputAudioFormat.equals(AudioFormat.NOT_SET)
&& !outputAudioFormat.equals(requiredOutputAudioFormat)) {
throw new UnhandledAudioFormatException(
"Audio format can not be modified to match existing downstream format", inputAudioFormat);
}
audioProcessingPipeline.flush();
return audioProcessingPipeline;
}
}
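
For orientation, the consume/produce contract of the class above can be driven by a loop along these lines. This is a hypothetical sketch (the PCM-filling step is elided and the drainAudioGraph method name is invented); the real caller is AudioSamplePipeline.processDataUpToMuxer() in the next file.

// Hypothetical driver loop; assumes an AudioGraph instance and a PCM source exist.
void drainAudioGraph(AudioGraph audioGraph) throws ExportException {
  while (!audioGraph.isEnded()) {
    DecoderInputBuffer inputBuffer = audioGraph.getInputBuffer();
    if (inputBuffer != null) { // null while generating silence or awaiting reconfiguration
      // Fill inputBuffer.data with raw PCM here, or set C.BUFFER_FLAG_END_OF_STREAM.
      audioGraph.queueInputBuffer();
    }
    ByteBuffer output = audioGraph.getOutput();
    // The same buffer is returned until fully consumed, so consume it completely
    // before polling again. Here the bytes are simply discarded.
    output.position(output.limit());
  }
}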

AudioSamplePipeline.java

@@ -19,56 +19,31 @@ package androidx.media3.transformer;
import static androidx.media3.common.util.Assertions.checkArgument;
import static androidx.media3.common.util.Assertions.checkNotNull;
import static androidx.media3.common.util.Assertions.checkState;
import static androidx.media3.common.util.Assertions.checkStateNotNull;
import static androidx.media3.decoder.DecoderInputBuffer.BUFFER_REPLACEMENT_MODE_DIRECT;
import static androidx.media3.decoder.DecoderInputBuffer.BUFFER_REPLACEMENT_MODE_DISABLED;
import static java.lang.Math.min;
import android.util.Pair;
import androidx.annotation.Nullable;
import androidx.media3.common.C;
import androidx.media3.common.Format;
import androidx.media3.common.MimeTypes;
import androidx.media3.common.audio.AudioProcessingPipeline;
import androidx.media3.common.audio.AudioProcessor;
import androidx.media3.common.audio.AudioProcessor.AudioFormat;
import androidx.media3.common.audio.ChannelMixingAudioProcessor;
import androidx.media3.common.audio.ChannelMixingMatrix;
import androidx.media3.common.audio.SonicAudioProcessor;
import androidx.media3.common.audio.SpeedChangingAudioProcessor;
import androidx.media3.common.util.NullableType;
import androidx.media3.common.util.Util;
import androidx.media3.decoder.DecoderInputBuffer;
import com.google.common.collect.ImmutableList;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.atomic.AtomicReference;
import org.checkerframework.dataflow.qual.Pure;
/** Pipeline to process, re-encode and mux raw audio samples. */
/* package */ final class AudioSamplePipeline extends SamplePipeline {
private static final int MAX_INPUT_BUFFER_COUNT = 10;
private static final int DEFAULT_ENCODER_BITRATE = 128 * 1024;
private final SilentAudioGenerator silentAudioGenerator;
private final Queue<DecoderInputBuffer> availableInputBuffers;
private final Queue<DecoderInputBuffer> pendingInputBuffers;
private final Codec encoder;
private final AudioFormat encoderInputAudioFormat;
private final DecoderInputBuffer encoderInputBuffer;
private final DecoderInputBuffer encoderOutputBuffer;
private final AtomicReference<@NullableType Pair<EditedMediaItem, @NullableType Format>>
pendingMediaItem;
private boolean receivedFirstMediaItemCallback;
private AudioProcessingPipeline audioProcessingPipeline;
private final AudioGraph audioGraph;
private long encoderTotalInputBytes;
private volatile boolean queueEndOfStreamAfterSilence;
// TODO(b/260618558): Move silent audio generation upstream of this component.
public AudioSamplePipeline(
Format firstAssetLoaderInputFormat,
Format firstPipelineInputFormat,
@@ -81,29 +56,14 @@ import org.checkerframework.dataflow.qual.Pure;
super(firstAssetLoaderInputFormat, muxerWrapper);
checkArgument(firstPipelineInputFormat.pcmEncoding != Format.NO_VALUE);
availableInputBuffers = new ConcurrentLinkedDeque<>();
ByteBuffer emptyBuffer = ByteBuffer.allocateDirect(0).order(ByteOrder.nativeOrder());
for (int i = 0; i < MAX_INPUT_BUFFER_COUNT; i++) {
DecoderInputBuffer inputBuffer = new DecoderInputBuffer(BUFFER_REPLACEMENT_MODE_DIRECT);
inputBuffer.data = emptyBuffer;
availableInputBuffers.add(inputBuffer);
try {
audioGraph = new AudioGraph(firstPipelineInputFormat, firstEditedMediaItem);
} catch (AudioProcessor.UnhandledAudioFormatException e) {
throw ExportException.createForAudioProcessing(e, e.inputAudioFormat);
}
pendingInputBuffers = new ConcurrentLinkedDeque<>();
encoderInputBuffer = new DecoderInputBuffer(BUFFER_REPLACEMENT_MODE_DISABLED);
encoderOutputBuffer = new DecoderInputBuffer(BUFFER_REPLACEMENT_MODE_DISABLED);
pendingMediaItem = new AtomicReference<>();
AudioFormat inputAudioFormat = new AudioFormat(firstPipelineInputFormat);
silentAudioGenerator = new SilentAudioGenerator(inputAudioFormat);
audioProcessingPipeline =
configureProcessing(
/* editedMediaItem= */ firstEditedMediaItem,
/* trackFormat= */ firstPipelineInputFormat,
/* inputAudioFormat= */ inputAudioFormat,
/* requiredOutputAudioFormat= */ AudioFormat.NOT_SET);
AudioFormat outputAudioFormat = audioProcessingPipeline.getOutputAudioFormat();
checkState(!outputAudioFormat.equals(AudioFormat.NOT_SET));
encoderInputAudioFormat = audioGraph.getOutputAudioFormat();
checkState(!encoderInputAudioFormat.equals(AudioFormat.NOT_SET));
encoderInputAudioFormat = outputAudioFormat;
Format requestedEncoderFormat =
new Format.Builder()
.setSampleMimeType(
@@ -125,6 +85,8 @@ import org.checkerframework.dataflow.qual.Pure;
requestedEncoderFormat,
muxerWrapper.getSupportedSampleMimeTypes(C.TRACK_TYPE_AUDIO)))
.build());
encoderInputBuffer = new DecoderInputBuffer(BUFFER_REPLACEMENT_MODE_DISABLED);
encoderOutputBuffer = new DecoderInputBuffer(BUFFER_REPLACEMENT_MODE_DISABLED);
fallbackListener.onTransformationRequestFinalized(
createFallbackTransformationRequest(
@@ -139,56 +101,47 @@ import org.checkerframework.dataflow.qual.Pure;
long durationUs,
@Nullable Format trackFormat,
boolean isLast) {
if (trackFormat == null) {
checkState(
durationUs != C.TIME_UNSET,
"Could not generate silent audio because duration is unknown.");
silentAudioGenerator.addSilence(durationUs);
if (isLast) {
queueEndOfStreamAfterSilence = true;
}
} else {
checkState(MimeTypes.isAudio(trackFormat.sampleMimeType));
checkState(trackFormat.pcmEncoding != Format.NO_VALUE);
}
if (!receivedFirstMediaItemCallback) {
receivedFirstMediaItemCallback = true;
return;
}
pendingMediaItem.set(Pair.create(editedMediaItem, trackFormat));
audioGraph.onMediaItemChanged(editedMediaItem, durationUs, trackFormat, isLast);
}
@Override
@Nullable
public DecoderInputBuffer getInputBuffer() {
if (shouldGenerateSilence() || pendingMediaItem.get() != null) {
return null;
}
return availableInputBuffers.peek();
return audioGraph.getInputBuffer();
}
@Override
public boolean queueInputBuffer() {
checkState(pendingMediaItem.get() == null);
DecoderInputBuffer inputBuffer = availableInputBuffers.remove();
pendingInputBuffers.add(inputBuffer);
return true;
return audioGraph.queueInputBuffer();
}
@Override
public void release() {
audioProcessingPipeline.reset();
audioGraph.release();
encoder.release();
}
@Override
protected boolean processDataUpToMuxer() throws ExportException {
if (!audioProcessingPipeline.isOperational()) {
return feedEncoderFromInput();
// getOutput returns the same buffer until it is fully consumed, and internally advances the
// underlying input data.
ByteBuffer audioGraphBuffer = audioGraph.getOutput();
if (!encoder.maybeDequeueInputBuffer(encoderInputBuffer)) {
return false;
}
return feedEncoderFromProcessingPipeline() || feedProcessingPipelineFromInput();
if (audioGraph.isEnded()) {
queueEndOfStreamToEncoder();
return false;
}
if (!audioGraphBuffer.hasRemaining()) {
return false;
}
feedEncoder(audioGraphBuffer);
return true;
}
@Override
@@ -219,147 +172,6 @@ import org.checkerframework.dataflow.qual.Pure;
return encoder.isEnded();
}
/**
* Reconfigures audio processing based on the pending {@linkplain #onMediaItemChanged media item
* change}.
*
* <p>Before reconfiguration, all pending buffers must be fully processed and drained to the
* encoder. End-of-stream buffers, however, must still be handled without {@linkplain
* #queueEndOfStreamToEncoder() queuing end of stream to the encoder}.
*/
private void reconfigureProcessingForPendingMediaItem() throws ExportException {
Pair<EditedMediaItem, @NullableType Format> pendingChange =
checkStateNotNull(pendingMediaItem.get());
AudioFormat pendingAudioFormat =
pendingChange.second != null
? new AudioFormat(pendingChange.second)
: silentAudioGenerator.audioFormat;
audioProcessingPipeline =
configureProcessing(
/* editedMediaItem= */ pendingChange.first,
/* trackFormat= */ pendingChange.second,
/* inputAudioFormat= */ pendingAudioFormat,
/* requiredOutputAudioFormat= */ encoderInputAudioFormat);
pendingMediaItem.set(null);
}
/**
* Attempts to pass input data to the encoder.
*
* @return Whether the {@link AudioSamplePipeline} may be able to continue processing data.
*/
private boolean feedEncoderFromInput() throws ExportException {
if (!encoder.maybeDequeueInputBuffer(encoderInputBuffer)) {
return false;
}
if (shouldGenerateSilence()) {
feedEncoder(silentAudioGenerator.getBuffer());
return true;
}
if (pendingInputBuffers.isEmpty()) {
if (pendingMediaItem.get() != null) {
reconfigureProcessingForPendingMediaItem();
return true;
}
// Only read volatile variable queueEndOfStreamAfterSilence if there is a chance that end of
// stream should be queued.
if (!silentAudioGenerator.hasRemaining() && queueEndOfStreamAfterSilence) {
queueEndOfStreamToEncoder();
}
return false;
}
DecoderInputBuffer pendingInputBuffer = pendingInputBuffers.element();
if (pendingInputBuffer.isEndOfStream()) {
if (pendingMediaItem.get() == null) {
queueEndOfStreamToEncoder();
}
removePendingInputBuffer();
return false;
}
ByteBuffer inputData = checkNotNull(pendingInputBuffer.data);
feedEncoder(inputData);
if (!inputData.hasRemaining()) {
removePendingInputBuffer();
}
return true;
}
/**
* Attempts to feed audio processor output data to the encoder.
*
* @return Whether the {@link AudioSamplePipeline} may be able to continue processing data.
*/
private boolean feedEncoderFromProcessingPipeline() throws ExportException {
if (!encoder.maybeDequeueInputBuffer(encoderInputBuffer)) {
return false;
}
ByteBuffer processingPipelineOutputBuffer = audioProcessingPipeline.getOutput();
if (!processingPipelineOutputBuffer.hasRemaining()) {
if (audioProcessingPipeline.isEnded()) {
if (pendingMediaItem.get() != null) {
reconfigureProcessingForPendingMediaItem();
return true;
}
queueEndOfStreamToEncoder();
}
return false;
}
feedEncoder(processingPipelineOutputBuffer);
return true;
}
/**
* Attempts to feed input data to the {@link AudioProcessingPipeline}.
*
* @return Whether the {@link AudioSamplePipeline} may be able to continue processing data.
*/
private boolean feedProcessingPipelineFromInput() {
if (shouldGenerateSilence()) {
ByteBuffer inputData = silentAudioGenerator.getBuffer();
audioProcessingPipeline.queueInput(inputData);
return !inputData.hasRemaining();
}
if (pendingInputBuffers.isEmpty()) {
// Only read volatile variable queueEndOfStreamAfterSilence if there is a chance that end of
// stream should be queued.
if (pendingMediaItem.get() != null
|| (!silentAudioGenerator.hasRemaining() && queueEndOfStreamAfterSilence)) {
audioProcessingPipeline.queueEndOfStream();
}
return false;
}
DecoderInputBuffer pendingInputBuffer = pendingInputBuffers.element();
if (pendingInputBuffer.isEndOfStream()) {
audioProcessingPipeline.queueEndOfStream();
removePendingInputBuffer();
return false;
}
ByteBuffer inputData = checkNotNull(pendingInputBuffer.data);
audioProcessingPipeline.queueInput(inputData);
if (inputData.hasRemaining()) {
return false;
}
removePendingInputBuffer();
return true;
}
private void removePendingInputBuffer() {
DecoderInputBuffer inputBuffer = pendingInputBuffers.remove();
inputBuffer.clear();
inputBuffer.timeUs = 0;
availableInputBuffers.add(inputBuffer);
}
/**
* Feeds as much data as possible between the current position and limit of the specified {@link
* ByteBuffer} to the encoder, and advances its position by the number of bytes fed.
@@ -401,61 +213,4 @@ import org.checkerframework.dataflow.qual.Pure;
long totalFramesWritten = encoderTotalInputBytes / encoderInputAudioFormat.bytesPerFrame;
return (totalFramesWritten * C.MICROS_PER_SECOND) / encoderInputAudioFormat.sampleRate;
}
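// Worked example of the duration math above, with assumed values: for 16-bit
// stereo PCM at 44_100 Hz, bytesPerFrame = 4, so encoderTotalInputBytes of
// 1_764_000 -> 441_000 frames -> 441_000 * 1_000_000 / 44_100 = 10_000_000 us (10 s).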
private boolean shouldGenerateSilence() {
return silentAudioGenerator.hasRemaining() && pendingInputBuffers.isEmpty();
}
private static AudioProcessingPipeline configureProcessing(
EditedMediaItem editedMediaItem,
@Nullable Format trackFormat,
AudioFormat inputAudioFormat,
AudioFormat requiredOutputAudioFormat)
throws ExportException {
ImmutableList.Builder<AudioProcessor> audioProcessors = new ImmutableList.Builder<>();
if (editedMediaItem.flattenForSlowMotion
&& trackFormat != null
&& trackFormat.metadata != null) {
audioProcessors.add(
new SpeedChangingAudioProcessor(new SegmentSpeedProvider(trackFormat.metadata)));
}
audioProcessors.addAll(editedMediaItem.effects.audioProcessors);
// Ensure the output from APP matches what the encoder is configured to receive.
if (!requiredOutputAudioFormat.equals(AudioFormat.NOT_SET)) {
SonicAudioProcessor sampleRateChanger = new SonicAudioProcessor();
sampleRateChanger.setOutputSampleRateHz(requiredOutputAudioFormat.sampleRate);
audioProcessors.add(sampleRateChanger);
// TODO(b/262706549): Handle channel mixing with AudioMixer.
if (requiredOutputAudioFormat.channelCount <= 2) {
// ChannelMixingMatrix.create only has defaults for mono/stereo input/output.
ChannelMixingAudioProcessor channelCountChanger = new ChannelMixingAudioProcessor();
channelCountChanger.putChannelMixingMatrix(
ChannelMixingMatrix.create(
/* inputChannelCount= */ 1, requiredOutputAudioFormat.channelCount));
channelCountChanger.putChannelMixingMatrix(
ChannelMixingMatrix.create(
/* inputChannelCount= */ 2, requiredOutputAudioFormat.channelCount));
audioProcessors.add(channelCountChanger);
}
}
AudioProcessingPipeline audioProcessingPipeline =
new AudioProcessingPipeline(audioProcessors.build());
try {
AudioFormat outputAudioFormat = audioProcessingPipeline.configure(inputAudioFormat);
if (!requiredOutputAudioFormat.equals(AudioFormat.NOT_SET)
&& !outputAudioFormat.equals(requiredOutputAudioFormat)) {
throw new AudioProcessor.UnhandledAudioFormatException(
"Audio format can not be modified to match existing downstream format",
inputAudioFormat);
}
} catch (AudioProcessor.UnhandledAudioFormatException unhandledAudioFormatException) {
throw ExportException.createForAudioProcessing(
unhandledAudioFormatException, inputAudioFormat);
}
audioProcessingPipeline.flush();
return audioProcessingPipeline;
}
}

SamplePipeline.java

@@ -64,7 +64,7 @@ import java.util.List;
* this method again.
*/
public final boolean processData() throws ExportException {
return feedMuxer() || processDataUpToMuxer();
return feedMuxer() || (!isMuxerInputEnded() && processDataUpToMuxer());
}
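// Note on the guard added above: once the muxer input has ended, processData()
// short-circuits before processDataUpToMuxer(), so the pipeline stops pulling
// data from upstream components once its output is complete.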
/** Releases all resources held by the pipeline. */

SequenceAssetLoader.java

@@ -366,7 +366,7 @@ import java.util.concurrent.atomic.AtomicInteger;
public boolean queueInputBuffer() {
DecoderInputBuffer inputBuffer = checkStateNotNull(sampleConsumer.getInputBuffer());
long globalTimestampUs = totalDurationUs + inputBuffer.timeUs;
if (isLooping && globalTimestampUs >= maxSequenceDurationUs) {
if (isLooping && (globalTimestampUs >= maxSequenceDurationUs || audioLoopingEnded)) {
if (isMaxSequenceDurationUsFinal && !audioLoopingEnded) {
checkNotNull(inputBuffer.data).limit(0);
inputBuffer.setFlags(C.BUFFER_FLAG_END_OF_STREAM);
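// With audioLoopingEnded added to the condition above, a looping audio track is
// also switched to end-of-stream handling once looping has ended, not only when
// the global timestamp reaches maxSequenceDurationUs.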