Make the output mode more explicit in DefaultAudioSink
PiperOrigin-RevId: 322609230
commit 21fe2f1edf
parent bdadd572e2
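
In outline: DefaultAudioSink previously tracked its output as two booleans (Configuration.isInputPcm plus a useOffload flag), and this change folds them into a single @OutputMode IntDef with three values. The sketch below condenses the new declaration and the selection logic in configure() from the hunks that follow; it is a summary for orientation rather than a verbatim excerpt, and enableOffload, audioAttributes and inputFormat are the sink's existing members and parameters.

    // Condensed from the DefaultAudioSink hunks below (illustrative, not verbatim).
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @IntDef({OUTPUT_MODE_PCM, OUTPUT_MODE_OFFLOAD, OUTPUT_MODE_PASSTHROUGH})
    private @interface OutputMode {}

    private static final int OUTPUT_MODE_PCM = 0;
    private static final int OUTPUT_MODE_OFFLOAD = 1;
    private static final int OUTPUT_MODE_PASSTHROUGH = 2;

    // In configure(): decide the mode once, then store it on the Configuration.
    @OutputMode int outputMode;
    if (Util.isEncodingLinearPcm(inputFormat.encoding)) {
      outputMode = OUTPUT_MODE_PCM;
    } else if (enableOffload && isOffloadedPlaybackSupported(inputFormat, audioAttributes)) {
      outputMode = OUTPUT_MODE_OFFLOAD;
    } else {
      outputMode = OUTPUT_MODE_PASSTHROUGH;
    }

Every isInputPcm / useOffload check in the sink then becomes a comparison against outputMode, and canReuseAudioTrack() and computeBufferSize() branch on the same value.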

AudioTrackPositionTracker.java

@@ -34,12 +34,12 @@ import java.lang.reflect.Method;
  * Wraps an {@link AudioTrack}, exposing a position based on {@link
  * AudioTrack#getPlaybackHeadPosition()} and {@link AudioTrack#getTimestamp(AudioTimestamp)}.
  *
- * <p>Call {@link #setAudioTrack(AudioTrack, int, int, int)} to set the audio track to wrap. Call
- * {@link #mayHandleBuffer(long)} if there is input data to write to the track. If it returns false,
- * the audio track position is stabilizing and no data may be written. Call {@link #start()}
- * immediately before calling {@link AudioTrack#play()}. Call {@link #pause()} when pausing the
- * track. Call {@link #handleEndOfStream(long)} when no more data will be written to the track. When
- * the audio track will no longer be used, call {@link #reset()}.
+ * <p>Call {@link #setAudioTrack(AudioTrack, boolean, int, int, int)} to set the audio track to
+ * wrap. Call {@link #mayHandleBuffer(long)} if there is input data to write to the track. If it
+ * returns false, the audio track position is stabilizing and no data may be written. Call {@link
+ * #start()} immediately before calling {@link AudioTrack#play()}. Call {@link #pause()} when
+ * pausing the track. Call {@link #handleEndOfStream(long)} when no more data will be written to the
+ * track. When the audio track will no longer be used, call {@link #reset()}.
  */
 /* package */ final class AudioTrackPositionTracker {

@@ -193,6 +193,7 @@ import java.lang.reflect.Method;
    * track's position, until the next call to {@link #reset()}.
    *
    * @param audioTrack The audio track to wrap.
+   * @param isPassthrough Whether passthrough mode is being used.
    * @param outputEncoding The encoding of the audio track.
    * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
    *     otherwise.
@@ -200,6 +201,7 @@ import java.lang.reflect.Method;
    */
   public void setAudioTrack(
       AudioTrack audioTrack,
+      boolean isPassthrough,
       @C.Encoding int outputEncoding,
       int outputPcmFrameSize,
       int bufferSize) {
@@ -208,7 +210,7 @@ import java.lang.reflect.Method;
     this.bufferSize = bufferSize;
     audioTimestampPoller = new AudioTimestampPoller(audioTrack);
     outputSampleRate = audioTrack.getSampleRate();
-    needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
+    needsPassthroughWorkarounds = isPassthrough && needsPassthroughWorkarounds(outputEncoding);
     isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
     bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
     lastRawPlaybackHeadPosition = 0;
@@ -390,7 +392,7 @@ import java.lang.reflect.Method;

   /**
    * Resets the position tracker. Should be called when the audio track previously passed to {@link
-   * #setAudioTrack(AudioTrack, int, int, int)} is no longer in use.
+   * #setAudioTrack(AudioTrack, boolean, int, int, int)} is no longer in use.
    */
   public void reset() {
     resetSyncParams();
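
The class javadoc above spells out the tracker's calling protocol. A minimal sketch of that sequence with the new isPassthrough argument follows; it assumes code living in the same package (the class is package-private), and names such as driveTracker, writtenFrames and haveInputData are illustrative placeholders rather than anything in the tracker itself. In practice DefaultAudioSink drives these calls internally.

    // Call order per the javadoc; parameter names other than isPassthrough are placeholders.
    static void driveTracker(
        AudioTrackPositionTracker positionTracker,
        AudioTrack audioTrack,
        boolean isPassthrough,
        @C.Encoding int outputEncoding,
        int outputPcmFrameSize,
        int bufferSize) {
      positionTracker.setAudioTrack(
          audioTrack, isPassthrough, outputEncoding, outputPcmFrameSize, bufferSize);

      positionTracker.start(); // Immediately before AudioTrack#play().
      audioTrack.play();

      long writtenFrames = 0;
      boolean haveInputData = true; // Placeholder for "there is input data to write".
      while (haveInputData) {
        if (positionTracker.mayHandleBuffer(writtenFrames)) {
          // Safe to write to the track; advance writtenFrames as data is written.
        } else {
          // The track position is stabilizing; no data may be written yet.
        }
        haveInputData = false; // Placeholder exit condition.
      }

      positionTracker.handleEndOfStream(writtenFrames); // No more data will be written.
      positionTracker.pause();                          // When pausing the track.
      positionTracker.reset();                          // When the track will no longer be used.
    }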

DefaultAudioSink.java

@@ -22,6 +22,7 @@ import android.media.AudioTrack;
 import android.os.ConditionVariable;
 import android.os.Handler;
 import android.os.SystemClock;
+import androidx.annotation.IntDef;
 import androidx.annotation.Nullable;
 import androidx.annotation.RequiresApi;
 import com.google.android.exoplayer2.C;
@@ -31,6 +32,9 @@ import com.google.android.exoplayer2.audio.AudioProcessor.UnhandledAudioFormatEx
 import com.google.android.exoplayer2.util.Assertions;
 import com.google.android.exoplayer2.util.Log;
 import com.google.android.exoplayer2.util.Util;
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.util.ArrayDeque;
@@ -201,6 +205,15 @@ public final class DefaultAudioSink implements AudioSink {
     }
   }

+  @Documented
+  @Retention(RetentionPolicy.SOURCE)
+  @IntDef({OUTPUT_MODE_PCM, OUTPUT_MODE_OFFLOAD, OUTPUT_MODE_PASSTHROUGH})
+  private @interface OutputMode {}
+
+  private static final int OUTPUT_MODE_PCM = 0;
+  private static final int OUTPUT_MODE_OFFLOAD = 1;
+  private static final int OUTPUT_MODE_PASSTHROUGH = 2;
+
   /** A minimum length for the {@link AudioTrack} buffer, in microseconds. */
   private static final long MIN_BUFFER_DURATION_US = 250_000;
   /** A maximum length for the {@link AudioTrack} buffer, in microseconds. */
@@ -440,14 +453,7 @@ public final class DefaultAudioSink implements AudioSink {
       // guaranteed to support.
       return SINK_FORMAT_SUPPORTED_WITH_TRANSCODING;
     }
-    if (enableOffload
-        && isOffloadedPlaybackSupported(
-            format.channelCount,
-            format.sampleRate,
-            format.encoding,
-            audioAttributes,
-            format.encoderDelay,
-            format.encoderPadding)) {
+    if (enableOffload && isOffloadedPlaybackSupported(format, audioAttributes)) {
       return SINK_FORMAT_SUPPORTED_DIRECTLY;
     }
     if (isPassthroughPlaybackSupported(format)) {
@@ -469,20 +475,18 @@ public final class DefaultAudioSink implements AudioSink {
   @Override
   public void configure(Format inputFormat, int specifiedBufferSize, @Nullable int[] outputChannels)
       throws ConfigurationException {
-    boolean isInputPcm = Util.isEncodingLinearPcm(inputFormat.encoding);
-
     int inputPcmFrameSize;
     @Nullable AudioProcessor[] availableAudioProcessors;
     boolean canApplyPlaybackParameters;

-    boolean useOffload;
+    @OutputMode int outputMode;
     @C.Encoding int outputEncoding;
     int outputSampleRate;
     int outputChannelCount;
     int outputChannelConfig;
     int outputPcmFrameSize;

-    if (isInputPcm) {
+    if (Util.isEncodingLinearPcm(inputFormat.encoding)) {
       inputPcmFrameSize = Util.getPcmFrameSize(inputFormat.encoding, inputFormat.channelCount);

       boolean useFloatOutput =
@@ -518,23 +522,13 @@ public final class DefaultAudioSink implements AudioSink {
         }
       }

-      useOffload = false;
+      outputMode = OUTPUT_MODE_PCM;
       outputEncoding = outputFormat.encoding;
       outputSampleRate = outputFormat.sampleRate;
       outputChannelCount = outputFormat.channelCount;
       outputChannelConfig = Util.getAudioTrackChannelConfig(outputChannelCount);
       outputPcmFrameSize = Util.getPcmFrameSize(outputEncoding, outputChannelCount);
     } else {
-      // We're configuring for either passthrough or offload.
-      useOffload =
-          enableOffload
-              && isOffloadedPlaybackSupported(
-                  inputFormat.channelCount,
-                  inputFormat.sampleRate,
-                  inputFormat.encoding,
-                  audioAttributes,
-                  inputFormat.encoderDelay,
-                  inputFormat.encoderPadding);
       inputPcmFrameSize = C.LENGTH_UNSET;
       availableAudioProcessors = new AudioProcessor[0];
       canApplyPlaybackParameters = false;
@@ -542,10 +536,13 @@ public final class DefaultAudioSink implements AudioSink {
       outputSampleRate = inputFormat.sampleRate;
       outputChannelCount = inputFormat.channelCount;
       outputPcmFrameSize = C.LENGTH_UNSET;
-      outputChannelConfig =
-          useOffload
-              ? Util.getAudioTrackChannelConfig(outputChannelCount)
-              : getChannelConfigForPassthrough(inputFormat.channelCount);
+      if (enableOffload && isOffloadedPlaybackSupported(inputFormat, audioAttributes)) {
+        outputMode = OUTPUT_MODE_OFFLOAD;
+        outputChannelConfig = Util.getAudioTrackChannelConfig(outputChannelCount);
+      } else {
+        outputMode = OUTPUT_MODE_PASSTHROUGH;
+        outputChannelConfig = getChannelConfigForPassthrough(inputFormat.channelCount);
+      }
     }

     if (outputChannelConfig == AudioFormat.CHANNEL_INVALID) {
@@ -554,9 +551,9 @@ public final class DefaultAudioSink implements AudioSink {

     Configuration pendingConfiguration =
         new Configuration(
-            isInputPcm,
             inputPcmFrameSize,
             inputFormat.sampleRate,
+            outputMode,
             outputPcmFrameSize,
             outputSampleRate,
             outputChannelConfig,
@@ -565,8 +562,7 @@ public final class DefaultAudioSink implements AudioSink {
             canApplyPlaybackParameters,
             availableAudioProcessors,
             inputFormat.encoderDelay,
-            inputFormat.encoderPadding,
-            useOffload);
+            inputFormat.encoderPadding);
     if (isInitialized()) {
       this.pendingConfiguration = pendingConfiguration;
     } else {
@@ -641,6 +637,7 @@ public final class DefaultAudioSink implements AudioSink {

     audioTrackPositionTracker.setAudioTrack(
         audioTrack,
+        configuration.outputMode == OUTPUT_MODE_PASSTHROUGH,
         configuration.outputEncoding,
         configuration.outputPcmFrameSize,
         configuration.bufferSize);
@@ -718,7 +715,7 @@ public final class DefaultAudioSink implements AudioSink {
       return true;
     }

-    if (!configuration.isInputPcm && framesPerEncodedSample == 0) {
+    if (configuration.outputMode != OUTPUT_MODE_PCM && framesPerEncodedSample == 0) {
       // If this is the first encoded sample, calculate the sample size in frames.
       framesPerEncodedSample = getFramesPerEncodedSample(configuration.outputEncoding, buffer);
       if (framesPerEncodedSample == 0) {
@@ -772,7 +769,7 @@ public final class DefaultAudioSink implements AudioSink {
       }
     }

-    if (configuration.isInputPcm) {
+    if (configuration.outputMode == OUTPUT_MODE_PCM) {
       submittedPcmBytes += buffer.remaining();
     } else {
       submittedEncodedFrames += framesPerEncodedSample * encodedAccessUnitCount;
@@ -861,7 +858,7 @@ public final class DefaultAudioSink implements AudioSink {
     }
     int bytesRemaining = buffer.remaining();
     int bytesWritten = 0;
-    if (Util.SDK_INT < 21) { // isInputPcm == true
+    if (Util.SDK_INT < 21) { // outputMode == OUTPUT_MODE_PCM.
       // Work out how many bytes we can write without the risk of blocking.
       int bytesToWrite = audioTrackPositionTracker.getAvailableBufferSize(writtenPcmBytes);
       if (bytesToWrite > 0) {
@@ -896,11 +893,11 @@ public final class DefaultAudioSink implements AudioSink {
       listener.onOffloadBufferFull(pendingDurationMs);
     }

-    if (configuration.isInputPcm) {
+    if (configuration.outputMode == OUTPUT_MODE_PCM) {
       writtenPcmBytes += bytesWritten;
     }
     if (bytesWritten == bytesRemaining) {
-      if (!configuration.isInputPcm) {
+      if (configuration.outputMode != OUTPUT_MODE_PCM) {
         // When playing non-PCM, the inputBuffer is never processed, thus the last inputBuffer
         // must be the current input buffer.
         Assertions.checkState(buffer == inputBuffer);
@@ -1268,13 +1265,13 @@ public final class DefaultAudioSink implements AudioSink {
   }

   private long getSubmittedFrames() {
-    return configuration.isInputPcm
+    return configuration.outputMode == OUTPUT_MODE_PCM
         ? (submittedPcmBytes / configuration.inputPcmFrameSize)
         : submittedEncodedFrames;
   }

   private long getWrittenFrames() {
-    return configuration.isInputPcm
+    return configuration.outputMode == OUTPUT_MODE_PCM
         ? (writtenPcmBytes / configuration.outputPcmFrameSize)
         : writtenEncodedFrames;
   }
@@ -1297,23 +1294,18 @@ public final class DefaultAudioSink implements AudioSink {
   }

   private static boolean isOffloadedPlaybackSupported(
-      int channelCount,
-      int sampleRateHz,
-      @C.Encoding int encoding,
-      AudioAttributes audioAttributes,
-      int trimStartFrames,
-      int trimEndFrames) {
+      Format format, AudioAttributes audioAttributes) {
     if (Util.SDK_INT < 29) {
       return false;
     }
-    int channelConfig = Util.getAudioTrackChannelConfig(channelCount);
-    AudioFormat audioFormat = getAudioFormat(sampleRateHz, channelConfig, encoding);
+    int channelConfig = Util.getAudioTrackChannelConfig(format.channelCount);
+    AudioFormat audioFormat = getAudioFormat(format.sampleRate, channelConfig, format.encoding);
     if (!AudioManager.isOffloadedPlaybackSupported(
         audioFormat, audioAttributes.getAudioAttributesV21())) {
       return false;
     }
-    boolean noGapless = trimStartFrames == 0 && trimEndFrames == 0;
-    return noGapless || isOffloadGaplessSupported();
+    boolean notGapless = format.encoderDelay == 0 && format.encoderPadding == 0;
+    return notGapless || isOffloadGaplessSupported();
   }

   /**
@@ -1634,9 +1626,9 @@ public final class DefaultAudioSink implements AudioSink {
   /** Stores configuration relating to the audio format. */
   private static final class Configuration {

-    public final boolean isInputPcm;
     public final int inputPcmFrameSize;
     public final int inputSampleRate;
+    @OutputMode public final int outputMode;
     public final int outputPcmFrameSize;
     public final int outputSampleRate;
     public final int outputChannelConfig;
@@ -1646,12 +1638,11 @@ public final class DefaultAudioSink implements AudioSink {
     public final AudioProcessor[] availableAudioProcessors;
     public int trimStartFrames;
     public int trimEndFrames;
-    public final boolean useOffload;

     public Configuration(
-        boolean isInputPcm,
         int inputPcmFrameSize,
         int inputSampleRate,
+        @OutputMode int outputMode,
         int outputPcmFrameSize,
         int outputSampleRate,
         int outputChannelConfig,
@@ -1660,11 +1651,10 @@ public final class DefaultAudioSink implements AudioSink {
         boolean canApplyPlaybackParameters,
         AudioProcessor[] availableAudioProcessors,
         int trimStartFrames,
-        int trimEndFrames,
-        boolean useOffload) {
-      this.isInputPcm = isInputPcm;
+        int trimEndFrames) {
       this.inputPcmFrameSize = inputPcmFrameSize;
       this.inputSampleRate = inputSampleRate;
+      this.outputMode = outputMode;
       this.outputPcmFrameSize = outputPcmFrameSize;
       this.outputSampleRate = outputSampleRate;
       this.outputChannelConfig = outputChannelConfig;
@@ -1673,7 +1663,6 @@ public final class DefaultAudioSink implements AudioSink {
       this.availableAudioProcessors = availableAudioProcessors;
       this.trimStartFrames = trimStartFrames;
       this.trimEndFrames = trimEndFrames;
-      this.useOffload = useOffload;

       // Call computeBufferSize() last as it depends on the other configuration values.
       this.bufferSize = computeBufferSize(specifiedBufferSize);
@@ -1681,11 +1670,11 @@ public final class DefaultAudioSink implements AudioSink {

     /** Returns if the configurations are sufficiently compatible to reuse the audio track. */
     public boolean canReuseAudioTrack(Configuration audioTrackConfiguration) {
-      return audioTrackConfiguration.outputEncoding == outputEncoding
+      return audioTrackConfiguration.outputMode == outputMode
+          && audioTrackConfiguration.outputEncoding == outputEncoding
           && audioTrackConfiguration.outputSampleRate == outputSampleRate
           && audioTrackConfiguration.outputChannelConfig == outputChannelConfig
-          && audioTrackConfiguration.outputPcmFrameSize == outputPcmFrameSize
-          && audioTrackConfiguration.useOffload == useOffload;
+          && audioTrackConfiguration.outputPcmFrameSize == outputPcmFrameSize;
     }

     public long inputFramesToDurationUs(long frameCount) {
@@ -1738,7 +1727,7 @@ public final class DefaultAudioSink implements AudioSink {
           .setTransferMode(AudioTrack.MODE_STREAM)
           .setBufferSizeInBytes(bufferSize)
           .setSessionId(audioSessionId)
-          .setOffloadedPlayback(useOffload)
+          .setOffloadedPlayback(outputMode == OUTPUT_MODE_OFFLOAD)
           .build();
     }

@@ -1779,12 +1768,16 @@ public final class DefaultAudioSink implements AudioSink {
     private int computeBufferSize(int specifiedBufferSize) {
       if (specifiedBufferSize != 0) {
         return specifiedBufferSize;
-      } else if (isInputPcm) {
-        return getPcmDefaultBufferSize();
-      } else if (useOffload) {
-        return getEncodedDefaultBufferSize(OFFLOAD_BUFFER_DURATION_US);
-      } else { // Passthrough
-        return getEncodedDefaultBufferSize(PASSTHROUGH_BUFFER_DURATION_US);
+      }
+      switch (outputMode) {
+        case OUTPUT_MODE_PCM:
+          return getPcmDefaultBufferSize();
+        case OUTPUT_MODE_OFFLOAD:
+          return getEncodedDefaultBufferSize(OFFLOAD_BUFFER_DURATION_US);
+        case OUTPUT_MODE_PASSTHROUGH:
+          return getEncodedDefaultBufferSize(PASSTHROUGH_BUFFER_DURATION_US);
+        default:
+          throw new IllegalStateException();
       }
     }
