Allow overriding skipping/scaling with custom AudioProcessors

Issue: #3142

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=193519029
commit 2b367759cf (parent d2c6871ce6)
@@ -53,6 +53,8 @@
   with encoder padding when the decoder returns a non-empty final buffer.
 * Allow trimming more than one sample when applying an elst audio edit via
   gapless playback info.
+* Allow overriding skipping/scaling with custom `AudioProcessor`s
+  ([#3142](https://github.com/google/ExoPlayer/issues/3142)).
 * Caching:
   * Add release method to Cache interface.
   * Prevent multiple instances of SimpleCache in the same folder.
@@ -35,6 +35,8 @@ import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 
 /**
  * Plays audio data. The implementation delegates to an {@link AudioTrack} and handles playback
@@ -64,6 +66,92 @@ public final class DefaultAudioSink implements AudioSink {
 
   }
 
+  /**
+   * Provides a chain of audio processors, which are used for any user-defined processing and
+   * applying playback parameters (if supported). Because applying playback parameters can skip and
+   * stretch/compress audio, the sink will query the chain for information on how to transform its
+   * output position to map it onto a media position, via {@link #getMediaDuration(long)} and {@link
+   * #getSkippedOutputFrameCount()}.
+   */
+  public interface AudioProcessorChain {
+
+    /**
+     * Returns the fixed chain of audio processors that will process audio. This method is called
+     * once during initialization, but audio processors may change state to become active/inactive
+     * during playback.
+     */
+    AudioProcessor[] getAudioProcessors();
+
+    /**
+     * Configures audio processors to apply the specified playback parameters immediately, returning
+     * the new parameters, which may differ from those passed in. Only called when processors have
+     * no input pending.
+     *
+     * @param playbackParameters The playback parameters to try to apply.
+     * @return The playback parameters that were actually applied.
+     */
+    PlaybackParameters applyPlaybackParameters(PlaybackParameters playbackParameters);
+
+    /**
+     * Scales the specified playout duration to take into account speedup due to audio processing,
+     * returning an input media duration, in arbitrary units.
+     */
+    long getMediaDuration(long playoutDuration);
+
+    /**
+     * Returns the number of output audio frames skipped since the audio processors were last
+     * flushed.
+     */
+    long getSkippedOutputFrameCount();
+  }
+
+  /**
+   * The default audio processor chain, which applies a (possibly empty) chain of user-defined audio
+   * processors followed by {@link SilenceSkippingAudioProcessor} and {@link SonicAudioProcessor}.
+   */
+  public static class DefaultAudioProcessorChain implements AudioProcessorChain {
+
+    private final AudioProcessor[] audioProcessors;
+    private final SilenceSkippingAudioProcessor silenceSkippingAudioProcessor;
+    private final SonicAudioProcessor sonicAudioProcessor;
+
+    /**
+     * Creates a new default chain of audio processors, with the user-defined {@code
+     * audioProcessors} applied before silence skipping and playback parameters.
+     */
+    public DefaultAudioProcessorChain(AudioProcessor... audioProcessors) {
+      this.audioProcessors = Arrays.copyOf(audioProcessors, audioProcessors.length + 2);
+      silenceSkippingAudioProcessor = new SilenceSkippingAudioProcessor();
+      sonicAudioProcessor = new SonicAudioProcessor();
+      this.audioProcessors[audioProcessors.length] = silenceSkippingAudioProcessor;
+      this.audioProcessors[audioProcessors.length + 1] = sonicAudioProcessor;
+    }
+
+    @Override
+    public AudioProcessor[] getAudioProcessors() {
+      return audioProcessors;
+    }
+
+    @Override
+    public PlaybackParameters applyPlaybackParameters(PlaybackParameters playbackParameters) {
+      silenceSkippingAudioProcessor.setEnabled(playbackParameters.skipSilence);
+      return new PlaybackParameters(
+          sonicAudioProcessor.setSpeed(playbackParameters.speed),
+          sonicAudioProcessor.setPitch(playbackParameters.pitch),
+          playbackParameters.skipSilence);
+    }
+
+    @Override
+    public long getMediaDuration(long playoutDuration) {
+      return sonicAudioProcessor.scaleDurationForSpeedup(playoutDuration);
+    }
+
+    @Override
+    public long getSkippedOutputFrameCount() {
+      return silenceSkippingAudioProcessor.getSkippedFrames();
+    }
+  }
+
   /**
    * A minimum length for the {@link AudioTrack} buffer, in microseconds.
    */
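For illustration, a client that wants to take over skipping and time scaling can implement the interface above itself instead of relying on DefaultAudioProcessorChain. The sketch below is not part of this commit: CustomSilenceTrimmer and CustomTimeScaler are hypothetical processors assumed to expose methods mirroring SilenceSkippingAudioProcessor and SonicAudioProcessor, and the imports assume the ExoPlayer 2 package layout.

import com.google.android.exoplayer2.PlaybackParameters;
import com.google.android.exoplayer2.audio.AudioProcessor;
import com.google.android.exoplayer2.audio.DefaultAudioSink.AudioProcessorChain;

/** Sketch of a chain that replaces the built-in skipping/scaling processors. */
final class CustomAudioProcessorChain implements AudioProcessorChain {

  // Hypothetical processors mirroring SilenceSkippingAudioProcessor and SonicAudioProcessor.
  private final CustomSilenceTrimmer silenceTrimmer = new CustomSilenceTrimmer();
  private final CustomTimeScaler timeScaler = new CustomTimeScaler();

  @Override
  public AudioProcessor[] getAudioProcessors() {
    // The sink appends these after its own resampling, channel mapping and trimming processors.
    return new AudioProcessor[] {silenceTrimmer, timeScaler};
  }

  @Override
  public PlaybackParameters applyPlaybackParameters(PlaybackParameters playbackParameters) {
    silenceTrimmer.setEnabled(playbackParameters.skipSilence);
    float appliedSpeed = timeScaler.setSpeed(playbackParameters.speed);
    float appliedPitch = timeScaler.setPitch(playbackParameters.pitch);
    // Report what was actually applied, which may differ from what was requested.
    return new PlaybackParameters(appliedSpeed, appliedPitch, playbackParameters.skipSilence);
  }

  @Override
  public long getMediaDuration(long playoutDuration) {
    // Map a playout duration back to the corresponding input media duration.
    return timeScaler.scaleDurationForSpeedup(playoutDuration);
  }

  @Override
  public long getSkippedOutputFrameCount() {
    return silenceTrimmer.getSkippedFrames();
  }
}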
@@ -135,11 +223,10 @@ public final class DefaultAudioSink implements AudioSink {
   public static boolean failOnSpuriousAudioTimestamp = false;
 
   @Nullable private final AudioCapabilities audioCapabilities;
+  private final AudioProcessorChain audioProcessorChain;
   private final boolean enableConvertHighResIntPcmToFloat;
   private final ChannelMappingAudioProcessor channelMappingAudioProcessor;
   private final TrimmingAudioProcessor trimmingAudioProcessor;
-  private final SilenceSkippingAudioProcessor silenceSkippingAudioProcessor;
-  private final SonicAudioProcessor sonicAudioProcessor;
   private final AudioProcessor[] toIntPcmAvailableAudioProcessors;
   private final AudioProcessor[] toFloatPcmAvailableAudioProcessors;
   private final ConditionVariable releasingConditionVariable;
@@ -181,7 +268,7 @@ public final class DefaultAudioSink implements AudioSink {
   private long startMediaTimeUs;
   private float volume;
 
-  private AudioProcessor[] audioProcessors;
+  private AudioProcessor[] activeAudioProcessors;
   private ByteBuffer[] outputBuffers;
   @Nullable private ByteBuffer inputBuffer;
   @Nullable private ByteBuffer outputBuffer;
@@ -196,17 +283,21 @@ public final class DefaultAudioSink implements AudioSink {
   private long lastFeedElapsedRealtimeMs;
 
   /**
+   * Creates a new default audio sink.
+   *
    * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
    *     default capabilities (no encoded audio passthrough support) should be assumed.
    * @param audioProcessors An array of {@link AudioProcessor}s that will process PCM audio before
    *     output. May be empty.
    */
-  public DefaultAudioSink(@Nullable AudioCapabilities audioCapabilities,
-      AudioProcessor[] audioProcessors) {
+  public DefaultAudioSink(
+      @Nullable AudioCapabilities audioCapabilities, AudioProcessor[] audioProcessors) {
     this(audioCapabilities, audioProcessors, /* enableConvertHighResIntPcmToFloat= */ false);
   }
 
   /**
+   * Creates a new default audio sink, optionally using float output for high resolution PCM.
+   *
    * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
    *     default capabilities (no encoded audio passthrough support) should be assumed.
    * @param audioProcessors An array of {@link AudioProcessor}s that will process PCM audio before
@@ -220,22 +311,45 @@ public final class DefaultAudioSink implements AudioSink {
       @Nullable AudioCapabilities audioCapabilities,
       AudioProcessor[] audioProcessors,
       boolean enableConvertHighResIntPcmToFloat) {
+    this(
+        audioCapabilities,
+        new DefaultAudioProcessorChain(audioProcessors),
+        enableConvertHighResIntPcmToFloat);
+  }
+
+  /**
+   * Creates a new default audio sink, optionally using float output for high resolution PCM and
+   * with the specified {@code audioProcessorChain}.
+   *
+   * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
+   *     default capabilities (no encoded audio passthrough support) should be assumed.
+   * @param audioProcessorChain An {@link AudioProcessorChain} which is used to apply playback
+   *     parameters adjustments. The instance passed in must not be reused in other sinks.
+   * @param enableConvertHighResIntPcmToFloat Whether to enable conversion of high resolution
+   *     integer PCM to 32-bit float for output, if possible. Functionality that uses 16-bit integer
+   *     audio processing (for example, speed and pitch adjustment) will not be available when float
+   *     output is in use.
+   */
+  public DefaultAudioSink(
+      @Nullable AudioCapabilities audioCapabilities,
+      AudioProcessorChain audioProcessorChain,
+      boolean enableConvertHighResIntPcmToFloat) {
     this.audioCapabilities = audioCapabilities;
+    this.audioProcessorChain = Assertions.checkNotNull(audioProcessorChain);
     this.enableConvertHighResIntPcmToFloat = enableConvertHighResIntPcmToFloat;
     releasingConditionVariable = new ConditionVariable(true);
     audioTrackPositionTracker = new AudioTrackPositionTracker(new PositionTrackerListener());
     channelMappingAudioProcessor = new ChannelMappingAudioProcessor();
     trimmingAudioProcessor = new TrimmingAudioProcessor();
-    silenceSkippingAudioProcessor = new SilenceSkippingAudioProcessor();
-    sonicAudioProcessor = new SonicAudioProcessor();
-    toIntPcmAvailableAudioProcessors = new AudioProcessor[5 + audioProcessors.length];
-    toIntPcmAvailableAudioProcessors[0] = new ResamplingAudioProcessor();
-    toIntPcmAvailableAudioProcessors[1] = channelMappingAudioProcessor;
-    toIntPcmAvailableAudioProcessors[2] = trimmingAudioProcessor;
-    System.arraycopy(
-        audioProcessors, 0, toIntPcmAvailableAudioProcessors, 3, audioProcessors.length);
-    toIntPcmAvailableAudioProcessors[3 + audioProcessors.length] = silenceSkippingAudioProcessor;
-    toIntPcmAvailableAudioProcessors[4 + audioProcessors.length] = sonicAudioProcessor;
+    ArrayList<AudioProcessor> toIntPcmAudioProcessors = new ArrayList<>();
+    Collections.addAll(
+        toIntPcmAudioProcessors,
+        new ResamplingAudioProcessor(),
+        channelMappingAudioProcessor,
+        trimmingAudioProcessor);
+    Collections.addAll(toIntPcmAudioProcessors, audioProcessorChain.getAudioProcessors());
+    toIntPcmAvailableAudioProcessors =
+        toIntPcmAudioProcessors.toArray(new AudioProcessor[toIntPcmAudioProcessors.size()]);
     toFloatPcmAvailableAudioProcessors = new AudioProcessor[] {new FloatResamplingAudioProcessor()};
     volume = 1.0f;
     startMediaTimeState = START_NOT_SET;
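Given the constructors above, wiring a sink up might look like the following. This is a hedged usage sketch, not code from the commit: CustomAudioProcessorChain is the hypothetical chain sketched earlier, MyGainAudioProcessor stands in for any user-defined AudioProcessor, and passing null audio capabilities assumes the default (no passthrough) capabilities, as the javadoc allows.

// Fully override skipping/scaling with a custom chain (hypothetical class from the earlier sketch).
DefaultAudioSink customSink =
    new DefaultAudioSink(
        /* audioCapabilities= */ null,
        new CustomAudioProcessorChain(),
        /* enableConvertHighResIntPcmToFloat= */ false);

// Or keep the default skipping/scaling and only run user processors before them
// (MyGainAudioProcessor is a hypothetical AudioProcessor implementation).
DefaultAudioSink defaultChainSink =
    new DefaultAudioSink(
        /* audioCapabilities= */ null,
        new DefaultAudioSink.DefaultAudioProcessorChain(new MyGainAudioProcessor()),
        /* enableConvertHighResIntPcmToFloat= */ false);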
@@ -243,7 +357,7 @@ public final class DefaultAudioSink implements AudioSink {
     audioSessionId = C.AUDIO_SESSION_ID_UNSET;
     playbackParameters = PlaybackParameters.DEFAULT;
     drainingAudioProcessorIndex = C.INDEX_UNSET;
-    this.audioProcessors = new AudioProcessor[0];
+    activeAudioProcessors = new AudioProcessor[0];
     outputBuffers = new ByteBuffer[0];
     playbackParametersCheckpoints = new ArrayDeque<>();
   }
@@ -423,14 +537,14 @@ public final class DefaultAudioSink implements AudioSink {
       }
     }
     int count = newAudioProcessors.size();
-    audioProcessors = newAudioProcessors.toArray(new AudioProcessor[count]);
+    activeAudioProcessors = newAudioProcessors.toArray(new AudioProcessor[count]);
     outputBuffers = new ByteBuffer[count];
     flushAudioProcessors();
   }
 
   private void flushAudioProcessors() {
-    for (int i = 0; i < audioProcessors.length; i++) {
-      AudioProcessor audioProcessor = audioProcessors[i];
+    for (int i = 0; i < activeAudioProcessors.length; i++) {
+      AudioProcessor audioProcessor = activeAudioProcessors[i];
       audioProcessor.flush();
       outputBuffers[i] = audioProcessor.getOutput();
     }
@@ -468,7 +582,7 @@ public final class DefaultAudioSink implements AudioSink {
 
     playbackParameters =
         canApplyPlaybackParameters
-            ? applyPlaybackParameters(playbackParameters)
+            ? audioProcessorChain.applyPlaybackParameters(playbackParameters)
            : PlaybackParameters.DEFAULT;
     setupAudioProcessors();
 
@@ -536,7 +650,7 @@ public final class DefaultAudioSink implements AudioSink {
       }
       PlaybackParameters newPlaybackParameters = afterDrainPlaybackParameters;
       afterDrainPlaybackParameters = null;
-      newPlaybackParameters = applyPlaybackParameters(newPlaybackParameters);
+      newPlaybackParameters = audioProcessorChain.applyPlaybackParameters(newPlaybackParameters);
       // Store the position and corresponding media time from which the parameters will apply.
       playbackParametersCheckpoints.add(
           new PlaybackParametersCheckpoint(
@@ -601,7 +715,7 @@ public final class DefaultAudioSink implements AudioSink {
   }
 
   private void processBuffers(long avSyncPresentationTimeUs) throws WriteException {
-    int count = audioProcessors.length;
+    int count = activeAudioProcessors.length;
     int index = count;
     while (index >= 0) {
       ByteBuffer input = index > 0 ? outputBuffers[index - 1]
@@ -609,7 +723,7 @@ public final class DefaultAudioSink implements AudioSink {
       if (index == count) {
         writeBuffer(input, avSyncPresentationTimeUs);
       } else {
-        AudioProcessor audioProcessor = audioProcessors[index];
+        AudioProcessor audioProcessor = activeAudioProcessors[index];
         audioProcessor.queueInput(input);
         ByteBuffer output = audioProcessor.getOutput();
         outputBuffers[index] = output;
@@ -706,11 +820,11 @@ public final class DefaultAudioSink implements AudioSink {
   private boolean drainAudioProcessorsToEndOfStream() throws WriteException {
     boolean audioProcessorNeedsEndOfStream = false;
     if (drainingAudioProcessorIndex == C.INDEX_UNSET) {
-      drainingAudioProcessorIndex = processingEnabled ? 0 : audioProcessors.length;
+      drainingAudioProcessorIndex = processingEnabled ? 0 : activeAudioProcessors.length;
       audioProcessorNeedsEndOfStream = true;
     }
-    while (drainingAudioProcessorIndex < audioProcessors.length) {
-      AudioProcessor audioProcessor = audioProcessors[drainingAudioProcessorIndex];
+    while (drainingAudioProcessorIndex < activeAudioProcessors.length) {
+      AudioProcessor audioProcessor = activeAudioProcessors[drainingAudioProcessorIndex];
       if (audioProcessorNeedsEndOfStream) {
         audioProcessor.queueEndOfStream();
       }
@@ -762,7 +876,7 @@ public final class DefaultAudioSink implements AudioSink {
         afterDrainPlaybackParameters = playbackParameters;
       } else {
         // Update the playback parameters now.
-        this.playbackParameters = applyPlaybackParameters(playbackParameters);
+        this.playbackParameters = audioProcessorChain.applyPlaybackParameters(playbackParameters);
       }
     }
     return this.playbackParameters;
@@ -920,29 +1034,14 @@ public final class DefaultAudioSink implements AudioSink {
     }.start();
   }
 
-  /**
-   * Configures audio processors to apply the specified playback parameters, returning the new
-   * parameters, which may differ from those passed in.
-   *
-   * @param playbackParameters The playback parameters to try to apply.
-   * @return The playback parameters that were actually applied.
-   */
-  private PlaybackParameters applyPlaybackParameters(PlaybackParameters playbackParameters) {
-    silenceSkippingAudioProcessor.setEnabled(playbackParameters.skipSilence);
-    return new PlaybackParameters(
-        sonicAudioProcessor.setSpeed(playbackParameters.speed),
-        sonicAudioProcessor.setPitch(playbackParameters.pitch),
-        playbackParameters.skipSilence);
-  }
-
-  /**
-   * Returns the underlying audio track {@code positionUs} with any applicable speedup applied.
-   */
   private long applySpeedup(long positionUs) {
+    @Nullable PlaybackParametersCheckpoint checkpoint = null;
     while (!playbackParametersCheckpoints.isEmpty()
         && positionUs >= playbackParametersCheckpoints.getFirst().positionUs) {
+      checkpoint = playbackParametersCheckpoints.remove();
+    }
+    if (checkpoint != null) {
       // We are playing (or about to play) media with the new playback parameters, so update them.
-      PlaybackParametersCheckpoint checkpoint = playbackParametersCheckpoints.remove();
       playbackParameters = checkpoint.playbackParameters;
       playbackParametersPositionUs = checkpoint.positionUs;
       playbackParametersOffsetUs = checkpoint.mediaTimeUs - startMediaTimeUs;
@@ -954,8 +1053,9 @@ public final class DefaultAudioSink implements AudioSink {
 
     if (playbackParametersCheckpoints.isEmpty()) {
       return playbackParametersOffsetUs
-          + sonicAudioProcessor.scaleDurationForSpeedup(positionUs - playbackParametersPositionUs);
+          + audioProcessorChain.getMediaDuration(positionUs - playbackParametersPositionUs);
     }
+
     // We are playing data at a previous playback speed, so fall back to multiplying by the speed.
     return playbackParametersOffsetUs
         + Util.getMediaDurationForPlayoutDuration(
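The position mapping above leans on getMediaDuration(): playout time since the last parameters checkpoint is converted back into media time and added to the checkpoint's media offset. A worked example with made-up numbers, assuming a chain running at a constant 2x speed:

// Illustrative numbers only: parameters took effect after 5s of playout, which mapped to 5s of media.
long playbackParametersPositionUs = 5_000_000;
long playbackParametersOffsetUs = 5_000_000;
long positionUs = 6_000_000;                                                // 6s played out so far
long playoutSinceCheckpointUs = positionUs - playbackParametersPositionUs;  // 1s at the new speed
long mediaSinceCheckpointUs = 2 * playoutSinceCheckpointUs;                 // what getMediaDuration(...) would report at 2x
long mediaPositionUs = playbackParametersOffsetUs + mediaSinceCheckpointUs; // 7s into the media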
@@ -963,7 +1063,7 @@ public final class DefaultAudioSink implements AudioSink {
   }
 
   private long applySkipping(long positionUs) {
-    return positionUs + framesToDurationUs(silenceSkippingAudioProcessor.getSkippedFrames());
+    return positionUs + framesToDurationUs(audioProcessorChain.getSkippedOutputFrameCount());
   }
 
   private boolean isInitialized() {
@@ -27,7 +27,7 @@ import java.nio.ByteOrder;
 * An {@link AudioProcessor} that skips silence in the input stream. Input and output are 16-bit
 * PCM.
 */
-/* package */ final class SilenceSkippingAudioProcessor implements AudioProcessor {
+public final class SilenceSkippingAudioProcessor implements AudioProcessor {
 
   /**
    * The minimum duration of audio that must be below {@link #SILENCE_THRESHOLD_LEVEL} to classify