Compensate for trimmed audio in buffer time check

After a period transition, the first buffer queued has the sum of the previous
period durations added to its source presentation timestamp. Those durations
take gapless edits into account, but the sanity check on the timestamp was
based on the submitted frame count, not the frame count remaining after
trimming.
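For illustration, a standalone sketch of the corrected computation follows. It
is a reconstruction, not the actual DefaultAudioSink code: inputFramesToDurationUs
is inlined as frames * 1,000,000 / sampleRateHz, and the parameters are
stand-ins for the sink's internal state (see the DefaultAudioSink diff further
down for the real change).

  /** Sketch: expected timestamp of the next queued buffer, compensating for trimmed frames. */
  static long expectedPresentationTimeUs(
      long startMediaTimeUs, long submittedFrames, long trimmedFrameCount, int sampleRateHz) {
    // Frames discarded by the trimming processor were still counted as submitted,
    // so subtract them before converting the frame count to a duration.
    long playedFrames = submittedFrames - trimmedFrameCount;
    return startMediaTimeUs + (playedFrames * 1_000_000L) / sampleRateHz;
  }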

This change fixes an issue where audio and video would gradually drift apart
because of accumulated error in the audio track position. The accumulated
error could eventually cause playback to freeze, as the audio renderer would
stop being ready and the player would switch to the standalone media clock.
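As a rough worked example, assume each gapless transition trims 2,112 frames
(a typical AAC encoder delay; the figure is an assumption for illustration,
not taken from this change):

  public final class TrimDriftExample {
    public static void main(String[] args) {
      int sampleRateHz = 44_100;            // CD-quality sample rate
      long trimmedFramesPerPeriod = 2_112;  // assumed AAC encoder delay per transition
      long errorUsPerTransition = trimmedFramesPerPeriod * 1_000_000L / sampleRateHz;
      // Prints ~47891us; without compensation, roughly five transitions accumulate
      // ~239ms of error, exceeding the 200ms discontinuity threshold in the
      // DefaultAudioSink check shown in the diff below.
      System.out.println("Error per gapless transition: " + errorUsPerTransition + "us");
    }
  }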

Issue: #4559

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=213819908
andrewlewis 2018-09-20 08:40:19 -07:00 committed by Oliver Woodman
parent 4b33d3c8a2
commit a0ab96623a
3 changed files with 55 additions and 6 deletions

RELEASENOTES.md

@@ -4,6 +4,9 @@
* Allow setting log level for ExoPlayer logcat output
([#4665](https://github.com/google/ExoPlayer/issues/4665)).
+* Fix an issue where audio and video would desynchronize when playing
+  concatenations of gapless content
+  ([#4559](https://github.com/google/ExoPlayer/issues/4559)).
### 2.9.0 ###

library/core/src/main/java/com/google/android/exoplayer2/audio/DefaultAudioSink.java

@@ -632,7 +632,9 @@ public final class DefaultAudioSink implements AudioSink {
} else {
// Sanity check that presentationTimeUs is consistent with the expected value.
long expectedPresentationTimeUs =
-          startMediaTimeUs + inputFramesToDurationUs(getSubmittedFrames());
+          startMediaTimeUs
+              + inputFramesToDurationUs(
+                  getSubmittedFrames() - trimmingAudioProcessor.getTrimmedFrameCount());
if (startMediaTimeState == START_IN_SYNC
&& Math.abs(expectedPresentationTimeUs - presentationTimeUs) > 200000) {
Log.e(TAG, "Discontinuity detected [expected " + expectedPresentationTimeUs + ", got "
@@ -955,6 +957,7 @@ public final class DefaultAudioSink implements AudioSink {
playbackParametersCheckpoints.clear();
playbackParametersOffsetUs = 0;
playbackParametersPositionUs = 0;
+    trimmingAudioProcessor.resetTrimmedFrameCount();
inputBuffer = null;
outputBuffer = null;
flushAudioProcessors();

library/core/src/main/java/com/google/android/exoplayer2/audio/TrimmingAudioProcessor.java

@@ -25,11 +25,14 @@ import java.nio.ByteOrder;
/** Audio processor for trimming samples from the start/end of data. */
/* package */ final class TrimmingAudioProcessor implements AudioProcessor {
+  private static final int OUTPUT_ENCODING = C.ENCODING_PCM_16BIT;
private boolean isActive;
private int trimStartFrames;
private int trimEndFrames;
private int channelCount;
private int sampleRateHz;
+  private int bytesPerFrame;
private int pendingTrimStartBytes;
private ByteBuffer buffer;
@@ -37,6 +40,7 @@ import java.nio.ByteOrder;
private byte[] endBuffer;
private int endBufferSize;
private boolean inputEnded;
+  private long trimmedFrameCount;
/** Creates a new audio processor for trimming samples from the start/end of data. */
public TrimmingAudioProcessor() {
@@ -61,17 +65,34 @@ import java.nio.ByteOrder;
this.trimEndFrames = trimEndFrames;
}
+  /** Sets the trimmed frame count returned by {@link #getTrimmedFrameCount()} to zero. */
+  public void resetTrimmedFrameCount() {
+    trimmedFrameCount = 0;
+  }
+
+  /**
+   * Returns the number of audio frames trimmed since the last call to {@link
+   * #resetTrimmedFrameCount()}.
+   */
+  public long getTrimmedFrameCount() {
+    return trimmedFrameCount;
+  }
@Override
public boolean configure(int sampleRateHz, int channelCount, @Encoding int encoding)
throws UnhandledFormatException {
-    if (encoding != C.ENCODING_PCM_16BIT) {
+    if (encoding != OUTPUT_ENCODING) {
throw new UnhandledFormatException(sampleRateHz, channelCount, encoding);
}
+    if (endBufferSize > 0) {
+      trimmedFrameCount += endBufferSize / bytesPerFrame;
+    }
this.channelCount = channelCount;
this.sampleRateHz = sampleRateHz;
-    endBuffer = new byte[trimEndFrames * channelCount * 2];
+    bytesPerFrame = Util.getPcmFrameSize(OUTPUT_ENCODING, channelCount);
+    endBuffer = new byte[trimEndFrames * bytesPerFrame];
endBufferSize = 0;
-    pendingTrimStartBytes = trimStartFrames * channelCount * 2;
+    pendingTrimStartBytes = trimStartFrames * bytesPerFrame;
boolean wasActive = isActive;
isActive = trimStartFrames != 0 || trimEndFrames != 0;
return wasActive != isActive;
@@ -89,7 +110,7 @@ import java.nio.ByteOrder;
@Override
public int getOutputEncoding() {
-    return C.ENCODING_PCM_16BIT;
+    return OUTPUT_ENCODING;
}
@Override
@@ -103,8 +124,13 @@ import java.nio.ByteOrder;
int limit = inputBuffer.limit();
int remaining = limit - position;
+    if (remaining == 0) {
+      return;
+    }
// Trim any pending start bytes from the input buffer.
int trimBytes = Math.min(remaining, pendingTrimStartBytes);
+    trimmedFrameCount += trimBytes / bytesPerFrame;
pendingTrimStartBytes -= trimBytes;
inputBuffer.position(position + trimBytes);
if (pendingTrimStartBytes > 0) {
@@ -151,9 +177,26 @@ import java.nio.ByteOrder;
inputEnded = true;
}
@SuppressWarnings("ReferenceEquality")
@Override
public ByteBuffer getOutput() {
ByteBuffer outputBuffer = this.outputBuffer;
+    if (inputEnded && endBufferSize > 0 && outputBuffer == EMPTY_BUFFER) {
+      // Because audio processors may be drained in the middle of the stream we assume that the
+      // contents of the end buffer need to be output. Gapless transitions don't involve a call to
+      // queueEndOfStream so won't be affected. When audio is actually ending we play the padding
+      // data which is incorrect. This behavior can be fixed once we have the timestamps associated
+      // with input buffers.
+      if (buffer.capacity() < endBufferSize) {
+        buffer = ByteBuffer.allocateDirect(endBufferSize).order(ByteOrder.nativeOrder());
+      } else {
+        buffer.clear();
+      }
+      buffer.put(endBuffer, 0, endBufferSize);
+      endBufferSize = 0;
+      buffer.flip();
+      outputBuffer = buffer;
+    }
this.outputBuffer = EMPTY_BUFFER;
return outputBuffer;
}
@@ -161,7 +204,7 @@ import java.nio.ByteOrder;
@SuppressWarnings("ReferenceEquality")
@Override
public boolean isEnded() {
-    return inputEnded && outputBuffer == EMPTY_BUFFER;
+    return inputEnded && endBufferSize == 0 && outputBuffer == EMPTY_BUFFER;
}
@Override