Factor out position tracking from DefaultAudioSink

DefaultAudioSink contained a lot of code for tracking the position of the underlying AudioTrack, and for working around issues with the position APIs. Move this code to a separate class responsible for position tracking, in preparation for improving the audio timestamp polling behavior. This change is not intended to cause any behavior changes.

Issue: #3841

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=189344743
Commit 245c59cecc (parent 137d70b092)
RELEASENOTES.md
@@ -21,6 +21,7 @@
   deprecated `DynamicConcatenatingMediaSource`.
 * Allow clipping of child media sources where the period and window have a
   non-zero offset with `ClippingMediaSource`.
+* Audio: Factor out `AudioTrack` position tracking from `DefaultAudioSink`.

 ### 2.7.1 ###

AudioTrackPositionTracker.java (new file)
@@ -0,0 +1,609 @@
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.android.exoplayer2.audio;

import android.annotation.TargetApi;
import android.media.AudioTimestamp;
import android.media.AudioTrack;
import android.os.SystemClock;
import android.support.annotation.IntDef;
import android.support.annotation.Nullable;
import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.util.Assertions;
import com.google.android.exoplayer2.util.Util;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.reflect.Method;

/**
 * Wraps an {@link AudioTrack}, exposing a position based on {@link
 * AudioTrack#getPlaybackHeadPosition()} and {@link AudioTrack#getTimestamp(AudioTimestamp)}.
 *
 * <p>Call {@link #setAudioTrack(AudioTrack, int, int, int)} to set the audio track to wrap. Call
 * {@link #mayHandleBuffer(long)} if there is input data to write to the track. If it returns false,
 * the audio track position is stabilizing and no data may be written. Call {@link #start()}
 * immediately before calling {@link AudioTrack#play()}. Call {@link #pause()} when pausing the
 * track. Call {@link #handleEndOfStream(long)} when no more data will be written to the track. When
 * the audio track will no longer be used, call {@link #reset()}.
 */
/* package */ final class AudioTrackPositionTracker {

  /** Listener for position tracker events. */
  public interface Listener {

    /**
     * Called when the frame position is too far from the expected frame position.
     *
     * @param audioTimestampPositionFrames The frame position of the last known audio track
     *     timestamp.
     * @param audioTimestampSystemTimeUs The system time associated with the last known audio track
     *     timestamp, in microseconds.
     * @param systemTimeUs The current time.
     * @param playbackPositionUs The current playback head position in microseconds.
     */
    void onPositionFramesMismatch(
        long audioTimestampPositionFrames,
        long audioTimestampSystemTimeUs,
        long systemTimeUs,
        long playbackPositionUs);

    /**
     * Called when the system time associated with the last known audio track timestamp is
     * unexpectedly far from the current time.
     *
     * @param audioTimestampPositionFrames The frame position of the last known audio track
     *     timestamp.
     * @param audioTimestampSystemTimeUs The system time associated with the last known audio track
     *     timestamp, in microseconds.
     * @param systemTimeUs The current time.
     * @param playbackPositionUs The current playback head position in microseconds.
     */
    void onSystemTimeUsMismatch(
        long audioTimestampPositionFrames,
        long audioTimestampSystemTimeUs,
        long systemTimeUs,
        long playbackPositionUs);

    /**
     * Called when the audio track has provided an invalid latency.
     *
     * @param latencyUs The reported latency in microseconds.
     */
    void onInvalidLatency(long latencyUs);

    /**
     * Called when the audio track runs out of data to play.
     *
     * @param bufferSize The size of the sink's buffer, in bytes.
     * @param bufferSizeMs The size of the sink's buffer, in milliseconds, if it is configured for
     *     PCM output. {@link C#TIME_UNSET} if it is configured for encoded audio output, as the
     *     buffered media can have a variable bitrate so the duration may be unknown.
     */
    void onUnderrun(int bufferSize, long bufferSizeMs);
  }

  /** {@link AudioTrack} playback states. */
  @Retention(RetentionPolicy.SOURCE)
  @IntDef({PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, PLAYSTATE_PLAYING})
  private @interface PlayState {}
  /** @see AudioTrack#PLAYSTATE_STOPPED */
  private static final int PLAYSTATE_STOPPED = AudioTrack.PLAYSTATE_STOPPED;
  /** @see AudioTrack#PLAYSTATE_PAUSED */
  private static final int PLAYSTATE_PAUSED = AudioTrack.PLAYSTATE_PAUSED;
  /** @see AudioTrack#PLAYSTATE_PLAYING */
  private static final int PLAYSTATE_PLAYING = AudioTrack.PLAYSTATE_PLAYING;

  /**
   * AudioTrack timestamps are deemed spurious if they are offset from the system clock by more than
   * this amount.
   *
   * <p>This is a fail safe that should not be required on correctly functioning devices.
   */
  private static final long MAX_AUDIO_TIMESTAMP_OFFSET_US = 5 * C.MICROS_PER_SECOND;

  /**
   * AudioTrack latencies are deemed impossibly large if they are greater than this amount.
   *
   * <p>This is a fail safe that should not be required on correctly functioning devices.
   */
  private static final long MAX_LATENCY_US = 5 * C.MICROS_PER_SECOND;

  private static final long FORCE_RESET_WORKAROUND_TIMEOUT_MS = 200;

  private static final int MAX_PLAYHEAD_OFFSET_COUNT = 10;
  private static final int MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US = 30000;
  private static final int MIN_TIMESTAMP_SAMPLE_INTERVAL_US = 500000;
  private static final int MIN_LATENCY_SAMPLE_INTERVAL_US = 500000;

  private final Listener listener;
  private final long[] playheadOffsets;
  private AudioTrack audioTrack;
  private int outputSampleRate;
  private int bufferSize;
  private long bufferSizeUs;

  private long smoothedPlayheadOffsetUs;
  private long lastPlayheadSampleTimeUs;
  private boolean audioTimestampSet;
  private long lastTimestampSampleTimeUs;

  private Method getLatencyMethod;
  private int outputPcmFrameSize;
  private long resumeSystemTimeUs;
  private long latencyUs;
  private boolean hasData;

  private boolean needsPassthroughWorkarounds;
  private @Nullable AudioTimestampV19 audioTimestamp;
  private boolean shouldSampleLatency;
  private long lastLatencySampleTimeUs;
  private long lastRawPlaybackHeadPosition;
  private long rawPlaybackHeadWrapCount;
  private long passthroughWorkaroundPauseOffset;
  private int nextPlayheadOffsetIndex;
  private int playheadOffsetCount;
  private long stopTimestampUs;
  private long forceResetWorkaroundTimeMs;
  private long stopPlaybackHeadPosition;
  private long endPlaybackHeadPosition;

  /**
   * Creates a new audio track position tracker.
   *
   * @param listener A listener for position tracking events.
   */
  public AudioTrackPositionTracker(Listener listener) {
    this.listener = Assertions.checkNotNull(listener);
    if (Util.SDK_INT >= 18) {
      try {
        getLatencyMethod = AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
      } catch (NoSuchMethodException e) {
        // There's no guarantee this method exists. Do nothing.
      }
    }
    playheadOffsets = new long[MAX_PLAYHEAD_OFFSET_COUNT];
  }

  /**
   * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
   * track's position, until the next call to {@link #reset()}.
   *
   * @param audioTrack The audio track to wrap.
   * @param outputEncoding The encoding of the audio track.
   * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
   *     otherwise.
   * @param bufferSize The audio track buffer size in bytes.
   */
  public void setAudioTrack(
      AudioTrack audioTrack,
      @C.Encoding int outputEncoding,
      int outputPcmFrameSize,
      int bufferSize) {
    this.audioTrack = audioTrack;
    this.bufferSize = bufferSize;
    this.outputPcmFrameSize = outputPcmFrameSize;
    outputSampleRate = audioTrack.getSampleRate();
    needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
    boolean isOutputPcm = Util.isEncodingPcm(outputEncoding);
    bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
    if (Util.SDK_INT >= 19) {
      audioTimestamp = new AudioTimestampV19(audioTrack);
    }
    shouldSampleLatency = isOutputPcm && getLatencyMethod != null;
    lastRawPlaybackHeadPosition = 0;
    rawPlaybackHeadWrapCount = 0;
    passthroughWorkaroundPauseOffset = 0;
    hasData = false;
    stopTimestampUs = C.TIME_UNSET;
    forceResetWorkaroundTimeMs = C.TIME_UNSET;
    latencyUs = 0;
  }

  public long getCurrentPositionUs(boolean sourceEnded) {
    if (audioTrack.getPlayState() == PLAYSTATE_PLAYING) {
      maybeSampleSyncParams();
    }

    // If the device supports it, use the playback timestamp from AudioTrack.getTimestamp.
    // Otherwise, derive a smoothed position by sampling the track's frame position.
    long systemTimeUs = System.nanoTime() / 1000;
    long positionUs;
    if (audioTimestamp != null && audioTimestampSet) {
      // Calculate the speed-adjusted position using the timestamp (which may be in the future).
      long elapsedSinceTimestampUs = systemTimeUs - audioTimestamp.getTimestampSystemTimeUs();
      long elapsedSinceTimestampFrames = durationUsToFrames(elapsedSinceTimestampUs);
      long elapsedFrames =
          audioTimestamp.getTimestampPositionFrames() + elapsedSinceTimestampFrames;
      positionUs = framesToDurationUs(elapsedFrames);
    } else {
      if (playheadOffsetCount == 0) {
        // The AudioTrack has started, but we don't have any samples to compute a smoothed position.
        positionUs = getPlaybackHeadPositionUs();
      } else {
        // getPlaybackHeadPositionUs() only has a granularity of ~20 ms, so we base the position off
        // the system clock (and a smoothed offset between it and the playhead position) so as to
        // prevent jitter in the reported positions.
        positionUs = systemTimeUs + smoothedPlayheadOffsetUs;
      }
      if (!sourceEnded) {
        positionUs -= latencyUs;
      }
    }
    return positionUs;
  }

  /** Starts position tracking. Must be called immediately before {@link AudioTrack#play()}. */
  public void start() {
    resumeSystemTimeUs = System.nanoTime() / 1000;
  }

  /** Returns whether the audio track is in the playing state. */
  public boolean isPlaying() {
    return audioTrack.getPlayState() == PLAYSTATE_PLAYING;
  }

  /**
   * Checks the state of the audio track and returns whether the caller can write data to the track.
   * Notifies {@link Listener#onUnderrun(int, long)} if the track has underrun.
   *
   * @param writtenFrames The number of frames that have been written.
   * @return Whether the caller can write data to the track.
   */
  public boolean mayHandleBuffer(long writtenFrames) {
    @PlayState int playState = audioTrack.getPlayState();
    if (needsPassthroughWorkarounds) {
      // An AC-3 audio track continues to play data written while it is paused. Stop writing so its
      // buffer empties. See [Internal: b/18899620].
      if (playState == PLAYSTATE_PAUSED) {
        // We force an underrun to pause the track, so don't notify the listener in this case.
        hasData = false;
        return false;
      }

      // A new AC-3 audio track's playback position continues to increase from the old track's
      // position for a short time after is has been released. Avoid writing data until the playback
      // head position actually returns to zero.
      if (playState == PLAYSTATE_STOPPED && getPlaybackHeadPosition() == 0) {
        return false;
      }
    }

    boolean hadData = hasData;
    hasData = hasPendingData(writtenFrames);
    if (hadData && !hasData && playState != PLAYSTATE_STOPPED && listener != null) {
      listener.onUnderrun(bufferSize, C.usToMs(bufferSizeUs));
    }

    return true;
  }

  /**
   * Returns an estimate of the number of additional bytes that can be written to the audio track's
   * buffer without running out of space.
   *
   * <p>May only be called if the output encoding is one of the PCM encodings.
   *
   * @param writtenBytes The number of bytes written to the audio track so far.
   * @return An estimate of the number of bytes that can be written.
   */
  public int getAvailableBufferSize(long writtenBytes) {
    int bytesPending = (int) (writtenBytes - (getPlaybackHeadPosition() * outputPcmFrameSize));
    return bufferSize - bytesPending;
  }

  /** Returns whether the track is in an invalid state and must be recreated. */
  public boolean isStalled(long writtenFrames) {
    return forceResetWorkaroundTimeMs != C.TIME_UNSET
        && writtenFrames > 0
        && SystemClock.elapsedRealtime() - forceResetWorkaroundTimeMs
            >= FORCE_RESET_WORKAROUND_TIMEOUT_MS;
  }

  /**
   * Records the writing position at which the stream ended, so that the reported position can
   * continue to increment while remaining data is played out.
   *
   * @param writtenFrames The number of frames that have been written.
   */
  public void handleEndOfStream(long writtenFrames) {
    stopPlaybackHeadPosition = getPlaybackHeadPosition();
    stopTimestampUs = SystemClock.elapsedRealtime() * 1000;
    endPlaybackHeadPosition = writtenFrames;
  }

  /**
   * Returns whether the audio track has any pending data to play out at its current position.
   *
   * @param writtenFrames The number of frames written to the audio track.
   * @return Whether the audio track has any pending data to play out.
   */
  public boolean hasPendingData(long writtenFrames) {
    return writtenFrames > getPlaybackHeadPosition() || forceHasPendingData();
  }

  /**
   * Pauses the audio track position tracker, returning whether the audio track needs to be paused
   * to cause playback to pause. If {@code false} is returned the audio track will pause without
   * further interaction, as the end of stream has been handled.
   */
  public boolean pause() {
    resetSyncParams();
    return stopTimestampUs == C.TIME_UNSET;
  }

  /**
   * Resets the position tracker. Should be called when the audio track previously passed to {@link
   * #setAudioTrack(AudioTrack, int, int, int)} is no longer in use.
   */
  public void reset() {
    resetSyncParams();
    audioTrack = null;
    audioTimestamp = null;
  }

  private void maybeSampleSyncParams() {
    long playbackPositionUs = getPlaybackHeadPositionUs();
    if (playbackPositionUs == 0) {
      // The AudioTrack hasn't output anything yet.
      return;
    }
    long systemTimeUs = System.nanoTime() / 1000;
    if (systemTimeUs - lastPlayheadSampleTimeUs >= MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US) {
      // Take a new sample and update the smoothed offset between the system clock and the playhead.
      playheadOffsets[nextPlayheadOffsetIndex] = playbackPositionUs - systemTimeUs;
      nextPlayheadOffsetIndex = (nextPlayheadOffsetIndex + 1) % MAX_PLAYHEAD_OFFSET_COUNT;
      if (playheadOffsetCount < MAX_PLAYHEAD_OFFSET_COUNT) {
        playheadOffsetCount++;
      }
      lastPlayheadSampleTimeUs = systemTimeUs;
      smoothedPlayheadOffsetUs = 0;
      for (int i = 0; i < playheadOffsetCount; i++) {
        smoothedPlayheadOffsetUs += playheadOffsets[i] / playheadOffsetCount;
      }
    }

    if (needsPassthroughWorkarounds) {
      // Don't sample the timestamp and latency if this is an AC-3 passthrough AudioTrack on
      // platform API versions 21/22, as incorrect values are returned. See [Internal: b/21145353].
      return;
    }
    maybeUpdateAudioTimestamp(systemTimeUs, playbackPositionUs);
    maybeUpdateLatency(systemTimeUs);
  }

  private void maybeUpdateAudioTimestamp(long systemTimeUs, long playbackPositionUs) {
    if (audioTimestamp != null
        && systemTimeUs - lastTimestampSampleTimeUs >= MIN_TIMESTAMP_SAMPLE_INTERVAL_US) {
      audioTimestampSet = audioTimestamp.maybeUpdateTimestamp();
      if (audioTimestampSet) {
        // Perform sanity checks on the timestamp.
        long audioTimestampSystemTimeUs = audioTimestamp.getTimestampSystemTimeUs();
        long audioTimestampPositionFrames = audioTimestamp.getTimestampPositionFrames();
        if (audioTimestampSystemTimeUs < resumeSystemTimeUs) {
          // The timestamp corresponds to a time before the track was most recently resumed.
          audioTimestampSet = false;
        } else if (Math.abs(audioTimestampSystemTimeUs - systemTimeUs)
            > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
          listener.onSystemTimeUsMismatch(
              audioTimestampPositionFrames,
              audioTimestampSystemTimeUs,
              systemTimeUs,
              playbackPositionUs);
          audioTimestampSet = false;
        } else if (Math.abs(framesToDurationUs(audioTimestampPositionFrames) - playbackPositionUs)
            > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
          listener.onPositionFramesMismatch(
              audioTimestampPositionFrames,
              audioTimestampSystemTimeUs,
              systemTimeUs,
              playbackPositionUs);
          audioTimestampSet = false;
        }
      }
      lastTimestampSampleTimeUs = systemTimeUs;
    }
  }

  private void maybeUpdateLatency(long systemTimeUs) {
    if (shouldSampleLatency
        && systemTimeUs - lastLatencySampleTimeUs >= MIN_LATENCY_SAMPLE_INTERVAL_US) {
      try {
        // Compute the audio track latency, excluding the latency due to the buffer (leaving
        // latency due to the mixer and audio hardware driver).
        latencyUs =
            (Integer) getLatencyMethod.invoke(audioTrack, (Object[]) null) * 1000L - bufferSizeUs;
        // Sanity check that the latency is non-negative.
        latencyUs = Math.max(latencyUs, 0);
        // Sanity check that the latency isn't too large.
        if (latencyUs > MAX_LATENCY_US) {
          listener.onInvalidLatency(latencyUs);
          latencyUs = 0;
        }
      } catch (Exception e) {
        // The method existed, but doesn't work. Don't try again.
        getLatencyMethod = null;
      }
      lastLatencySampleTimeUs = systemTimeUs;
    }
  }

  private long framesToDurationUs(long frameCount) {
    return (frameCount * C.MICROS_PER_SECOND) / outputSampleRate;
  }

  private long durationUsToFrames(long durationUs) {
    return (durationUs * outputSampleRate) / C.MICROS_PER_SECOND;
  }

  private void resetSyncParams() {
    smoothedPlayheadOffsetUs = 0;
    playheadOffsetCount = 0;
    nextPlayheadOffsetIndex = 0;
    lastPlayheadSampleTimeUs = 0;
    audioTimestampSet = false;
    lastTimestampSampleTimeUs = 0;
  }

  /**
   * If passthrough workarounds are enabled, pausing is implemented by forcing the AudioTrack to
   * underrun. In this case, still behave as if we have pending data, otherwise writing won't
   * resume.
   */
  private boolean forceHasPendingData() {
    return needsPassthroughWorkarounds
        && audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PAUSED
        && getPlaybackHeadPosition() == 0;
  }

  /**
   * Returns whether to work around problems with passthrough audio tracks. See [Internal:
   * b/18899620, b/19187573, b/21145353].
   */
  private static boolean needsPassthroughWorkarounds(@C.Encoding int outputEncoding) {
    return Util.SDK_INT < 23
        && (outputEncoding == C.ENCODING_AC3 || outputEncoding == C.ENCODING_E_AC3);
  }

  private long getPlaybackHeadPositionUs() {
    return framesToDurationUs(getPlaybackHeadPosition());
  }

  /**
   * {@link AudioTrack#getPlaybackHeadPosition()} returns a value intended to be interpreted as an
   * unsigned 32 bit integer, which also wraps around periodically. This method returns the playback
   * head position as a long that will only wrap around if the value exceeds {@link Long#MAX_VALUE}
   * (which in practice will never happen).
   *
   * @return The playback head position, in frames.
   */
  private long getPlaybackHeadPosition() {
    if (stopTimestampUs != C.TIME_UNSET) {
      // Simulate the playback head position up to the total number of frames submitted.
      long elapsedTimeSinceStopUs = (SystemClock.elapsedRealtime() * 1000) - stopTimestampUs;
      long framesSinceStop = (elapsedTimeSinceStopUs * outputSampleRate) / C.MICROS_PER_SECOND;
      return Math.min(endPlaybackHeadPosition, stopPlaybackHeadPosition + framesSinceStop);
    }

    int state = audioTrack.getPlayState();
    if (state == PLAYSTATE_STOPPED) {
      // The audio track hasn't been started.
      return 0;
    }

    long rawPlaybackHeadPosition = 0xFFFFFFFFL & audioTrack.getPlaybackHeadPosition();
    if (needsPassthroughWorkarounds) {
      // Work around an issue with passthrough/direct AudioTracks on platform API versions 21/22
      // where the playback head position jumps back to zero on paused passthrough/direct audio
      // tracks. See [Internal: b/19187573].
      if (state == PLAYSTATE_PAUSED && rawPlaybackHeadPosition == 0) {
        passthroughWorkaroundPauseOffset = lastRawPlaybackHeadPosition;
      }
      rawPlaybackHeadPosition += passthroughWorkaroundPauseOffset;
    }

    if (Util.SDK_INT <= 28) {
      if (rawPlaybackHeadPosition == 0
          && lastRawPlaybackHeadPosition > 0
          && state == PLAYSTATE_PLAYING) {
        // If connecting a Bluetooth audio device fails, the AudioTrack may be left in a state
        // where its Java API is in the playing state, but the native track is stopped. When this
        // happens the playback head position gets stuck at zero. In this case, return the old
        // playback head position and force the track to be reset after
        // {@link #FORCE_RESET_WORKAROUND_TIMEOUT_MS} has elapsed.
        if (forceResetWorkaroundTimeMs == C.TIME_UNSET) {
          forceResetWorkaroundTimeMs = SystemClock.elapsedRealtime();
        }
        return lastRawPlaybackHeadPosition;
      } else {
        forceResetWorkaroundTimeMs = C.TIME_UNSET;
      }
    }

    if (lastRawPlaybackHeadPosition > rawPlaybackHeadPosition) {
      // The value must have wrapped around.
      rawPlaybackHeadWrapCount++;
    }
    lastRawPlaybackHeadPosition = rawPlaybackHeadPosition;
    return rawPlaybackHeadPosition + (rawPlaybackHeadWrapCount << 32);
  }

  @TargetApi(19)
  private static final class AudioTimestampV19 {

    private final AudioTrack audioTrack;
    private final AudioTimestamp audioTimestamp;

    private long rawTimestampFramePositionWrapCount;
    private long lastTimestampRawPositionFrames;
    private long lastTimestampPositionFrames;

    /**
     * Creates a new {@link AudioTimestamp} wrapper.
     *
     * @param audioTrack The audio track that will provide timestamps.
     */
    public AudioTimestampV19(AudioTrack audioTrack) {
      this.audioTrack = audioTrack;
      audioTimestamp = new AudioTimestamp();
    }

    /**
     * Attempts to update the audio track timestamp. Returns {@code true} if the timestamp was
     * updated, in which case the updated timestamp system time and position can be accessed with
     * {@link #getTimestampSystemTimeUs()} and {@link #getTimestampPositionFrames()}. Returns {@code
     * false} if no timestamp is available, in which case those methods should not be called.
     */
    public boolean maybeUpdateTimestamp() {
      boolean updated = audioTrack.getTimestamp(audioTimestamp);
      if (updated) {
        long rawPositionFrames = audioTimestamp.framePosition;
        if (lastTimestampRawPositionFrames > rawPositionFrames) {
          // The value must have wrapped around.
          rawTimestampFramePositionWrapCount++;
        }
        lastTimestampRawPositionFrames = rawPositionFrames;
        lastTimestampPositionFrames =
            rawPositionFrames + (rawTimestampFramePositionWrapCount << 32);
      }
      return updated;
    }

    /**
     * Returns the {@link android.media.AudioTimestamp#nanoTime} obtained during the most recent
     * call to {@link #maybeUpdateTimestamp()} that returned true.
     *
     * @return The nanoTime obtained during the most recent call to {@link #maybeUpdateTimestamp()}
     *     that returned true.
     */
    public long getTimestampSystemTimeUs() {
      return audioTimestamp.nanoTime / 1000;
    }

    /**
     * Returns the {@link android.media.AudioTimestamp#framePosition} obtained during the most
     * recent call to {@link #maybeUpdateTimestamp()} that returned true. The value is adjusted so
     * that wrap around only occurs if the value exceeds {@link Long#MAX_VALUE} (which in practice
     * will never happen).
     *
     * @return The framePosition obtained during the most recent call to {@link
     *     #maybeUpdateTimestamp()} that returned true.
     */
    public long getTimestampPositionFrames() {
      return lastTimestampPositionFrames;
    }
  }
}
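The Javadoc above prescribes a strict call order for the new class. A minimal sketch of a conforming caller, assuming a configured AudioTrack and a Listener implementation (the encoding, frame size, and loop structure here are illustrative, not taken from the commit):

    // Hypothetical caller; in practice DefaultAudioSink is the only user,
    // since the class is package-private.
    AudioTrackPositionTracker tracker = new AudioTrackPositionTracker(listener);
    tracker.setAudioTrack(
        audioTrack, C.ENCODING_PCM_16BIT, /* outputPcmFrameSize= */ 4, bufferSize);
    tracker.start(); // Immediately before AudioTrack.play().
    audioTrack.play();
    while (hasMoreInput()) { // hasMoreInput() is a stand-in for the caller's own logic.
      if (tracker.mayHandleBuffer(writtenFrames)) {
        // Safe to write more data to the track; update writtenFrames afterwards.
      }
      long positionUs = tracker.getCurrentPositionUs(/* sourceEnded= */ false);
    }
    tracker.handleEndOfStream(writtenFrames); // No more data will be written.
    tracker.pause();
    tracker.reset(); // The track will no longer be used.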
DefaultAudioSink.java
@@ -19,7 +19,6 @@ import android.annotation.SuppressLint;
 import android.annotation.TargetApi;
 import android.media.AudioFormat;
 import android.media.AudioManager;
-import android.media.AudioTimestamp;
 import android.media.AudioTrack;
 import android.os.ConditionVariable;
 import android.os.SystemClock;
@@ -32,7 +31,6 @@ import com.google.android.exoplayer2.util.Assertions;
 import com.google.android.exoplayer2.util.Util;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
-import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
 import java.util.ArrayDeque;
@@ -50,8 +48,8 @@ import java.util.ArrayList;
 public final class DefaultAudioSink implements AudioSink {

   /**
-   * Thrown when {@link AudioTrack#getTimestamp} returns a spurious timestamp, if
-   * {@link #failOnSpuriousAudioTimestamp} is set.
+   * Thrown when the audio track has provided a spurious timestamp, if {@link
+   * #failOnSpuriousAudioTimestamp} is set.
    */
   public static final class InvalidAudioTrackTimestampException extends RuntimeException {

@@ -84,22 +82,6 @@ public final class DefaultAudioSink implements AudioSink {
    */
   private static final int BUFFER_MULTIPLICATION_FACTOR = 4;

-  /** {@link AudioTrack} playback states. */
-  @Retention(RetentionPolicy.SOURCE)
-  @IntDef({PLAYSTATE_STOPPED, PLAYSTATE_PAUSED, PLAYSTATE_PLAYING})
-  private @interface PlayState {}
-  /**
-   * @see AudioTrack#PLAYSTATE_STOPPED
-   */
-  private static final int PLAYSTATE_STOPPED = AudioTrack.PLAYSTATE_STOPPED;
-  /**
-   * @see AudioTrack#PLAYSTATE_PAUSED
-   */
-  private static final int PLAYSTATE_PAUSED = AudioTrack.PLAYSTATE_PAUSED;
-  /**
-   * @see AudioTrack#PLAYSTATE_PLAYING
-   */
-  private static final int PLAYSTATE_PLAYING = AudioTrack.PLAYSTATE_PLAYING;
   /**
    * @see AudioTrack#ERROR_BAD_VALUE
    */
@@ -124,21 +106,6 @@ public final class DefaultAudioSink implements AudioSink {

   private static final String TAG = "AudioTrack";

-  /**
-   * AudioTrack timestamps are deemed spurious if they are offset from the system clock by more
-   * than this amount.
-   * <p>
-   * This is a fail safe that should not be required on correctly functioning devices.
-   */
-  private static final long MAX_AUDIO_TIMESTAMP_OFFSET_US = 5 * C.MICROS_PER_SECOND;
-
-  /**
-   * AudioTrack latencies are deemed impossibly large if they are greater than this amount.
-   * <p>
-   * This is a fail safe that should not be required on correctly functioning devices.
-   */
-  private static final long MAX_LATENCY_US = 5 * C.MICROS_PER_SECOND;
-
   /**
    * Represents states of the {@link #startMediaTimeUs} value.
    */
@@ -149,10 +116,6 @@ public final class DefaultAudioSink implements AudioSink {
   private static final int START_IN_SYNC = 1;
   private static final int START_NEED_SYNC = 2;

-  private static final int MAX_PLAYHEAD_OFFSET_COUNT = 10;
-  private static final int MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US = 30000;
-  private static final int MIN_TIMESTAMP_SAMPLE_INTERVAL_US = 500000;
-
   /**
    * Whether to enable a workaround for an issue where an audio effect does not keep its session
    * active across releasing/initializing a new audio track, on platform builds where
@@ -179,9 +142,7 @@ public final class DefaultAudioSink implements AudioSink {
   private final AudioProcessor[] toIntPcmAvailableAudioProcessors;
   private final AudioProcessor[] toFloatPcmAvailableAudioProcessors;
   private final ConditionVariable releasingConditionVariable;
-  private final long[] playheadOffsets;
-  private final AudioTrackUtil audioTrackUtil;
-  private final AudioTrackPositionErrorListener audioTrackPositionErrorListener;
+  private final AudioTrackPositionTracker audioTrackPositionTracker;
   private final ArrayDeque<PlaybackParametersCheckpoint> playbackParametersCheckpoints;

   @Nullable private Listener listener;
@@ -200,7 +161,6 @@ public final class DefaultAudioSink implements AudioSink {
   private boolean processingEnabled;
   private boolean canApplyPlaybackParameters;
   private int bufferSize;
-  private long bufferSizeUs;

   private PlaybackParameters drainingPlaybackParameters;
   private PlaybackParameters playbackParameters;
@@ -210,14 +170,6 @@ public final class DefaultAudioSink implements AudioSink {
   private ByteBuffer avSyncHeader;
   private int bytesUntilNextAvSync;

-  private int nextPlayheadOffsetIndex;
-  private int playheadOffsetCount;
-  private long smoothedPlayheadOffsetUs;
-  private long lastPlayheadSampleTimeUs;
-  private boolean audioTimestampSet;
-  private long lastTimestampSampleTimeUs;
-
-  private Method getLatencyMethod;
   private int pcmFrameSize;
   private long submittedPcmBytes;
   private long submittedEncodedFrames;
@@ -227,8 +179,6 @@ public final class DefaultAudioSink implements AudioSink {
   private int framesPerEncodedSample;
   private @StartMediaTimeState int startMediaTimeState;
   private long startMediaTimeUs;
-  private long resumeSystemTimeUs;
-  private long latencyUs;
   private float volume;

   private AudioProcessor[] audioProcessors;
@@ -243,7 +193,6 @@ public final class DefaultAudioSink implements AudioSink {
   private boolean playing;
   private int audioSessionId;
   private boolean tunneling;
-  private boolean hasData;
   private long lastFeedElapsedRealtimeMs;

   /**
@@ -274,19 +223,7 @@ public final class DefaultAudioSink implements AudioSink {
     this.audioCapabilities = audioCapabilities;
     this.enableConvertHighResIntPcmToFloat = enableConvertHighResIntPcmToFloat;
     releasingConditionVariable = new ConditionVariable(true);
-    if (Util.SDK_INT >= 18) {
-      try {
-        getLatencyMethod = AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
-      } catch (NoSuchMethodException e) {
-        // There's no guarantee this method exists. Do nothing.
-      }
-    }
-    if (Util.SDK_INT >= 19) {
-      audioTrackUtil = new AudioTrackUtilV19();
-    } else {
-      audioTrackUtil = new AudioTrackUtil();
-    }
-    audioTrackPositionErrorListener = new DefaultAudioTrackPositionErrorListener();
+    audioTrackPositionTracker = new AudioTrackPositionTracker(new PositionTrackerListener());
     channelMappingAudioProcessor = new ChannelMappingAudioProcessor();
     trimmingAudioProcessor = new TrimmingAudioProcessor();
     sonicAudioProcessor = new SonicAudioProcessor();
@@ -298,7 +235,6 @@ public final class DefaultAudioSink implements AudioSink {
         audioProcessors, 0, toIntPcmAvailableAudioProcessors, 3, audioProcessors.length);
     toIntPcmAvailableAudioProcessors[3 + audioProcessors.length] = sonicAudioProcessor;
     toFloatPcmAvailableAudioProcessors = new AudioProcessor[] {new FloatResamplingAudioProcessor()};
-    playheadOffsets = new long[MAX_PLAYHEAD_OFFSET_COUNT];
     volume = 1.0f;
     startMediaTimeState = START_NOT_SET;
     audioAttributes = AudioAttributes.DEFAULT;
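The reflective AudioTrack.getLatency() lookup removed from this constructor now happens in AudioTrackPositionTracker's constructor instead. As a standalone illustration of the pattern (getLatency() is a hidden platform method, so both the lookup and the call can fail):

    Method getLatencyMethod = null;
    if (Util.SDK_INT >= 18) {
      try {
        getLatencyMethod = AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
      } catch (NoSuchMethodException e) {
        // There's no guarantee this method exists. Do nothing.
      }
    }
    // Later, guard the invocation too, since the method may exist but not work:
    try {
      int latencyMs = (Integer) getLatencyMethod.invoke(audioTrack, (Object[]) null);
    } catch (Exception e) {
      getLatencyMethod = null; // Don't try again.
    }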
@@ -331,40 +267,10 @@ public final class DefaultAudioSink implements AudioSink {

   @Override
   public long getCurrentPositionUs(boolean sourceEnded) {
-    if (!hasCurrentPositionUs()) {
+    if (!isInitialized() || startMediaTimeState == START_NOT_SET) {
       return CURRENT_POSITION_NOT_SET;
     }
-    if (audioTrack.getPlayState() == PLAYSTATE_PLAYING) {
-      maybeSampleSyncParams();
-    }
-
-    // If the device supports it, use the playback timestamp from AudioTrack.getTimestamp.
-    // Otherwise, derive a smoothed position by sampling the track's frame position.
-    long systemClockUs = System.nanoTime() / 1000;
-    long positionUs;
-    if (audioTimestampSet) {
-      // Calculate the speed-adjusted position using the timestamp (which may be in the future).
-      long elapsedSinceTimestampUs = systemClockUs - audioTrackUtil.getTimestampSystemTimeUs();
-      long elapsedSinceTimestampFrames = durationUsToFrames(elapsedSinceTimestampUs);
-      long elapsedFrames =
-          audioTrackUtil.getTimestampPositionFrames() + elapsedSinceTimestampFrames;
-      positionUs = framesToDurationUs(elapsedFrames);
-    } else {
-      if (playheadOffsetCount == 0) {
-        // The AudioTrack has started, but we don't have any samples to compute a smoothed position.
-        positionUs = audioTrackUtil.getPositionUs();
-      } else {
-        // getPlayheadPositionUs() only has a granularity of ~20 ms, so we base the position off the
-        // system clock (and a smoothed offset between it and the playhead position) so as to
-        // prevent jitter in the reported positions.
-        positionUs = systemClockUs + smoothedPlayheadOffsetUs;
-      }
-      if (!sourceEnded) {
-        positionUs -= latencyUs;
-      }
-    }
-
+    long positionUs = audioTrackPositionTracker.getCurrentPositionUs(sourceEnded);
     positionUs = Math.min(positionUs, framesToDurationUs(getWrittenFrames()));
     return startMediaTimeUs + applySpeedup(positionUs);
   }
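The block removed above is the playhead smoothing that now lives in the tracker: up to ten offsets between the playhead position and the system clock are kept and averaged, because getPlaybackHeadPosition() only updates at a ~20 ms granularity while the system clock does not jitter. The core of the estimate, restated in isolation (field names as in the new class):

    // Sampled at most once per MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US (30 ms):
    playheadOffsets[nextPlayheadOffsetIndex] = playbackPositionUs - systemTimeUs;
    nextPlayheadOffsetIndex = (nextPlayheadOffsetIndex + 1) % MAX_PLAYHEAD_OFFSET_COUNT;
    smoothedPlayheadOffsetUs = 0;
    for (int i = 0; i < playheadOffsetCount; i++) {
      // Each term is divided individually, presumably to keep the running sum small.
      smoothedPlayheadOffsetUs += playheadOffsets[i] / playheadOffsetCount;
    }
    // Between samples, the reported position tracks the smooth system clock:
    positionUs = systemTimeUs + smoothedPlayheadOffsetUs;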
@@ -497,8 +403,6 @@ public final class DefaultAudioSink implements AudioSink {
         bufferSize = (int) (PASSTHROUGH_BUFFER_DURATION_US * 192 * 6 * 1024 / C.MICROS_PER_SECOND);
       }
     }
-    bufferSizeUs =
-        isInputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
   }

   private void resetAudioProcessors() {
@@ -557,17 +461,16 @@ public final class DefaultAudioSink implements AudioSink {
       }
     }

-    audioTrackUtil.reconfigure(audioTrack, needsPassthroughWorkarounds());
+    audioTrackPositionTracker.setAudioTrack(
+        audioTrack, outputEncoding, outputPcmFrameSize, bufferSize);
     setVolumeInternal();
-    hasData = false;
-    latencyUs = 0;
   }

   @Override
   public void play() {
     playing = true;
     if (isInitialized()) {
-      resumeSystemTimeUs = System.nanoTime() / 1000;
+      audioTrackPositionTracker.start();
       audioTrack.play();
     }
   }
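Note the ordering that the new API makes explicit: start() must run before AudioTrack.play() so that the tracker records the resume time first. The tracker later uses that time to reject stale timestamps, roughly (mirroring the checks in AudioTrackPositionTracker above):

    resumeSystemTimeUs = System.nanoTime() / 1000; // In start(), just before play().
    audioTrack.play();
    // ... later, when a new AudioTimestamp arrives:
    if (audioTimestampSystemTimeUs < resumeSystemTimeUs) {
      // The timestamp predates the most recent resume; discard it.
      audioTimestampSet = false;
    }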
@@ -592,30 +495,10 @@ public final class DefaultAudioSink implements AudioSink {
       }
     }

-    if (needsPassthroughWorkarounds()) {
-      // An AC-3 audio track continues to play data written while it is paused. Stop writing so its
-      // buffer empties. See [Internal: b/18899620].
-      if (audioTrack.getPlayState() == PLAYSTATE_PAUSED) {
-        // We force an underrun to pause the track, so don't notify the listener in this case.
-        hasData = false;
-        return false;
-      }
-
-      // A new AC-3 audio track's playback position continues to increase from the old track's
-      // position for a short time after is has been released. Avoid writing data until the playback
-      // head position actually returns to zero.
-      if (audioTrack.getPlayState() == PLAYSTATE_STOPPED
-          && audioTrack.getPlaybackHeadPosition() != 0) {
-        return false;
-      }
-    }
-
-    boolean hadData = hasData;
-    hasData = hasPendingData();
-    if (hadData && !hasData && audioTrack.getPlayState() != PLAYSTATE_STOPPED && listener != null) {
-      audioTrackPositionErrorListener.onUnderrun(bufferSize, C.usToMs(bufferSizeUs));
-    }
-
+    if (!audioTrackPositionTracker.mayHandleBuffer(getWrittenFrames())) {
+      return false;
+    }

     if (inputBuffer == null) {
       // We are seeing this buffer for the first time.
       if (!buffer.hasRemaining()) {
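The underrun notification moved into mayHandleBuffer essentially unchanged: an underrun is reported on the transition from having buffered data to having none, while the track is not stopped. The idiom, in isolation (as it appears in the new class):

    boolean hadData = hasData;
    hasData = hasPendingData(writtenFrames);
    if (hadData && !hasData && playState != PLAYSTATE_STOPPED && listener != null) {
      listener.onUnderrun(bufferSize, C.usToMs(bufferSizeUs));
    }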
@@ -694,7 +577,7 @@ public final class DefaultAudioSink implements AudioSink {
       return true;
     }

-    if (audioTrackUtil.needsReset(getWrittenFrames())) {
+    if (audioTrackPositionTracker.isStalled(getWrittenFrames())) {
       Log.w(TAG, "Resetting stalled audio track");
       reset();
       return true;
@@ -757,9 +640,7 @@ public final class DefaultAudioSink implements AudioSink {
     int bytesWritten = 0;
     if (Util.SDK_INT < 21) { // isInputPcm == true
       // Work out how many bytes we can write without the risk of blocking.
-      int bytesPending =
-          (int) (writtenPcmBytes - (audioTrackUtil.getPlaybackHeadPosition() * outputPcmFrameSize));
-      int bytesToWrite = bufferSize - bytesPending;
+      int bytesToWrite = audioTrackPositionTracker.getAvailableBufferSize(writtenPcmBytes);
       if (bytesToWrite > 0) {
         bytesToWrite = Math.min(bytesRemaining, bytesToWrite);
         bytesWritten = audioTrack.write(preV21OutputBuffer, preV21OutputBufferOffset, bytesToWrite);
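getAvailableBufferSize encapsulates the same arithmetic as the removed lines: the bytes still queued are those written but not yet played out. A worked example, assuming 16-bit stereo PCM (4-byte frames), 250000 bytes written, a playback head of 48000 frames, and a 65536-byte buffer (all values illustrative):

    int outputPcmFrameSize = 4; // 2 bytes per sample * 2 channels, assumed for the example.
    long playedBytes = 48000L * outputPcmFrameSize; // 192000 bytes played out.
    int bytesPending = (int) (250000L - playedBytes); // 58000 bytes still queued.
    int bytesToWrite = 65536 - bytesPending; // 7536 bytes fit without blocking.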
@@ -801,7 +682,8 @@ public final class DefaultAudioSink implements AudioSink {

     if (drainAudioProcessorsToEndOfStream()) {
       // The audio processors have drained, so drain the underlying audio track.
-      audioTrackUtil.handleEndOfStream(getWrittenFrames());
+      audioTrackPositionTracker.handleEndOfStream(getWrittenFrames());
+      audioTrack.stop();
       bytesUntilNextAvSync = 0;
       handledEndOfStream = true;
     }
@@ -844,9 +726,7 @@ public final class DefaultAudioSink implements AudioSink {

   @Override
   public boolean hasPendingData() {
-    return isInitialized()
-        && (getWrittenFrames() > audioTrackUtil.getPlaybackHeadPosition()
-            || overrideHasPendingData());
+    return isInitialized() && audioTrackPositionTracker.hasPendingData(getWrittenFrames());
   }

   @Override
@@ -942,9 +822,8 @@ public final class DefaultAudioSink implements AudioSink {
   @Override
   public void pause() {
     playing = false;
-    if (isInitialized()) {
-      resetSyncParams();
-      audioTrackUtil.pause();
+    if (isInitialized() && audioTrackPositionTracker.pause()) {
+      audioTrack.pause();
     }
   }
@@ -977,14 +856,13 @@ public final class DefaultAudioSink implements AudioSink {
     avSyncHeader = null;
     bytesUntilNextAvSync = 0;
     startMediaTimeState = START_NOT_SET;
-    resetSyncParams();
-    if (audioTrack.getPlayState() == PLAYSTATE_PLAYING) {
+    if (audioTrackPositionTracker.isPlaying()) {
       audioTrack.pause();
     }
     // AudioTrack.release can take some time, so we call it on a background thread.
     final AudioTrack toRelease = audioTrack;
     audioTrack = null;
-    audioTrackUtil.reconfigure(null, false);
+    audioTrackPositionTracker.reset();
     releasingConditionVariable.close();
     new Thread() {
       @Override
@@ -1033,13 +911,6 @@ public final class DefaultAudioSink implements AudioSink {
     }.start();
   }

-  /**
-   * Returns whether {@link #getCurrentPositionUs} can return the current playback position.
-   */
-  private boolean hasCurrentPositionUs() {
-    return isInitialized() && startMediaTimeState != START_NOT_SET;
-  }
-
   /**
    * Returns the underlying audio track {@code positionUs} with any applicable speedup applied.
    */
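One behavior-preserving subtlety above: AudioTrackUtil.handleEndOfStream used to call audioTrack.stop() itself, whereas the tracker's handleEndOfStream only records state, so the sink now calls audioTrack.stop() explicitly. After that point the tracker simulates the playback head from the wall clock, capped at the total frames written, so the reported position keeps advancing while the track drains (as in getPlaybackHeadPosition in the new file):

    long elapsedTimeSinceStopUs = (SystemClock.elapsedRealtime() * 1000) - stopTimestampUs;
    long framesSinceStop = (elapsedTimeSinceStopUs * outputSampleRate) / C.MICROS_PER_SECOND;
    return Math.min(endPlaybackHeadPosition, stopPlaybackHeadPosition + framesSinceStop);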
@@ -1067,85 +938,6 @@ public final class DefaultAudioSink implements AudioSink {
         positionUs - playbackParametersPositionUs, playbackParameters.speed);
   }

-  /**
-   * Updates the audio track latency and playback position parameters.
-   */
-  private void maybeSampleSyncParams() {
-    long playbackPositionUs = audioTrackUtil.getPositionUs();
-    if (playbackPositionUs == 0) {
-      // The AudioTrack hasn't output anything yet.
-      return;
-    }
-    long systemTimeUs = System.nanoTime() / 1000;
-    if (systemTimeUs - lastPlayheadSampleTimeUs >= MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US) {
-      // Take a new sample and update the smoothed offset between the system clock and the playhead.
-      playheadOffsets[nextPlayheadOffsetIndex] = playbackPositionUs - systemTimeUs;
-      nextPlayheadOffsetIndex = (nextPlayheadOffsetIndex + 1) % MAX_PLAYHEAD_OFFSET_COUNT;
-      if (playheadOffsetCount < MAX_PLAYHEAD_OFFSET_COUNT) {
-        playheadOffsetCount++;
-      }
-      lastPlayheadSampleTimeUs = systemTimeUs;
-      smoothedPlayheadOffsetUs = 0;
-      for (int i = 0; i < playheadOffsetCount; i++) {
-        smoothedPlayheadOffsetUs += playheadOffsets[i] / playheadOffsetCount;
-      }
-    }
-
-    if (needsPassthroughWorkarounds()) {
-      // Don't sample the timestamp and latency if this is an AC-3 passthrough AudioTrack on
-      // platform API versions 21/22, as incorrect values are returned. See [Internal: b/21145353].
-      return;
-    }
-
-    if (systemTimeUs - lastTimestampSampleTimeUs >= MIN_TIMESTAMP_SAMPLE_INTERVAL_US) {
-      audioTimestampSet = audioTrackUtil.updateTimestamp();
-      if (audioTimestampSet) {
-        // Perform sanity checks on the timestamp.
-        long audioTimestampSystemTimeUs = audioTrackUtil.getTimestampSystemTimeUs();
-        long audioTimestampPositionFrames = audioTrackUtil.getTimestampPositionFrames();
-        if (audioTimestampSystemTimeUs < resumeSystemTimeUs) {
-          // The timestamp corresponds to a time before the track was most recently resumed.
-          audioTimestampSet = false;
-        } else if (Math.abs(audioTimestampSystemTimeUs - systemTimeUs)
-            > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
-          audioTrackPositionErrorListener.onSystemTimeUsMismatch(
-              audioTimestampPositionFrames,
-              audioTimestampSystemTimeUs,
-              systemTimeUs,
-              playbackPositionUs);
-          audioTimestampSet = false;
-        } else if (Math.abs(framesToDurationUs(audioTimestampPositionFrames) - playbackPositionUs)
-            > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
-          audioTrackPositionErrorListener.onPositionFramesMismatch(
-              audioTimestampPositionFrames,
-              audioTimestampSystemTimeUs,
-              systemTimeUs,
-              playbackPositionUs);
-          audioTimestampSet = false;
-        }
-      }
-      if (getLatencyMethod != null && isInputPcm) {
-        try {
-          // Compute the audio track latency, excluding the latency due to the buffer (leaving
-          // latency due to the mixer and audio hardware driver).
-          latencyUs = (Integer) getLatencyMethod.invoke(audioTrack, (Object[]) null) * 1000L
-              - bufferSizeUs;
-          // Sanity check that the latency is non-negative.
-          latencyUs = Math.max(latencyUs, 0);
-          // Sanity check that the latency isn't too large.
-          if (latencyUs > MAX_LATENCY_US) {
-            audioTrackPositionErrorListener.onInvalidLatency(latencyUs);
-            latencyUs = 0;
-          }
-        } catch (Exception e) {
-          // The method existed, but doesn't work. Don't try again.
-          getLatencyMethod = null;
-        }
-      }
-      lastTimestampSampleTimeUs = systemTimeUs;
-    }
-  }
-
   private boolean isInitialized() {
     return audioTrack != null;
   }
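Both sanity checks in the removed block (and in the tracker's maybeUpdateAudioTimestamp) use the same 5-second bound, MAX_AUDIO_TIMESTAMP_OFFSET_US. A timestamp is discarded if either its system time or its implied position strays that far; at a 48 kHz output, for example, the position check trips at 48000 * 5 = 240000 frames of divergence:

    boolean spurious =
        Math.abs(audioTimestampSystemTimeUs - systemTimeUs) > MAX_AUDIO_TIMESTAMP_OFFSET_US
            || Math.abs(framesToDurationUs(audioTimestampPositionFrames) - playbackPositionUs)
                > MAX_AUDIO_TIMESTAMP_OFFSET_US;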
@ -1170,36 +962,6 @@ public final class DefaultAudioSink implements AudioSink {
|
|||||||
return isInputPcm ? (writtenPcmBytes / outputPcmFrameSize) : writtenEncodedFrames;
|
return isInputPcm ? (writtenPcmBytes / outputPcmFrameSize) : writtenEncodedFrames;
|
||||||
}
|
}
|
||||||

  private void resetSyncParams() {
    smoothedPlayheadOffsetUs = 0;
    playheadOffsetCount = 0;
    nextPlayheadOffsetIndex = 0;
    lastPlayheadSampleTimeUs = 0;
    audioTimestampSet = false;
    lastTimestampSampleTimeUs = 0;
  }

  /**
   * Returns whether to work around problems with passthrough audio tracks.
   * See [Internal: b/18899620, b/19187573, b/21145353].
   */
  private boolean needsPassthroughWorkarounds() {
    return Util.SDK_INT < 23
        && (outputEncoding == C.ENCODING_AC3 || outputEncoding == C.ENCODING_E_AC3);
  }

  /**
   * Returns whether the audio track should behave as though it has pending data. This is to work
   * around an issue on platform API versions 21/22 where AC-3 audio tracks can't be paused, so we
   * empty their buffers when paused. In this case, they should still behave as if they have
   * pending data, otherwise writing will never resume.
   */
  private boolean overrideHasPendingData() {
    return needsPassthroughWorkarounds()
        && audioTrack.getPlayState() == PLAYSTATE_PAUSED
        && audioTrack.getPlaybackHeadPosition() == 0;
  }
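
For context, a sketch of how `overrideHasPendingData()` is typically consumed; the `hasPendingData`/`getWrittenFrames` shape is assumed from the surrounding sink code rather than shown in this hunk. The override is OR-ed with the normal check that written frames are still ahead of the playback head:

@Override
public boolean hasPendingData() {
  // Data is pending if written frames remain unplayed, or if the platform workaround
  // says to pretend so (paused AC-3 passthrough tracks report an empty buffer).
  return isInitialized()
      && (getWrittenFrames() > audioTrackUtil.getPlaybackHeadPosition()
          || overrideHasPendingData());
}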

  private AudioTrack initializeAudioTrack() throws InitializationException {
    AudioTrack audioTrack;
    if (Util.SDK_INT >= 21) {
@@ -1349,239 +1111,6 @@ public final class DefaultAudioSink implements AudioSink {
    audioTrack.setStereoVolume(volume, volume);
  }

  /**
   * Wraps an {@link AudioTrack} to expose useful utility methods.
   */
  private static class AudioTrackUtil {

    private static final long FORCE_RESET_WORKAROUND_TIMEOUT_MS = 200;

    protected AudioTrack audioTrack;
    private boolean needsPassthroughWorkaround;
    private int sampleRate;
    private long lastRawPlaybackHeadPosition;
    private long rawPlaybackHeadWrapCount;
    private long passthroughWorkaroundPauseOffset;

    private long stopTimestampUs;
    private long forceResetWorkaroundTimeMs;
    private long stopPlaybackHeadPosition;
    private long endPlaybackHeadPosition;

    /**
     * Reconfigures the audio track utility helper to use the specified {@code audioTrack}.
     *
     * @param audioTrack The audio track to wrap.
     * @param needsPassthroughWorkaround Whether to work around issues with pausing AC-3
     *     passthrough audio tracks on platform API versions 21/22.
     */
    public void reconfigure(AudioTrack audioTrack, boolean needsPassthroughWorkaround) {
      this.audioTrack = audioTrack;
      this.needsPassthroughWorkaround = needsPassthroughWorkaround;
      stopTimestampUs = C.TIME_UNSET;
      forceResetWorkaroundTimeMs = C.TIME_UNSET;
      lastRawPlaybackHeadPosition = 0;
      rawPlaybackHeadWrapCount = 0;
      passthroughWorkaroundPauseOffset = 0;
      if (audioTrack != null) {
        sampleRate = audioTrack.getSampleRate();
      }
    }

    /**
     * Stops the audio track in a way that ensures media written to it is played out in full, and
     * that {@link #getPlaybackHeadPosition()} and {@link #getPositionUs()} continue to increment
     * as the remaining media is played out.
     *
     * @param writtenFrames The total number of frames that have been written.
     */
    public void handleEndOfStream(long writtenFrames) {
      stopPlaybackHeadPosition = getPlaybackHeadPosition();
      stopTimestampUs = SystemClock.elapsedRealtime() * 1000;
      endPlaybackHeadPosition = writtenFrames;
      audioTrack.stop();
    }

    /**
     * Pauses the audio track unless the end of the stream has been handled, in which case calling
     * this method does nothing.
     */
    public void pause() {
      if (stopTimestampUs != C.TIME_UNSET) {
        // We don't want to knock the audio track back into the paused state.
        return;
      }
      audioTrack.pause();
    }

    /**
     * Returns whether the track is in an invalid state and must be reset.
     *
     * @see #getPlaybackHeadPosition()
     */
    public boolean needsReset(long writtenFrames) {
      return forceResetWorkaroundTimeMs != C.TIME_UNSET && writtenFrames > 0
          && SystemClock.elapsedRealtime() - forceResetWorkaroundTimeMs
              >= FORCE_RESET_WORKAROUND_TIMEOUT_MS;
    }

    /**
     * {@link AudioTrack#getPlaybackHeadPosition()} returns a value intended to be interpreted as
     * an unsigned 32 bit integer, which also wraps around periodically. This method returns the
     * playback head position as a long that will only wrap around if the value exceeds
     * {@link Long#MAX_VALUE} (which in practice will never happen).
     *
     * @return The playback head position, in frames.
     */
    public long getPlaybackHeadPosition() {
      if (stopTimestampUs != C.TIME_UNSET) {
        // Simulate the playback head position up to the total number of frames submitted.
        long elapsedTimeSinceStopUs = (SystemClock.elapsedRealtime() * 1000) - stopTimestampUs;
        long framesSinceStop = (elapsedTimeSinceStopUs * sampleRate) / C.MICROS_PER_SECOND;
        return Math.min(endPlaybackHeadPosition, stopPlaybackHeadPosition + framesSinceStop);
      }

      @PlayState int playState = audioTrack.getPlayState();
      if (playState == PLAYSTATE_STOPPED) {
        // The audio track hasn't been started.
        return 0;
      }

      long rawPlaybackHeadPosition = 0xFFFFFFFFL & audioTrack.getPlaybackHeadPosition();
      if (needsPassthroughWorkaround) {
        // Work around an issue with passthrough/direct AudioTracks on platform API versions 21/22
        // where the playback head position jumps back to zero on paused passthrough/direct audio
        // tracks. See [Internal: b/19187573].
        if (playState == PLAYSTATE_PAUSED && rawPlaybackHeadPosition == 0) {
          passthroughWorkaroundPauseOffset = lastRawPlaybackHeadPosition;
        }
        rawPlaybackHeadPosition += passthroughWorkaroundPauseOffset;
      }

      if (Util.SDK_INT <= 28) {
        if (rawPlaybackHeadPosition == 0
            && lastRawPlaybackHeadPosition > 0
            && playState == PLAYSTATE_PLAYING) {
          // If connecting a Bluetooth audio device fails, the AudioTrack may be left in a state
          // where its Java API is in the playing state, but the native track is stopped. When this
          // happens the playback head position gets stuck at zero. In this case, return the old
          // playback head position and force the track to be reset after
          // {@link #FORCE_RESET_WORKAROUND_TIMEOUT_MS} has elapsed.
          if (forceResetWorkaroundTimeMs == C.TIME_UNSET) {
            forceResetWorkaroundTimeMs = SystemClock.elapsedRealtime();
          }
          return lastRawPlaybackHeadPosition;
        } else {
          forceResetWorkaroundTimeMs = C.TIME_UNSET;
        }
      }

      if (lastRawPlaybackHeadPosition > rawPlaybackHeadPosition) {
        // The value must have wrapped around.
        rawPlaybackHeadWrapCount++;
      }
      lastRawPlaybackHeadPosition = rawPlaybackHeadPosition;
      return rawPlaybackHeadPosition + (rawPlaybackHeadWrapCount << 32);
    }
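
Distilled from the method above, the wraparound handling alone needs only two fields of state; a standalone sketch (names chosen for illustration):

// Widens the unsigned 32-bit position reported by AudioTrack.getPlaybackHeadPosition()
// into a monotonically increasing 64-bit frame count.
private long lastRawPosition; // last raw value, reinterpreted as unsigned
private long wrapCount; // number of times the 32-bit counter has overflowed

private long widenPlaybackHeadPosition(int rawPosition) {
  long unsignedRaw = 0xFFFFFFFFL & rawPosition; // reinterpret the signed int as unsigned
  if (unsignedRaw < lastRawPosition) {
    wrapCount++; // the 32-bit counter wrapped since the previous sample
  }
  lastRawPosition = unsignedRaw;
  return unsignedRaw + (wrapCount << 32);
}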

    /**
     * Returns the duration of played media since reconfiguration, in microseconds.
     */
    public long getPositionUs() {
      return (getPlaybackHeadPosition() * C.MICROS_PER_SECOND) / sampleRate;
    }
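
As a worked example of this conversion:

// With sampleRate == 48000, a playback head position of 96000 frames maps to
// (96000 * 1_000_000) / 48000 = 2_000_000 microseconds, i.e. two seconds of played media.
long positionUs = (96_000L * C.MICROS_PER_SECOND) / 48_000L; // == 2_000_000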

    /**
     * Updates the values returned by {@link #getTimestampSystemTimeUs()} and {@link
     * #getTimestampPositionFrames()}.
     *
     * @return Whether the timestamp values were updated.
     */
    public boolean updateTimestamp() {
      return false;
    }

    /**
     * Returns the system time in microseconds of the last {@link AudioTimestamp}, obtained during
     * the most recent call to {@link #updateTimestamp()} that returned true.
     *
     * @return The system time in microseconds of the last {@link AudioTimestamp}, obtained during
     *     the most recent call to {@link #updateTimestamp()} that returned true.
     * @throws UnsupportedOperationException If the implementation does not support audio timestamp
     *     queries. {@link #updateTimestamp()} will always return false in this case.
     */
    public long getTimestampSystemTimeUs() {
      // Should never be called if updateTimestamp() returned false.
      throw new UnsupportedOperationException();
    }

    /**
     * Returns the position in frames of the last {@link AudioTimestamp}, obtained during the most
     * recent call to {@link #updateTimestamp()} that returned true. The value is adjusted so that
     * wrap around only occurs if the value exceeds {@link Long#MAX_VALUE} (which in practice will
     * never happen).
     *
     * @return The position in frames of the last {@link AudioTimestamp}, obtained during the most
     *     recent call to {@link #updateTimestamp()} that returned true.
     * @throws UnsupportedOperationException If the implementation does not support audio timestamp
     *     queries. {@link #updateTimestamp()} will always return false in this case.
     */
    public long getTimestampPositionFrames() {
      // Should never be called if updateTimestamp() returned false.
      throw new UnsupportedOperationException();
    }
  }

  @TargetApi(19)
  private static class AudioTrackUtilV19 extends AudioTrackUtil {

    private final AudioTimestamp audioTimestamp;

    private long rawTimestampPositionFramesWrapCount;
    private long lastRawTimestampPositionFrames;
    private long lastTimestampPositionFrames;

    public AudioTrackUtilV19() {
      audioTimestamp = new AudioTimestamp();
    }

    @Override
    public void reconfigure(AudioTrack audioTrack, boolean needsPassthroughWorkaround) {
      super.reconfigure(audioTrack, needsPassthroughWorkaround);
      rawTimestampPositionFramesWrapCount = 0;
      lastRawTimestampPositionFrames = 0;
      lastTimestampPositionFrames = 0;
    }

    @Override
    public boolean updateTimestamp() {
      boolean updated = audioTrack.getTimestamp(audioTimestamp);
      if (updated) {
        long rawPositionFrames = audioTimestamp.framePosition;
        if (lastRawTimestampPositionFrames > rawPositionFrames) {
          // The value must have wrapped around.
          rawTimestampPositionFramesWrapCount++;
        }
        lastRawTimestampPositionFrames = rawPositionFrames;
        lastTimestampPositionFrames =
            rawPositionFrames + (rawTimestampPositionFramesWrapCount << 32);
      }
      return updated;
    }

    @Override
    public long getTimestampSystemTimeUs() {
      return audioTimestamp.nanoTime / 1000;
    }

    @Override
    public long getTimestampPositionFrames() {
      return lastTimestampPositionFrames;
    }
  }
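
A caller-side sketch of how the two timestamp getters combine (variable names assumed): the timestamp anchors a (frame position, system time) pair, and the position between updates is extrapolated from elapsed system time rather than by querying the track again.

long systemTimeUs = System.nanoTime() / 1000;
if (audioTrackUtil.updateTimestamp()) {
  long elapsedSinceTimestampUs = systemTimeUs - audioTrackUtil.getTimestampSystemTimeUs();
  long elapsedFrames = (elapsedSinceTimestampUs * sampleRate) / C.MICROS_PER_SECOND;
  // Estimate of the current playback head, anchored to the last timestamp.
  long positionFrames = audioTrackUtil.getTimestampPositionFrames() + elapsedFrames;
}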

  /**
   * Stores playback parameters with the position and media time at which they apply.
   */
@@ -1600,64 +1129,7 @@ public final class DefaultAudioSink implements AudioSink {

  }

  // TODO(b/74325207): Factor out position tracking functionality into a separate class.
  private final class PositionTrackerListener implements AudioTrackPositionTracker.Listener {

  /** Interface for errors relating to the audio track position. */
  private interface AudioTrackPositionErrorListener {

    /**
     * Called when the frame position is too far from the expected frame position.
     *
     * @param audioTimestampPositionFrames The frame position of the last known audio track
     *     timestamp.
     * @param audioTimestampSystemTimeUs The system time associated with the last known audio track
     *     timestamp, in microseconds.
     * @param systemTimeUs The current time.
     * @param playbackPositionUs The current playback head position in microseconds.
     */
    void onPositionFramesMismatch(
        long audioTimestampPositionFrames,
        long audioTimestampSystemTimeUs,
        long systemTimeUs,
        long playbackPositionUs);

    /**
     * Called when the system time associated with the last known audio track timestamp is
     * unexpectedly far from the current time.
     *
     * @param audioTimestampPositionFrames The frame position of the last known audio track
     *     timestamp.
     * @param audioTimestampSystemTimeUs The system time associated with the last known audio track
     *     timestamp, in microseconds.
     * @param systemTimeUs The current time.
     * @param playbackPositionUs The current playback head position in microseconds.
     */
    void onSystemTimeUsMismatch(
        long audioTimestampPositionFrames,
        long audioTimestampSystemTimeUs,
        long systemTimeUs,
        long playbackPositionUs);

    /**
     * Called when the audio track has provided an invalid latency.
     *
     * @param latencyUs The reported latency in microseconds.
     */
    void onInvalidLatency(long latencyUs);

    /**
     * Called if the audio track runs out of data to play.
     *
     * @param bufferSize The size of the sink's buffer, in bytes.
     * @param bufferSizeMs The size of the sink's buffer, in milliseconds, if it is configured for
     *     PCM output. {@link C#TIME_UNSET} if it is configured for encoded audio output, as the
     *     buffered media can have a variable bitrate so the duration may be unknown.
     */
    void onUnderrun(int bufferSize, long bufferSizeMs);
  }

  private final class DefaultAudioTrackPositionErrorListener
      implements AudioTrackPositionErrorListener {

    @Override
    public void onPositionFramesMismatch(