Add AudioSink interface and use it from audio renderers
This change allows applications to provide custom AudioSinks, which could be based on android.media.AudioTrack like DefaultAudioSink, or could be completely custom. The refactoring is mostly mechanical and shouldn't result in any functionality changes. Some android.media.AudioTrack-specific details have to appear in the AudioSink interface, so this change modifies the javadoc on the AudioTrack class (now DefaultAudioSink, with its interface javadoc moved to AudioSink) to note that some methods will have no effect for implementations that aren't backed by a platform AudioTrack.

Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=170311083
parent b14b3d43de
commit 9c7950f342
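As context for the diff below: a renderer drives an AudioSink through the lifecycle the new interface javadoc describes (configure, play, a handleBuffer loop, playToEndOfStream, then release). The following minimal sketch illustrates that call sequence against the interface added in this commit. It is illustrative only; the haveInput/nextInputBuffer/nextBufferTimeUs helpers are hypothetical placeholders standing in for a decoder, not part of this change.

import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.audio.AudioSink;
import com.google.android.exoplayer2.util.MimeTypes;
import java.nio.ByteBuffer;

final class AudioSinkLifecycleSketch {

  // Drives a sink through the documented lifecycle:
  // configure -> play -> handleBuffer loop -> playToEndOfStream -> release.
  static void playOut(AudioSink sink) throws AudioSink.ConfigurationException,
      AudioSink.InitializationException, AudioSink.WriteException {
    // A renderer would also call sink.setListener(...) to receive the
    // onAudioSessionId, onPositionDiscontinuity and onUnderrun callbacks.
    sink.configure(MimeTypes.AUDIO_RAW, /* channelCount= */ 2, /* sampleRate= */ 44100,
        C.ENCODING_PCM_16BIT, /* specifiedBufferSize= */ 0, /* outputChannels= */ null,
        /* trimStartSamples= */ 0, /* trimEndSamples= */ 0);
    sink.play();
    while (haveInput()) {
      ByteBuffer buffer = nextInputBuffer();
      long presentationTimeUs = nextBufferTimeUs();
      // handleBuffer returns false when the sink can't consume the whole buffer yet;
      // the same buffer must then be offered again until it is fully consumed.
      while (!sink.handleBuffer(buffer, presentationTimeUs)) {
        // A real renderer would return from its render call and retry later.
      }
    }
    // Call repeatedly until all written data has been played out.
    do {
      sink.playToEndOfStream();
    } while (!sink.isEnded());
    sink.release();
  }

  // Hypothetical input plumbing, standing in for a decoder/renderer.
  private static boolean haveInput() { return false; }
  private static ByteBuffer nextInputBuffer() { return ByteBuffer.allocate(0); }
  private static long nextBufferTimeUs() { return 0; }
}

DefaultAudioSink (the renamed AudioTrack class in the diff) is the android.media.AudioTrack-backed implementation of this lifecycle; a fully custom sink implements the same interface and, per the new javadoc, may treat the audio session, audio attributes and tunneling methods as no-ops.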
@@ -248,7 +248,7 @@ import java.util.Locale;
   }
 
   @Override
-  public void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {
+  public void onAudioSinkUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {
     printInternalError("audioTrackUnderrun [" + bufferSize + ", " + bufferSizeMs + ", "
         + elapsedSinceLastFeedMs + "]", null);
   }
@@ -976,10 +976,10 @@ public class SimpleExoPlayer implements ExoPlayer {
     }
 
     @Override
-    public void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs,
+    public void onAudioSinkUnderrun(int bufferSize, long bufferSizeMs,
         long elapsedSinceLastFeedMs) {
       if (audioDebugListener != null) {
-        audioDebugListener.onAudioTrackUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+        audioDebugListener.onAudioSinkUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
       }
     }
 
@@ -63,15 +63,15 @@ public interface AudioRendererEventListener {
   void onAudioInputFormatChanged(Format format);
 
   /**
-   * Called when an {@link AudioTrack} underrun occurs.
+   * Called when an {@link AudioSink} underrun occurs.
    *
-   * @param bufferSize The size of the {@link AudioTrack}'s buffer, in bytes.
-   * @param bufferSizeMs The size of the {@link AudioTrack}'s buffer, in milliseconds, if it is
+   * @param bufferSize The size of the {@link AudioSink}'s buffer, in bytes.
+   * @param bufferSizeMs The size of the {@link AudioSink}'s buffer, in milliseconds, if it is
    *     configured for PCM output. {@link C#TIME_UNSET} if it is configured for passthrough output,
    *     as the buffered media can have a variable bitrate so the duration may be unknown.
-   * @param elapsedSinceLastFeedMs The time since the {@link AudioTrack} was last fed data.
+   * @param elapsedSinceLastFeedMs The time since the {@link AudioSink} was last fed data.
    */
-  void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs);
+  void onAudioSinkUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs);
 
   /**
    * Called when the renderer is disabled.
@@ -144,7 +144,7 @@ public interface AudioRendererEventListener {
     }
 
     /**
-     * Invokes {@link AudioRendererEventListener#onAudioTrackUnderrun(int, long, long)}.
+     * Invokes {@link AudioRendererEventListener#onAudioSinkUnderrun(int, long, long)}.
      */
    public void audioTrackUnderrun(final int bufferSize, final long bufferSizeMs,
        final long elapsedSinceLastFeedMs) {
@@ -152,7 +152,7 @@ public interface AudioRendererEventListener {
      handler.post(new Runnable() {
        @Override
        public void run() {
-          listener.onAudioTrackUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
+          listener.onAudioSinkUnderrun(bufferSize, bufferSizeMs, elapsedSinceLastFeedMs);
        }
      });
    }
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.android.exoplayer2.audio;
+
+import android.media.AudioTrack;
+import android.support.annotation.Nullable;
+import com.google.android.exoplayer2.C;
+import com.google.android.exoplayer2.PlaybackParameters;
+import java.nio.ByteBuffer;
+
+/**
+ * A sink that consumes audio data.
+ * <p>
+ * Before starting playback, specify the input audio format by calling
+ * {@link #configure(String, int, int, int, int, int[], int, int)}.
+ * <p>
+ * Call {@link #handleBuffer(ByteBuffer, long)} to write data, and {@link #handleDiscontinuity()}
+ * when the data being fed is discontinuous. Call {@link #play()} to start playing the written data.
+ * <p>
+ * Call {@link #configure(String, int, int, int, int, int[], int, int)} whenever the input format
+ * changes. The sink will be reinitialized on the next call to
+ * {@link #handleBuffer(ByteBuffer, long)}.
+ * <p>
+ * Call {@link #reset()} to prepare the sink to receive audio data from a new playback position.
+ * <p>
+ * Call {@link #playToEndOfStream()} repeatedly to play out all data when no more input buffers will
+ * be provided via {@link #handleBuffer(ByteBuffer, long)} until the next {@link #reset()}. Call
+ * {@link #release()} when the instance is no longer required.
+ * <p>
+ * The implementation may be backed by a platform {@link AudioTrack}. In this case,
+ * {@link #setAudioSessionId(int)}, {@link #setAudioAttributes(AudioAttributes)},
+ * {@link #enableTunnelingV21(int)} and/or {@link #disableTunneling()} may be called before writing
+ * data to the sink. These methods may also be called after writing data to the sink, in which case
+ * it will be reinitialized as required. For implementations that are not based on platform
+ * {@link AudioTrack}s, calling methods relating to audio sessions, audio attributes, and tunneling
+ * may have no effect.
+ */
+public interface AudioSink {
+
+  /**
+   * Listener for audio sink events.
+   */
+  interface Listener {
+
+    /**
+     * Called if the audio sink has started rendering audio to a new platform audio session.
+     *
+     * @param audioSessionId The newly generated audio session's identifier.
+     */
+    void onAudioSessionId(int audioSessionId);
+
+    /**
+     * Called when the audio sink handles a buffer whose timestamp is discontinuous with the last
+     * buffer handled since it was reset.
+     */
+    void onPositionDiscontinuity();
+
+    /**
+     * Called when the audio sink runs out of data.
+     * <p>
+     * An audio sink implementation may never call this method (for example, if audio data is
+     * consumed in batches rather than based on the sink's own clock).
+     *
+     * @param bufferSize The size of the sink's buffer, in bytes.
+     * @param bufferSizeMs The size of the sink's buffer, in milliseconds, if it is configured for
+     *     PCM output. {@link C#TIME_UNSET} if it is configured for passthrough output, as the
+     *     buffered media can have a variable bitrate so the duration may be unknown.
+     * @param elapsedSinceLastFeedMs The time since the sink was last fed data, in milliseconds.
+     */
+    void onUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs);
+
+  }
+
+  /**
+   * Thrown when a failure occurs configuring the sink.
+   */
+  final class ConfigurationException extends Exception {
+
+    /**
+     * Creates a new configuration exception with the specified {@code cause} and no message.
+     */
+    public ConfigurationException(Throwable cause) {
+      super(cause);
+    }
+
+    /**
+     * Creates a new configuration exception with the specified {@code message} and no cause.
+     */
+    public ConfigurationException(String message) {
+      super(message);
+    }
+
+  }
+
+  /**
+   * Thrown when a failure occurs initializing the sink.
+   */
+  final class InitializationException extends Exception {
+
+    /**
+     * The underlying {@link AudioTrack}'s state, if applicable.
+     */
+    public final int audioTrackState;
+
+    /**
+     * @param audioTrackState The underlying {@link AudioTrack}'s state, if applicable.
+     * @param sampleRate The requested sample rate in Hz.
+     * @param channelConfig The requested channel configuration.
+     * @param bufferSize The requested buffer size in bytes.
+     */
+    public InitializationException(int audioTrackState, int sampleRate, int channelConfig,
+        int bufferSize) {
+      super("AudioTrack init failed: " + audioTrackState + ", Config(" + sampleRate + ", "
+          + channelConfig + ", " + bufferSize + ")");
+      this.audioTrackState = audioTrackState;
+    }
+
+  }
+
+  /**
+   * Thrown when a failure occurs writing to the sink.
+   */
+  final class WriteException extends Exception {
+
+    /**
+     * The error value returned from the sink implementation. If the sink writes to a platform
+     * {@link AudioTrack}, this will be the error value returned from
+     * {@link AudioTrack#write(byte[], int, int)} or {@link AudioTrack#write(ByteBuffer, int, int)}.
+     * Otherwise, the meaning of the error code depends on the sink implementation.
+     */
+    public final int errorCode;
+
+    /**
+     * @param errorCode The error value returned from the sink implementation.
+     */
+    public WriteException(int errorCode) {
+      super("AudioTrack write failed: " + errorCode);
+      this.errorCode = errorCode;
+    }
+
+  }
+
+  /**
+   * Returned by {@link #getCurrentPositionUs(boolean)} when the position is not set.
+   */
+  long CURRENT_POSITION_NOT_SET = Long.MIN_VALUE;
+
+  /**
+   * Sets the listener for sink events, which should be the audio renderer.
+   *
+   * @param listener The listener for sink events, which should be the audio renderer.
+   */
+  void setListener(Listener listener);
+
+  /**
+   * Returns whether it's possible to play audio in the specified format using encoded audio
+   * passthrough.
+   *
+   * @param mimeType The format mime type.
+   * @return Whether it's possible to play audio in the format using encoded audio passthrough.
+   */
+  boolean isPassthroughSupported(String mimeType);
+
+  /**
+   * Returns the playback position in the stream starting at zero, in microseconds, or
+   * {@link #CURRENT_POSITION_NOT_SET} if it is not yet available.
+   *
+   * @param sourceEnded Specify {@code true} if no more input buffers will be provided.
+   * @return The playback position relative to the start of playback, in microseconds.
+   */
+  long getCurrentPositionUs(boolean sourceEnded);
+
+  /**
+   * Configures (or reconfigures) the sink.
+   *
+   * @param mimeType The MIME type of audio data provided in the input buffers.
+   * @param channelCount The number of channels.
+   * @param sampleRate The sample rate in Hz.
+   * @param pcmEncoding For PCM formats, the encoding used. One of {@link C#ENCODING_PCM_8BIT},
+   *     {@link C#ENCODING_PCM_16BIT}, {@link C#ENCODING_PCM_24BIT} and
+   *     {@link C#ENCODING_PCM_32BIT}.
+   * @param specifiedBufferSize A specific size for the playback buffer in bytes, or 0 to infer a
+   *     suitable buffer size.
+   * @param outputChannels A mapping from input to output channels that is applied to this sink's
+   *     input as a preprocessing step, if handling PCM input. Specify {@code null} to leave the
+   *     input unchanged. Otherwise, the element at index {@code i} specifies index of the input
+   *     channel to map to output channel {@code i} when preprocessing input buffers. After the
+   *     map is applied the audio data will have {@code outputChannels.length} channels.
+   * @param trimStartSamples The number of audio samples to trim from the start of data written to
+   *     the sink after this call.
+   * @param trimEndSamples The number of audio samples to trim from data written to the sink
+   *     immediately preceding the next call to {@link #reset()} or
+   *     {@link #configure(String, int, int, int, int, int[], int, int)}.
+   * @throws ConfigurationException If an error occurs configuring the sink.
+   */
+  void configure(String mimeType, int channelCount, int sampleRate, @C.PcmEncoding int pcmEncoding,
+      int specifiedBufferSize, @Nullable int[] outputChannels, int trimStartSamples,
+      int trimEndSamples) throws ConfigurationException;
+
+  /**
+   * Starts or resumes consuming audio if initialized.
+   */
+  void play();
+
+  /**
+   * Signals to the sink that the next buffer is discontinuous with the previous buffer.
+   */
+  void handleDiscontinuity();
+
+  /**
+   * Attempts to process data from a {@link ByteBuffer}, starting from its current position and
+   * ending at its limit (exclusive). The position of the {@link ByteBuffer} is advanced by the
+   * number of bytes that were handled. {@link Listener#onPositionDiscontinuity()} will be called if
+   * {@code presentationTimeUs} is discontinuous with the last buffer handled since the last reset.
+   * <p>
+   * Returns whether the data was handled in full. If the data was not handled in full then the same
+   * {@link ByteBuffer} must be provided to subsequent calls until it has been fully consumed,
+   * except in the case of an intervening call to {@link #reset()} (or to
+   * {@link #configure(String, int, int, int, int, int[], int, int)} that causes the sink to be
+   * reset).
+   *
+   * @param buffer The buffer containing audio data.
+   * @param presentationTimeUs The presentation timestamp of the buffer in microseconds.
+   * @return Whether the buffer was handled fully.
+   * @throws InitializationException If an error occurs initializing the sink.
+   * @throws WriteException If an error occurs writing the audio data.
+   */
+  boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
+      throws InitializationException, WriteException;
+
+  /**
+   * Processes any remaining data. {@link #isEnded()} will return {@code true} when no data remains.
+   *
+   * @throws WriteException If an error occurs draining data to the sink.
+   */
+  void playToEndOfStream() throws WriteException;
+
+  /**
+   * Returns whether {@link #playToEndOfStream} has been called and all buffers have been processed.
+   */
+  boolean isEnded();
+
+  /**
+   * Returns whether the sink has data pending that has not been consumed yet.
+   */
+  boolean hasPendingData();
+
+  /**
+   * Attempts to set the playback parameters and returns the active playback parameters, which may
+   * differ from those passed in.
+   *
+   * @param playbackParameters The new playback parameters to attempt to set.
+   * @return The active playback parameters.
+   */
+  PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters);
+
+  /**
+   * Gets the active {@link PlaybackParameters}.
+   */
+  PlaybackParameters getPlaybackParameters();
+
+  /**
+   * Sets attributes for audio playback. If the attributes have changed and if the sink is not
+   * configured for use with tunneling, then it is reset and the audio session id is cleared.
+   * <p>
+   * If the sink is configured for use with tunneling then the audio attributes are ignored. The
+   * sink is not reset and the audio session id is not cleared. The passed attributes will be used
+   * if the sink is later re-configured into non-tunneled mode.
+   *
+   * @param audioAttributes The attributes for audio playback.
+   */
+  void setAudioAttributes(AudioAttributes audioAttributes);
+
+  /**
+   * Sets the audio session id.
+   */
+  void setAudioSessionId(int audioSessionId);
+
+  /**
+   * Enables tunneling, if possible. The sink is reset if tunneling was previously disabled or if
+   * the audio session id has changed. Enabling tunneling is only possible if the sink is based on a
+   * platform {@link AudioTrack}, and requires platform API version 21 onwards.
+   *
+   * @param tunnelingAudioSessionId The audio session id to use.
+   * @throws IllegalStateException Thrown if enabling tunneling on platform API version < 21.
+   */
+  void enableTunnelingV21(int tunnelingAudioSessionId);
+
+  /**
+   * Disables tunneling. If tunneling was previously enabled then the sink is reset and any audio
+   * session id is cleared.
+   */
+  void disableTunneling();
+
+  /**
+   * Sets the playback volume.
+   *
+   * @param volume A volume in the range [0.0, 1.0].
+   */
+  void setVolume(float volume);
+
+  /**
+   * Pauses playback.
+   */
+  void pause();
+
+  /**
+   * Resets the sink, after which it is ready to receive buffers from a new playback position.
+   * <p>
+   * The audio session may remain active until {@link #release()} is called.
+   */
+  void reset();
+
+  /**
+   * Releases any resources associated with this instance.
+   */
+  void release();
+
+}
@@ -52,7 +52,7 @@ import java.util.Arrays;
    * Resets the channel mapping. After calling this method, call {@link #configure(int, int, int)}
    * to start using the new channel map.
    *
-   * @see AudioTrack#configure(String, int, int, int, int, int[], int, int)
+   * @see AudioSink#configure(String, int, int, int, int, int[], int, int)
    */
   public void setChannelMap(int[] outputChannels) {
     pendingOutputChannels = outputChannels;
@@ -20,6 +20,7 @@ import android.annotation.TargetApi;
 import android.media.AudioFormat;
 import android.media.AudioManager;
 import android.media.AudioTimestamp;
+import android.media.AudioTrack;
 import android.os.ConditionVariable;
 import android.os.SystemClock;
 import android.support.annotation.IntDef;
@@ -39,131 +40,19 @@ import java.util.ArrayList;
 import java.util.LinkedList;
 
 /**
- * Plays audio data. The implementation delegates to an {@link android.media.AudioTrack} and handles
- * playback position smoothing, non-blocking writes and reconfiguration.
+ * Plays audio data. The implementation delegates to an {@link AudioTrack} and handles playback
+ * position smoothing, non-blocking writes and reconfiguration.
  * <p>
- * Before starting playback, specify the input format by calling
- * {@link #configure(String, int, int, int, int, int[], int, int)}. Optionally call
- * {@link #setAudioSessionId(int)}, {@link #setAudioAttributes(AudioAttributes)},
- * {@link #enableTunnelingV21(int)} and {@link #disableTunneling()} to configure audio playback.
- * These methods may be called after writing data to the track, in which case it will be
- * reinitialized as required.
- * <p>
- * Call {@link #handleBuffer(ByteBuffer, long)} to write data, and {@link #handleDiscontinuity()}
- * when the data being fed is discontinuous. Call {@link #play()} to start playing the written data.
- * <p>
- * Call {@link #configure(String, int, int, int, int, int[], int, int)} whenever the input format
- * changes. The track will be reinitialized on the next call to
- * {@link #handleBuffer(ByteBuffer, long)}.
- * <p>
- * Calling {@link #reset()} releases the underlying {@link android.media.AudioTrack} (and so does
- * calling {@link #configure(String, int, int, int, int, int[], int, int)} unless the format is
- * unchanged). It is safe to call {@link #handleBuffer(ByteBuffer, long)} after {@link #reset()}
- * without calling {@link #configure(String, int, int, int, int, int[], int, int)}.
- * <p>
- * Call {@link #playToEndOfStream()} repeatedly to play out all data when no more input buffers will
- * be provided via {@link #handleBuffer(ByteBuffer, long)} until the next {@link #reset}. Call
- * {@link #release()} when the instance is no longer required.
+ * If tunneling mode is enabled, care must be taken that audio processors do not output buffers with
+ * a different duration than their input, and buffer processors must produce output corresponding to
+ * their last input immediately after that input is queued. This means that, for example, speed
+ * adjustment is not possible while using tunneling.
  */
-public final class AudioTrack {
+public final class DefaultAudioSink implements AudioSink {
 
   /**
-   * Listener for audio track events.
-   */
-  public interface Listener {
-
-    /**
-     * Called when the audio track has been initialized with a newly generated audio session id.
-     *
-     * @param audioSessionId The newly generated audio session id.
-     */
-    void onAudioSessionId(int audioSessionId);
-
-    /**
-     * Called when the audio track handles a buffer whose timestamp is discontinuous with the last
-     * buffer handled since it was reset.
-     */
-    void onPositionDiscontinuity();
-
-    /**
-     * Called when the audio track underruns.
-     *
-     * @param bufferSize The size of the track's buffer, in bytes.
-     * @param bufferSizeMs The size of the track's buffer, in milliseconds, if it is configured for
-     *     PCM output. {@link C#TIME_UNSET} if it is configured for passthrough output, as the
-     *     buffered media can have a variable bitrate so the duration may be unknown.
-     * @param elapsedSinceLastFeedMs The time since the track was last fed data, in milliseconds.
-     */
-    void onUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs);
-
-  }
-
-  /**
-   * Thrown when a failure occurs configuring the track.
-   */
-  public static final class ConfigurationException extends Exception {
-
-    public ConfigurationException(Throwable cause) {
-      super(cause);
-    }
-
-    public ConfigurationException(String message) {
-      super(message);
-    }
-
-  }
-
-  /**
-   * Thrown when a failure occurs initializing an {@link android.media.AudioTrack}.
-   */
-  public static final class InitializationException extends Exception {
-
-    /**
-     * The state as reported by {@link android.media.AudioTrack#getState()}.
-     */
-    public final int audioTrackState;
-
-    /**
-     * @param audioTrackState The state as reported by {@link android.media.AudioTrack#getState()}.
-     * @param sampleRate The requested sample rate in Hz.
-     * @param channelConfig The requested channel configuration.
-     * @param bufferSize The requested buffer size in bytes.
-     */
-    public InitializationException(int audioTrackState, int sampleRate, int channelConfig,
-        int bufferSize) {
-      super("AudioTrack init failed: " + audioTrackState + ", Config(" + sampleRate + ", "
-          + channelConfig + ", " + bufferSize + ")");
-      this.audioTrackState = audioTrackState;
-    }
-
-  }
-
-  /**
-   * Thrown when a failure occurs writing to an {@link android.media.AudioTrack}.
-   */
-  public static final class WriteException extends Exception {
-
-    /**
-     * The error value returned from {@link android.media.AudioTrack#write(byte[], int, int)} or
-     * {@link android.media.AudioTrack#write(ByteBuffer, int, int)}.
-     */
-    public final int errorCode;
-
-    /**
-     * @param errorCode The error value returned from
-     *     {@link android.media.AudioTrack#write(byte[], int, int)} or
-     *     {@link android.media.AudioTrack#write(ByteBuffer, int, int)}.
-     */
-    public WriteException(int errorCode) {
-      super("AudioTrack write failed: " + errorCode);
-      this.errorCode = errorCode;
-    }
-
-  }
-
-  /**
-   * Thrown when {@link android.media.AudioTrack#getTimestamp} returns a spurious timestamp, if
-   * {@code AudioTrack#failOnSpuriousAudioTimestamp} is set.
+   * Thrown when {@link AudioTrack#getTimestamp} returns a spurious timestamp, if
+   * {@link #failOnSpuriousAudioTimestamp} is set.
    */
   public static final class InvalidAudioTrackTimestampException extends RuntimeException {
 
@@ -177,61 +66,56 @@ public final class AudioTrack {
   }
 
   /**
-   * Returned by {@link #getCurrentPositionUs(boolean)} when the position is not set.
-   */
-  public static final long CURRENT_POSITION_NOT_SET = Long.MIN_VALUE;
-
-  /**
-   * A minimum length for the {@link android.media.AudioTrack} buffer, in microseconds.
+   * A minimum length for the {@link AudioTrack} buffer, in microseconds.
    */
   private static final long MIN_BUFFER_DURATION_US = 250000;
   /**
-   * A maximum length for the {@link android.media.AudioTrack} buffer, in microseconds.
+   * A maximum length for the {@link AudioTrack} buffer, in microseconds.
    */
   private static final long MAX_BUFFER_DURATION_US = 750000;
   /**
-   * The length for passthrough {@link android.media.AudioTrack} buffers, in microseconds.
+   * The length for passthrough {@link AudioTrack} buffers, in microseconds.
    */
   private static final long PASSTHROUGH_BUFFER_DURATION_US = 250000;
   /**
    * A multiplication factor to apply to the minimum buffer size requested by the underlying
-   * {@link android.media.AudioTrack}.
+   * {@link AudioTrack}.
    */
   private static final int BUFFER_MULTIPLICATION_FACTOR = 4;
 
   /**
-   * @see android.media.AudioTrack#PLAYSTATE_STOPPED
+   * @see AudioTrack#PLAYSTATE_STOPPED
    */
-  private static final int PLAYSTATE_STOPPED = android.media.AudioTrack.PLAYSTATE_STOPPED;
+  private static final int PLAYSTATE_STOPPED = AudioTrack.PLAYSTATE_STOPPED;
   /**
-   * @see android.media.AudioTrack#PLAYSTATE_PAUSED
+   * @see AudioTrack#PLAYSTATE_PAUSED
    */
-  private static final int PLAYSTATE_PAUSED = android.media.AudioTrack.PLAYSTATE_PAUSED;
+  private static final int PLAYSTATE_PAUSED = AudioTrack.PLAYSTATE_PAUSED;
   /**
-   * @see android.media.AudioTrack#PLAYSTATE_PLAYING
+   * @see AudioTrack#PLAYSTATE_PLAYING
   */
-  private static final int PLAYSTATE_PLAYING = android.media.AudioTrack.PLAYSTATE_PLAYING;
+  private static final int PLAYSTATE_PLAYING = AudioTrack.PLAYSTATE_PLAYING;
   /**
-   * @see android.media.AudioTrack#ERROR_BAD_VALUE
+   * @see AudioTrack#ERROR_BAD_VALUE
    */
-  private static final int ERROR_BAD_VALUE = android.media.AudioTrack.ERROR_BAD_VALUE;
+  private static final int ERROR_BAD_VALUE = AudioTrack.ERROR_BAD_VALUE;
   /**
-   * @see android.media.AudioTrack#MODE_STATIC
+   * @see AudioTrack#MODE_STATIC
    */
-  private static final int MODE_STATIC = android.media.AudioTrack.MODE_STATIC;
+  private static final int MODE_STATIC = AudioTrack.MODE_STATIC;
   /**
-   * @see android.media.AudioTrack#MODE_STREAM
+   * @see AudioTrack#MODE_STREAM
    */
-  private static final int MODE_STREAM = android.media.AudioTrack.MODE_STREAM;
+  private static final int MODE_STREAM = AudioTrack.MODE_STREAM;
   /**
-   * @see android.media.AudioTrack#STATE_INITIALIZED
+   * @see AudioTrack#STATE_INITIALIZED
    */
-  private static final int STATE_INITIALIZED = android.media.AudioTrack.STATE_INITIALIZED;
+  private static final int STATE_INITIALIZED = AudioTrack.STATE_INITIALIZED;
   /**
-   * @see android.media.AudioTrack#WRITE_NON_BLOCKING
+   * @see AudioTrack#WRITE_NON_BLOCKING
    */
   @SuppressLint("InlinedApi")
-  private static final int WRITE_NON_BLOCKING = android.media.AudioTrack.WRITE_NON_BLOCKING;
+  private static final int WRITE_NON_BLOCKING = AudioTrack.WRITE_NON_BLOCKING;
 
   private static final String TAG = "AudioTrack";
 
@@ -282,7 +166,7 @@ public final class AudioTrack {
 
   /**
    * Whether to throw an {@link InvalidAudioTrackTimestampException} when a spurious timestamp is
-   * reported from {@link android.media.AudioTrack#getTimestamp}.
+   * reported from {@link AudioTrack#getTimestamp}.
    * <p>
    * The flag must be set before creating a player. Should be set to {@code true} for testing and
    * debugging purposes only.
@@ -294,18 +178,17 @@ public final class AudioTrack {
   private final TrimmingAudioProcessor trimmingAudioProcessor;
   private final SonicAudioProcessor sonicAudioProcessor;
   private final AudioProcessor[] availableAudioProcessors;
-  private final Listener listener;
   private final ConditionVariable releasingConditionVariable;
   private final long[] playheadOffsets;
   private final AudioTrackUtil audioTrackUtil;
   private final LinkedList<PlaybackParametersCheckpoint> playbackParametersCheckpoints;
 
+  @Nullable private Listener listener;
   /**
    * Used to keep the audio session active on pre-V21 builds (see {@link #initialize()}).
    */
-  private android.media.AudioTrack keepSessionIdAudioTrack;
-  private android.media.AudioTrack audioTrack;
+  private AudioTrack keepSessionIdAudioTrack;
+  private AudioTrack audioTrack;
   private int sampleRate;
   private int channelConfig;
   private @C.Encoding int encoding;
@@ -364,17 +247,15 @@ public final class AudioTrack {
    * default capabilities (no encoded audio passthrough support) should be assumed.
    * @param audioProcessors An array of {@link AudioProcessor}s that will process PCM audio before
    *     output. May be empty.
-   * @param listener Listener for audio track events.
    */
-  public AudioTrack(@Nullable AudioCapabilities audioCapabilities, AudioProcessor[] audioProcessors,
-      Listener listener) {
+  public DefaultAudioSink(@Nullable AudioCapabilities audioCapabilities,
+      AudioProcessor[] audioProcessors) {
     this.audioCapabilities = audioCapabilities;
-    this.listener = listener;
     releasingConditionVariable = new ConditionVariable(true);
     if (Util.SDK_INT >= 18) {
       try {
         getLatencyMethod =
-            android.media.AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
+            AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
       } catch (NoSuchMethodException e) {
         // There's no guarantee this method exists. Do nothing.
       }
@@ -405,29 +286,21 @@ public final class AudioTrack {
     playbackParametersCheckpoints = new LinkedList<>();
   }
 
-  /**
-   * Returns whether it's possible to play audio in the specified format using encoded passthrough.
-   *
-   * @param mimeType The format mime type.
-   * @return Whether it's possible to play audio in the format using encoded passthrough.
-   */
+  @Override
+  public void setListener(Listener listener) {
+    this.listener = listener;
+  }
+
+  @Override
   public boolean isPassthroughSupported(String mimeType) {
     return audioCapabilities != null
         && audioCapabilities.supportsEncoding(getEncodingForMimeType(mimeType));
   }
 
-  /**
-   * Returns the playback position in the stream starting at zero, in microseconds, or
-   * {@link #CURRENT_POSITION_NOT_SET} if it is not yet available.
-   *
-   * <p>If the device supports it, the method uses the playback timestamp from
-   * {@link android.media.AudioTrack#getTimestamp}. Otherwise, it derives a smoothed position by
-   * sampling the {@link android.media.AudioTrack}'s frame position.
-   *
-   * @param sourceEnded Specify {@code true} if no more input buffers will be provided.
-   * @return The playback position relative to the start of playback, in microseconds.
-   */
+  @Override
   public long getCurrentPositionUs(boolean sourceEnded) {
+    // If the device supports it, use the playback timestamp from AudioTrack.getTimestamp.
+    // Otherwise, derive a smoothed position by sampling the track's frame position.
     if (!hasCurrentPositionUs()) {
       return CURRENT_POSITION_NOT_SET;
     }
@@ -462,29 +335,7 @@ public final class AudioTrack {
     return startMediaTimeUs + applySpeedup(positionUs);
   }
 
-  /**
-   * Configures (or reconfigures) the audio track.
-   *
-   * @param mimeType The mime type.
-   * @param channelCount The number of channels.
-   * @param sampleRate The sample rate in Hz.
-   * @param pcmEncoding For PCM formats, the encoding used. One of {@link C#ENCODING_PCM_8BIT},
-   *     {@link C#ENCODING_PCM_16BIT}, {@link C#ENCODING_PCM_24BIT} and
-   *     {@link C#ENCODING_PCM_32BIT}.
-   * @param specifiedBufferSize A specific size for the playback buffer in bytes, or 0 to infer a
-   *     suitable buffer size automatically.
-   * @param outputChannels A mapping from input to output channels that is applied to this track's
-   *     input as a preprocessing step, if handling PCM input. Specify {@code null} to leave the
-   *     input unchanged. Otherwise, the element at index {@code i} specifies index of the input
-   *     channel to map to output channel {@code i} when preprocessing input buffers. After the
-   *     map is applied the audio data will have {@code outputChannels.length} channels.
-   * @param trimStartSamples The number of audio samples to trim from the start of data written to
-   *     the track after this call.
-   * @param trimEndSamples The number of audio samples to trim from data written to the track
-   *     immediately preceding the next call to {@link #reset()} or
-   *     {@link #configure(String, int, int, int, int, int[], int, int)}.
-   * @throws ConfigurationException If an error occurs configuring the track.
-   */
+  @Override
   public void configure(String mimeType, int channelCount, int sampleRate,
       @C.PcmEncoding int pcmEncoding, int specifiedBufferSize, @Nullable int[] outputChannels,
       int trimStartSamples, int trimEndSamples) throws ConfigurationException {
@@ -590,8 +441,7 @@ public final class AudioTrack {
         bufferSize = (int) (PASSTHROUGH_BUFFER_DURATION_US * 192 * 1024 / C.MICROS_PER_SECOND);
       }
     } else {
-      int minBufferSize =
-          android.media.AudioTrack.getMinBufferSize(sampleRate, channelConfig, outputEncoding);
+      int minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, outputEncoding);
       Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
       int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
       int minAppBufferSize = (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
@@ -651,7 +501,9 @@ public final class AudioTrack {
     }
     if (this.audioSessionId != audioSessionId) {
       this.audioSessionId = audioSessionId;
-      listener.onAudioSessionId(audioSessionId);
+      if (listener != null) {
+        listener.onAudioSessionId(audioSessionId);
+      }
     }
 
     audioTrackUtil.reconfigure(audioTrack, needsPassthroughWorkarounds());
@@ -659,9 +511,7 @@ public final class AudioTrack {
     hasData = false;
   }
 
-  /**
-   * Starts or resumes playing audio if the audio track has been initialized.
-   */
+  @Override
   public void play() {
     playing = true;
     if (isInitialized()) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
@Override
|
||||||
* Signals to the audio track that the next buffer is discontinuous with the previous buffer.
|
|
||||||
*/
|
|
||||||
public void handleDiscontinuity() {
|
public void handleDiscontinuity() {
|
||||||
// Force resynchronization after a skipped buffer.
|
// Force resynchronization after a skipped buffer.
|
||||||
if (startMediaTimeState == START_IN_SYNC) {
|
if (startMediaTimeState == START_IN_SYNC) {
|
||||||
@@ -680,24 +528,7 @@ public final class AudioTrack {
     }
   }
 
-  /**
-   * Attempts to process data from a {@link ByteBuffer}, starting from its current position and
-   * ending at its limit (exclusive). The position of the {@link ByteBuffer} is advanced by the
-   * number of bytes that were handled. {@link Listener#onPositionDiscontinuity()} will be called if
-   * {@code presentationTimeUs} is discontinuous with the last buffer handled since the last reset.
-   * <p>
-   * Returns whether the data was handled in full. If the data was not handled in full then the same
-   * {@link ByteBuffer} must be provided to subsequent calls until it has been fully consumed,
-   * except in the case of an interleaving call to {@link #reset()} (or an interleaving call to
-   * {@link #configure(String, int, int, int, int, int[], int, int)} that caused the track to be
-   * reset).
-   *
-   * @param buffer The buffer containing audio data.
-   * @param presentationTimeUs The presentation timestamp of the buffer in microseconds.
-   * @return Whether the buffer was handled fully.
-   * @throws InitializationException If an error occurs initializing the track.
-   * @throws WriteException If an error occurs writing the audio data.
-   */
+  @Override
   @SuppressWarnings("ReferenceEquality")
   public boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
       throws InitializationException, WriteException {
@@ -729,7 +560,7 @@ public final class AudioTrack {
 
     boolean hadData = hasData;
     hasData = hasPendingData();
-    if (hadData && !hasData && audioTrack.getPlayState() != PLAYSTATE_STOPPED) {
+    if (hadData && !hasData && audioTrack.getPlayState() != PLAYSTATE_STOPPED && listener != null) {
       long elapsedSinceLastFeedMs = SystemClock.elapsedRealtime() - lastFeedElapsedRealtimeMs;
       listener.onUnderrun(bufferSize, C.usToMs(bufferSizeUs), elapsedSinceLastFeedMs);
     }
@@ -779,7 +610,9 @@ public final class AudioTrack {
         // number of bytes submitted.
         startMediaTimeUs += (presentationTimeUs - expectedPresentationTimeUs);
         startMediaTimeState = START_IN_SYNC;
-        listener.onPositionDiscontinuity();
+        if (listener != null) {
+          listener.onPositionDiscontinuity();
+        }
       }
     }
 
@@ -899,11 +732,7 @@ public final class AudioTrack {
     return false;
   }
 
-  /**
-   * Plays out remaining audio. {@link #isEnded()} will return {@code true} when playback has ended.
-   *
-   * @throws WriteException If an error occurs draining data to the track.
-   */
+  @Override
   public void playToEndOfStream() throws WriteException {
     if (handledEndOfStream || !isInitialized()) {
       return;
@@ -947,30 +776,19 @@ public final class AudioTrack {
     return true;
   }
 
-  /**
-   * Returns whether all buffers passed to {@link #handleBuffer(ByteBuffer, long)} have been
-   * completely processed and played.
-   */
+  @Override
   public boolean isEnded() {
     return !isInitialized() || (handledEndOfStream && !hasPendingData());
   }
 
-  /**
-   * Returns whether the audio track has more data pending that will be played back.
-   */
+  @Override
   public boolean hasPendingData() {
     return isInitialized()
         && (getWrittenFrames() > audioTrackUtil.getPlaybackHeadPosition()
        || overrideHasPendingData());
   }
 
-  /**
-   * Attempts to set the playback parameters and returns the active playback parameters, which may
-   * differ from those passed in.
-   *
-   * @param playbackParameters The new playback parameters to attempt to set.
-   * @return The active playback parameters.
-   */
+  @Override
   public PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters) {
     if (passthrough) {
       // The playback parameters are always the default in passthrough mode.
|
|||||||
return this.playbackParameters;
|
return this.playbackParameters;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
@Override
|
||||||
* Gets the {@link PlaybackParameters}.
|
|
||||||
*/
|
|
||||||
public PlaybackParameters getPlaybackParameters() {
|
public PlaybackParameters getPlaybackParameters() {
|
||||||
return playbackParameters;
|
return playbackParameters;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
@Override
|
||||||
* Sets the attributes for audio playback. If the attributes have changed and if the audio track
|
|
||||||
* is not configured for use with tunneling, then the audio track is reset and the audio session
|
|
||||||
* id is cleared.
|
|
||||||
* <p>
|
|
||||||
* If the audio track is configured for use with tunneling then the audio attributes are ignored.
|
|
||||||
* The audio track is not reset and the audio session id is not cleared. The passed attributes
|
|
||||||
* will be used if the audio track is later re-configured into non-tunneled mode.
|
|
||||||
*
|
|
||||||
* @param audioAttributes The attributes for audio playback.
|
|
||||||
*/
|
|
||||||
public void setAudioAttributes(AudioAttributes audioAttributes) {
|
public void setAudioAttributes(AudioAttributes audioAttributes) {
|
||||||
if (this.audioAttributes.equals(audioAttributes)) {
|
if (this.audioAttributes.equals(audioAttributes)) {
|
||||||
return;
|
return;
|
||||||
@@ -1028,9 +834,7 @@ public final class AudioTrack {
     audioSessionId = C.AUDIO_SESSION_ID_UNSET;
   }
 
-  /**
-   * Sets the audio session id. The audio track is reset if the audio session id has changed.
-   */
+  @Override
   public void setAudioSessionId(int audioSessionId) {
     if (this.audioSessionId != audioSessionId) {
       this.audioSessionId = audioSessionId;
@@ -1038,18 +842,7 @@ public final class AudioTrack {
     }
   }
 
-  /**
-   * Enables tunneling. The audio track is reset if tunneling was previously disabled or if the
-   * audio session id has changed. Enabling tunneling requires platform API version 21 onwards.
-   * <p>
-   * If this instance has {@link AudioProcessor}s and tunneling is enabled, care must be taken that
-   * audio processors do not output buffers with a different duration than their input, and buffer
-   * processors must produce output corresponding to their last input immediately after that input
-   * is queued.
-   *
-   * @param tunnelingAudioSessionId The audio session id to use.
-   * @throws IllegalStateException Thrown if enabling tunneling on platform API version < 21.
-   */
+  @Override
   public void enableTunnelingV21(int tunnelingAudioSessionId) {
     Assertions.checkState(Util.SDK_INT >= 21);
     if (!tunneling || audioSessionId != tunnelingAudioSessionId) {
@@ -1059,10 +852,7 @@ public final class AudioTrack {
     }
   }
 
-  /**
-   * Disables tunneling. If tunneling was previously enabled then the audio track is reset and the
-   * audio session id is cleared.
-   */
+  @Override
   public void disableTunneling() {
     if (tunneling) {
       tunneling = false;
@@ -1071,11 +861,7 @@ public final class AudioTrack {
     }
   }
 
-  /**
-   * Sets the playback volume.
-   *
-   * @param volume A volume in the range [0.0, 1.0].
-   */
+  @Override
   public void setVolume(float volume) {
     if (this.volume != volume) {
       this.volume = volume;
@@ -1093,9 +879,7 @@ public final class AudioTrack {
     }
   }
 
-  /**
-   * Pauses playback.
-   */
+  @Override
   public void pause() {
     playing = false;
     if (isInitialized()) {
@@ -1104,13 +888,7 @@ public final class AudioTrack {
     }
   }
 
-  /**
-   * Releases the underlying audio track asynchronously.
-   * <p>
-   * Calling {@link #handleBuffer(ByteBuffer, long)} will block until the audio track has been
-   * released, so it is safe to use the audio track immediately after a reset. The audio session may
-   * remain active until {@link #release()} is called.
-   */
+  @Override
   public void reset() {
     if (isInitialized()) {
       submittedPcmBytes = 0;
@@ -1146,7 +924,7 @@ public final class AudioTrack {
         audioTrack.pause();
       }
       // AudioTrack.release can take some time, so we call it on a background thread.
-      final android.media.AudioTrack toRelease = audioTrack;
+      final AudioTrack toRelease = audioTrack;
       audioTrack = null;
       audioTrackUtil.reconfigure(null, false);
       releasingConditionVariable.close();
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
@Override
|
||||||
* Releases all resources associated with this instance.
|
|
||||||
*/
|
|
||||||
public void release() {
|
public void release() {
|
||||||
reset();
|
reset();
|
||||||
releaseKeepSessionIdAudioTrack();
|
releaseKeepSessionIdAudioTrack();
|
||||||
@@ -1186,7 +962,7 @@ public final class AudioTrack {
     }
 
     // AudioTrack.release can take some time, so we call it on a background thread.
-    final android.media.AudioTrack toRelease = keepSessionIdAudioTrack;
+    final AudioTrack toRelease = keepSessionIdAudioTrack;
     keepSessionIdAudioTrack = null;
     new Thread() {
       @Override
@@ -1367,19 +1143,19 @@ public final class AudioTrack {
         && audioTrack.getPlaybackHeadPosition() == 0;
   }
 
-  private android.media.AudioTrack initializeAudioTrack() throws InitializationException {
-    android.media.AudioTrack audioTrack;
+  private AudioTrack initializeAudioTrack() throws InitializationException {
+    AudioTrack audioTrack;
     if (Util.SDK_INT >= 21) {
       audioTrack = createAudioTrackV21();
     } else {
       int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage);
       if (audioSessionId == C.AUDIO_SESSION_ID_UNSET) {
-        audioTrack = new android.media.AudioTrack(streamType, sampleRate, channelConfig,
-            outputEncoding, bufferSize, MODE_STREAM);
+        audioTrack = new AudioTrack(streamType, sampleRate, channelConfig, outputEncoding,
+            bufferSize, MODE_STREAM);
       } else {
         // Re-attach to the same audio session.
-        audioTrack = new android.media.AudioTrack(streamType, sampleRate, channelConfig,
-            outputEncoding, bufferSize, MODE_STREAM, audioSessionId);
+        audioTrack = new AudioTrack(streamType, sampleRate, channelConfig, outputEncoding,
+            bufferSize, MODE_STREAM, audioSessionId);
       }
     }
 
@ -1397,7 +1173,7 @@ public final class AudioTrack {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@TargetApi(21)
|
@TargetApi(21)
|
||||||
private android.media.AudioTrack createAudioTrackV21() {
|
private AudioTrack createAudioTrackV21() {
|
||||||
android.media.AudioAttributes attributes;
|
android.media.AudioAttributes attributes;
|
||||||
if (tunneling) {
|
if (tunneling) {
|
||||||
attributes = new android.media.AudioAttributes.Builder()
|
attributes = new android.media.AudioAttributes.Builder()
|
||||||
@ -1415,17 +1191,16 @@ public final class AudioTrack {
|
|||||||
.build();
|
.build();
|
||||||
int audioSessionId = this.audioSessionId != C.AUDIO_SESSION_ID_UNSET ? this.audioSessionId
|
int audioSessionId = this.audioSessionId != C.AUDIO_SESSION_ID_UNSET ? this.audioSessionId
|
||||||
: AudioManager.AUDIO_SESSION_ID_GENERATE;
|
: AudioManager.AUDIO_SESSION_ID_GENERATE;
|
||||||
return new android.media.AudioTrack(attributes, format, bufferSize, MODE_STREAM,
|
return new AudioTrack(attributes, format, bufferSize, MODE_STREAM, audioSessionId);
|
||||||
audioSessionId);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private android.media.AudioTrack initializeKeepSessionIdAudioTrack(int audioSessionId) {
|
private AudioTrack initializeKeepSessionIdAudioTrack(int audioSessionId) {
|
||||||
int sampleRate = 4000; // Equal to private android.media.AudioTrack.MIN_SAMPLE_RATE.
|
int sampleRate = 4000; // Equal to private AudioTrack.MIN_SAMPLE_RATE.
|
||||||
int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
|
int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
|
||||||
@C.PcmEncoding int encoding = C.ENCODING_PCM_16BIT;
|
@C.PcmEncoding int encoding = C.ENCODING_PCM_16BIT;
|
||||||
int bufferSize = 2; // Use a two byte buffer, as it is not actually used for playback.
|
int bufferSize = 2; // Use a two byte buffer, as it is not actually used for playback.
|
||||||
return new android.media.AudioTrack(C.STREAM_TYPE_DEFAULT, sampleRate, channelConfig, encoding,
|
return new AudioTrack(C.STREAM_TYPE_DEFAULT, sampleRate, channelConfig, encoding, bufferSize,
|
||||||
bufferSize, MODE_STATIC, audioSessionId);
|
MODE_STATIC, audioSessionId);
|
||||||
}
|
}
|
||||||
|
|
||||||
@C.Encoding
|
@C.Encoding
|
||||||
@ -1457,14 +1232,13 @@ public final class AudioTrack {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@TargetApi(21)
|
@TargetApi(21)
|
||||||
private static int writeNonBlockingV21(android.media.AudioTrack audioTrack, ByteBuffer buffer,
|
private static int writeNonBlockingV21(AudioTrack audioTrack, ByteBuffer buffer, int size) {
|
||||||
int size) {
|
|
||||||
return audioTrack.write(buffer, size, WRITE_NON_BLOCKING);
|
return audioTrack.write(buffer, size, WRITE_NON_BLOCKING);
|
||||||
}
|
}
|
||||||
|
|
||||||
@TargetApi(21)
|
@TargetApi(21)
|
||||||
private int writeNonBlockingWithAvSyncV21(android.media.AudioTrack audioTrack,
|
private int writeNonBlockingWithAvSyncV21(AudioTrack audioTrack, ByteBuffer buffer, int size,
|
||||||
ByteBuffer buffer, int size, long presentationTimeUs) {
|
long presentationTimeUs) {
|
||||||
// TODO: Uncomment this when [Internal ref: b/33627517] is clarified or fixed.
|
// TODO: Uncomment this when [Internal ref: b/33627517] is clarified or fixed.
|
||||||
// if (Util.SDK_INT >= 23) {
|
// if (Util.SDK_INT >= 23) {
|
||||||
// // The underlying platform AudioTrack writes AV sync headers directly.
|
// // The underlying platform AudioTrack writes AV sync headers directly.
|
||||||
@ -1502,21 +1276,21 @@ public final class AudioTrack {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@TargetApi(21)
|
@TargetApi(21)
|
||||||
private static void setVolumeInternalV21(android.media.AudioTrack audioTrack, float volume) {
|
private static void setVolumeInternalV21(AudioTrack audioTrack, float volume) {
|
||||||
audioTrack.setVolume(volume);
|
audioTrack.setVolume(volume);
|
||||||
}
|
}
|
||||||
|
|
||||||
@SuppressWarnings("deprecation")
|
@SuppressWarnings("deprecation")
|
||||||
private static void setVolumeInternalV3(android.media.AudioTrack audioTrack, float volume) {
|
private static void setVolumeInternalV3(AudioTrack audioTrack, float volume) {
|
||||||
audioTrack.setStereoVolume(volume, volume);
|
audioTrack.setStereoVolume(volume, volume);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Wraps an {@link android.media.AudioTrack} to expose useful utility methods.
|
* Wraps an {@link AudioTrack} to expose useful utility methods.
|
||||||
*/
|
*/
|
||||||
private static class AudioTrackUtil {
|
private static class AudioTrackUtil {
|
||||||
|
|
||||||
protected android.media.AudioTrack audioTrack;
|
protected AudioTrack audioTrack;
|
||||||
private boolean needsPassthroughWorkaround;
|
private boolean needsPassthroughWorkaround;
|
||||||
private int sampleRate;
|
private int sampleRate;
|
||||||
private long lastRawPlaybackHeadPosition;
|
private long lastRawPlaybackHeadPosition;
|
||||||
@ -1534,8 +1308,7 @@ public final class AudioTrack {
|
|||||||
* @param needsPassthroughWorkaround Whether to workaround issues with pausing AC-3 passthrough
|
* @param needsPassthroughWorkaround Whether to workaround issues with pausing AC-3 passthrough
|
||||||
* audio tracks on platform API version 21/22.
|
* audio tracks on platform API version 21/22.
|
||||||
*/
|
*/
|
||||||
public void reconfigure(android.media.AudioTrack audioTrack,
|
public void reconfigure(AudioTrack audioTrack, boolean needsPassthroughWorkaround) {
|
||||||
boolean needsPassthroughWorkaround) {
|
|
||||||
this.audioTrack = audioTrack;
|
this.audioTrack = audioTrack;
|
||||||
this.needsPassthroughWorkaround = needsPassthroughWorkaround;
|
this.needsPassthroughWorkaround = needsPassthroughWorkaround;
|
||||||
stopTimestampUs = C.TIME_UNSET;
|
stopTimestampUs = C.TIME_UNSET;
|
||||||
@ -1574,9 +1347,9 @@ public final class AudioTrack {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* {@link android.media.AudioTrack#getPlaybackHeadPosition()} returns a value intended to be
|
* {@link AudioTrack#getPlaybackHeadPosition()} returns a value intended to be interpreted as an
|
||||||
* interpreted as an unsigned 32 bit integer, which also wraps around periodically. This method
|
* unsigned 32 bit integer, which also wraps around periodically. This method returns the
|
||||||
* returns the playback head position as a long that will only wrap around if the value exceeds
|
* playback head position as a long that will only wrap around if the value exceeds
|
||||||
* {@link Long#MAX_VALUE} (which in practice will never happen).
|
* {@link Long#MAX_VALUE} (which in practice will never happen).
|
||||||
*
|
*
|
||||||
* @return The playback head position, in frames.
|
* @return The playback head position, in frames.
|
||||||
@ -1676,8 +1449,7 @@ public final class AudioTrack {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void reconfigure(android.media.AudioTrack audioTrack,
|
public void reconfigure(AudioTrack audioTrack, boolean needsPassthroughWorkaround) {
|
||||||
boolean needsPassthroughWorkaround) {
|
|
||||||
super.reconfigure(audioTrack, needsPassthroughWorkaround);
|
super.reconfigure(audioTrack, needsPassthroughWorkaround);
|
||||||
rawTimestampFramePositionWrapCount = 0;
|
rawTimestampFramePositionWrapCount = 0;
|
||||||
lastRawTimestampFramePosition = 0;
|
lastRawTimestampFramePosition = 0;
|
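For orientation, the shape of the new AudioSink interface that the renderers below program against can be inferred from the call sites in this change. The following is a reconstructed sketch, not part of the diff: member order, parameter names, and the exact CURRENT_POSITION_NOT_SET sentinel value are assumptions, and the real interface may declare additional members.

package com.google.android.exoplayer2.audio;

import com.google.android.exoplayer2.PlaybackParameters;
import java.nio.ByteBuffer;

public interface AudioSink {

  // Listener for sink events; the renderers install one via setListener.
  interface Listener {
    void onAudioSessionId(int audioSessionId);
    void onPositionDiscontinuity();
    void onUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs);
  }

  // Checked failure types surfaced to the renderers, as caught in the hunks below.
  class ConfigurationException extends Exception {}
  class InitializationException extends Exception {}
  class WriteException extends Exception {}

  // Returned by getCurrentPositionUs when no position is available (value assumed).
  long CURRENT_POSITION_NOT_SET = Long.MIN_VALUE;

  void setListener(Listener listener);
  boolean isPassthroughSupported(String mimeType);
  long getCurrentPositionUs(boolean sourceEnded);
  void configure(String inputMimeType, int inputChannelCount, int inputSampleRate,
      int inputPcmEncoding, int specifiedBufferSize, int[] outputChannels, int trimStartSamples,
      int trimEndSamples) throws ConfigurationException;
  void play();
  void handleDiscontinuity();
  boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
      throws InitializationException, WriteException;
  void playToEndOfStream() throws WriteException;
  boolean isEnded();
  boolean hasPendingData();
  PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters);
  PlaybackParameters getPlaybackParameters();
  void setVolume(float volume);
  void setAudioAttributes(AudioAttributes audioAttributes);
  void enableTunnelingV21(int tunnelingAudioSessionId);
  void disableTunneling();
  void pause();
  void reset();
  void release();
}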
@ -40,13 +40,13 @@ import com.google.android.exoplayer2.util.Util;
 import java.nio.ByteBuffer;

 /**
- * Decodes and renders audio using {@link MediaCodec} and {@link AudioTrack}.
+ * Decodes and renders audio using {@link MediaCodec} and an {@link AudioSink}.
  */
 @TargetApi(16)
 public class MediaCodecAudioRenderer extends MediaCodecRenderer implements MediaClock {

   private final EventDispatcher eventDispatcher;
-  private final AudioTrack audioTrack;
+  private final AudioSink audioSink;

   private boolean passthroughEnabled;
   private boolean codecNeedsDiscardChannelsWorkaround;
@ -110,7 +110,7 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
       boolean playClearSamplesWithoutKeys, @Nullable Handler eventHandler,
       @Nullable AudioRendererEventListener eventListener) {
     this(mediaCodecSelector, drmSessionManager, playClearSamplesWithoutKeys, eventHandler,
-        eventListener, null);
+        eventListener, (AudioCapabilities) null);
   }

   /**
@ -135,9 +135,32 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
       boolean playClearSamplesWithoutKeys, @Nullable Handler eventHandler,
       @Nullable AudioRendererEventListener eventListener,
       @Nullable AudioCapabilities audioCapabilities, AudioProcessor... audioProcessors) {
+    this(mediaCodecSelector, drmSessionManager, playClearSamplesWithoutKeys,
+        eventHandler, eventListener, new DefaultAudioSink(audioCapabilities, audioProcessors));
+  }
+
+  /**
+   * @param mediaCodecSelector A decoder selector.
+   * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
+   *     content is not required.
+   * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+   *     For example a media file may start with a short clear region so as to allow playback to
+   *     begin in parallel with key acquisition. This parameter specifies whether the renderer is
+   *     permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+   *     has obtained the keys necessary to decrypt encrypted regions of the media.
+   * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+   *     null if delivery of events is not required.
+   * @param eventListener A listener of events. May be null if delivery of events is not required.
+   * @param audioSink The sink to which audio will be output.
+   */
+  public MediaCodecAudioRenderer(MediaCodecSelector mediaCodecSelector,
+      @Nullable DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
+      boolean playClearSamplesWithoutKeys, @Nullable Handler eventHandler,
+      @Nullable AudioRendererEventListener eventListener, AudioSink audioSink) {
     super(C.TRACK_TYPE_AUDIO, mediaCodecSelector, drmSessionManager, playClearSamplesWithoutKeys);
     eventDispatcher = new EventDispatcher(eventHandler, eventListener);
-    audioTrack = new AudioTrack(audioCapabilities, audioProcessors, new AudioTrackListener());
+    this.audioSink = audioSink;
+    audioSink.setListener(new AudioSinkListener());
   }

   @Override
@ -196,14 +219,14 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media

   /**
    * Returns whether encoded audio passthrough should be used for playing back the input format.
-   * This implementation returns true if the {@link AudioTrack}'s audio capabilities indicate that
-   * passthrough is supported.
+   * This implementation returns true if the {@link AudioSink} indicates that passthrough is
+   * supported.
    *
    * @param mimeType The type of input media.
-   * @return Whether passthrough playback should be used.
+   * @return Whether passthrough playback is supported.
    */
   protected boolean allowPassthrough(String mimeType) {
-    return audioTrack.isPassthroughSupported(mimeType);
+    return audioSink.isPassthroughSupported(mimeType);
   }

   @Override
@ -266,9 +289,9 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
     }

     try {
-      audioTrack.configure(mimeType, channelCount, sampleRate, pcmEncoding, 0, channelMap,
+      audioSink.configure(mimeType, channelCount, sampleRate, pcmEncoding, 0, channelMap,
           encoderDelay, encoderPadding);
-    } catch (AudioTrack.ConfigurationException e) {
+    } catch (AudioSink.ConfigurationException e) {
       throw ExoPlaybackException.createForRenderer(e, getIndex());
     }
   }
@ -279,21 +302,21 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
    * order to spatialize the audio channels. For this use case, any {@link Virtualizer} instances
    * should be released in {@link #onDisabled()} (if not before).
    *
-   * @see AudioTrack.Listener#onAudioSessionId(int)
+   * @see AudioSink.Listener#onAudioSessionId(int)
    */
   protected void onAudioSessionId(int audioSessionId) {
     // Do nothing.
   }

   /**
-   * @see AudioTrack.Listener#onPositionDiscontinuity()
+   * @see AudioSink.Listener#onPositionDiscontinuity()
    */
   protected void onAudioTrackPositionDiscontinuity() {
     // Do nothing.
   }

   /**
-   * @see AudioTrack.Listener#onUnderrun(int, long, long)
+   * @see AudioSink.Listener#onUnderrun(int, long, long)
    */
   protected void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs,
       long elapsedSinceLastFeedMs) {
@ -306,16 +329,16 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
     eventDispatcher.enabled(decoderCounters);
     int tunnelingAudioSessionId = getConfiguration().tunnelingAudioSessionId;
     if (tunnelingAudioSessionId != C.AUDIO_SESSION_ID_UNSET) {
-      audioTrack.enableTunnelingV21(tunnelingAudioSessionId);
+      audioSink.enableTunnelingV21(tunnelingAudioSessionId);
     } else {
-      audioTrack.disableTunneling();
+      audioSink.disableTunneling();
     }
   }

   @Override
   protected void onPositionReset(long positionUs, boolean joining) throws ExoPlaybackException {
     super.onPositionReset(positionUs, joining);
-    audioTrack.reset();
+    audioSink.reset();
     currentPositionUs = positionUs;
     allowPositionDiscontinuity = true;
   }
@ -323,19 +346,19 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
   @Override
   protected void onStarted() {
     super.onStarted();
-    audioTrack.play();
+    audioSink.play();
   }

   @Override
   protected void onStopped() {
-    audioTrack.pause();
+    audioSink.pause();
     super.onStopped();
   }

   @Override
   protected void onDisabled() {
     try {
-      audioTrack.release();
+      audioSink.release();
     } finally {
       try {
         super.onDisabled();
@ -348,18 +371,18 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media

   @Override
   public boolean isEnded() {
-    return super.isEnded() && audioTrack.isEnded();
+    return super.isEnded() && audioSink.isEnded();
   }

   @Override
   public boolean isReady() {
-    return audioTrack.hasPendingData() || super.isReady();
+    return audioSink.hasPendingData() || super.isReady();
   }

   @Override
   public long getPositionUs() {
-    long newCurrentPositionUs = audioTrack.getCurrentPositionUs(isEnded());
-    if (newCurrentPositionUs != AudioTrack.CURRENT_POSITION_NOT_SET) {
+    long newCurrentPositionUs = audioSink.getCurrentPositionUs(isEnded());
+    if (newCurrentPositionUs != AudioSink.CURRENT_POSITION_NOT_SET) {
       currentPositionUs = allowPositionDiscontinuity ? newCurrentPositionUs
           : Math.max(currentPositionUs, newCurrentPositionUs);
       allowPositionDiscontinuity = false;
@ -369,12 +392,12 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media

   @Override
   public PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters) {
-    return audioTrack.setPlaybackParameters(playbackParameters);
+    return audioSink.setPlaybackParameters(playbackParameters);
   }

   @Override
   public PlaybackParameters getPlaybackParameters() {
-    return audioTrack.getPlaybackParameters();
+    return audioSink.getPlaybackParameters();
   }

   @Override
@ -390,17 +413,17 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
     if (shouldSkip) {
       codec.releaseOutputBuffer(bufferIndex, false);
       decoderCounters.skippedOutputBufferCount++;
-      audioTrack.handleDiscontinuity();
+      audioSink.handleDiscontinuity();
       return true;
     }

     try {
-      if (audioTrack.handleBuffer(buffer, bufferPresentationTimeUs)) {
+      if (audioSink.handleBuffer(buffer, bufferPresentationTimeUs)) {
         codec.releaseOutputBuffer(bufferIndex, false);
         decoderCounters.renderedOutputBufferCount++;
         return true;
       }
-    } catch (AudioTrack.InitializationException | AudioTrack.WriteException e) {
+    } catch (AudioSink.InitializationException | AudioSink.WriteException e) {
       throw ExoPlaybackException.createForRenderer(e, getIndex());
     }
     return false;
@ -409,8 +432,8 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
   @Override
   protected void renderToEndOfStream() throws ExoPlaybackException {
     try {
-      audioTrack.playToEndOfStream();
-    } catch (AudioTrack.WriteException e) {
+      audioSink.playToEndOfStream();
+    } catch (AudioSink.WriteException e) {
       throw ExoPlaybackException.createForRenderer(e, getIndex());
     }
   }
@ -419,11 +442,11 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
   public void handleMessage(int messageType, Object message) throws ExoPlaybackException {
     switch (messageType) {
       case C.MSG_SET_VOLUME:
-        audioTrack.setVolume((Float) message);
+        audioSink.setVolume((Float) message);
         break;
       case C.MSG_SET_AUDIO_ATTRIBUTES:
         AudioAttributes audioAttributes = (AudioAttributes) message;
-        audioTrack.setAudioAttributes(audioAttributes);
+        audioSink.setAudioAttributes(audioAttributes);
         break;
       default:
         super.handleMessage(messageType, message);
@ -445,7 +468,7 @@ public class MediaCodecAudioRenderer extends MediaCodecRenderer implements Media
         || Util.DEVICE.startsWith("heroqlte"));
   }

-  private final class AudioTrackListener implements AudioTrack.Listener {
+  private final class AudioSinkListener implements AudioSink.Listener {

     @Override
     public void onAudioSessionId(int audioSessionId) {
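The practical effect of the constructor changes above is that the sink is injected rather than created internally. A minimal sketch of how an application might use the two forms; the CustomSinkExample class, the null DRM arguments, and the empty processor array are illustrative assumptions, not part of this change.

package com.example.audio; // Illustrative package, not part of this change.

import android.os.Handler;
import com.google.android.exoplayer2.audio.AudioProcessor;
import com.google.android.exoplayer2.audio.AudioRendererEventListener;
import com.google.android.exoplayer2.audio.AudioSink;
import com.google.android.exoplayer2.audio.DefaultAudioSink;
import com.google.android.exoplayer2.audio.MediaCodecAudioRenderer;
import com.google.android.exoplayer2.mediacodec.MediaCodecSelector;

final class CustomSinkExample {

  // Injects a caller-supplied sink, which may be a completely custom AudioSink.
  static MediaCodecAudioRenderer buildRenderer(Handler eventHandler,
      AudioRendererEventListener eventListener, AudioSink sink) {
    return new MediaCodecAudioRenderer(MediaCodecSelector.DEFAULT,
        /* drmSessionManager= */ null, /* playClearSamplesWithoutKeys= */ false,
        eventHandler, eventListener, sink);
  }

  // Mirrors what the pre-existing convenience constructor now does internally:
  // it wraps the capabilities and processors in a DefaultAudioSink.
  static MediaCodecAudioRenderer buildWithDefaultSink(Handler eventHandler,
      AudioRendererEventListener eventListener) {
    return buildRenderer(eventHandler, eventListener,
        new DefaultAudioSink(/* audioCapabilities= */ null, new AudioProcessor[0]));
  }

  private CustomSinkExample() {}
}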
@ -72,7 +72,7 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
   private final DrmSessionManager<ExoMediaCrypto> drmSessionManager;
   private final boolean playClearSamplesWithoutKeys;
   private final EventDispatcher eventDispatcher;
-  private final AudioTrack audioTrack;
+  private final AudioSink audioSink;
   private final FormatHolder formatHolder;
   private final DecoderInputBuffer flagsOnlyBuffer;

@ -107,8 +107,8 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
    * @param eventListener A listener of events. May be null if delivery of events is not required.
    * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
    */
-  public SimpleDecoderAudioRenderer(Handler eventHandler,
-      AudioRendererEventListener eventListener, AudioProcessor... audioProcessors) {
+  public SimpleDecoderAudioRenderer(Handler eventHandler, AudioRendererEventListener eventListener,
+      AudioProcessor... audioProcessors) {
     this(eventHandler, eventListener, null, null, false, audioProcessors);
   }

@ -119,8 +119,8 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
    * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
    *     default capabilities (no encoded audio passthrough support) should be assumed.
    */
-  public SimpleDecoderAudioRenderer(Handler eventHandler,
-      AudioRendererEventListener eventListener, AudioCapabilities audioCapabilities) {
+  public SimpleDecoderAudioRenderer(Handler eventHandler, AudioRendererEventListener eventListener,
+      AudioCapabilities audioCapabilities) {
     this(eventHandler, eventListener, audioCapabilities, null, false);
   }

@ -139,15 +139,35 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
    * has obtained the keys necessary to decrypt encrypted regions of the media.
    * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
    */
-  public SimpleDecoderAudioRenderer(Handler eventHandler,
-      AudioRendererEventListener eventListener, AudioCapabilities audioCapabilities,
+  public SimpleDecoderAudioRenderer(Handler eventHandler, AudioRendererEventListener eventListener,
+      AudioCapabilities audioCapabilities, DrmSessionManager<ExoMediaCrypto> drmSessionManager,
+      boolean playClearSamplesWithoutKeys, AudioProcessor... audioProcessors) {
+    this(eventHandler, eventListener, drmSessionManager,
+        playClearSamplesWithoutKeys, new DefaultAudioSink(audioCapabilities, audioProcessors));
+  }
+
+  /**
+   * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
+   *     null if delivery of events is not required.
+   * @param eventListener A listener of events. May be null if delivery of events is not required.
+   * @param drmSessionManager For use with encrypted media. May be null if support for encrypted
+   *     media is not required.
+   * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
+   *     For example a media file may start with a short clear region so as to allow playback to
+   *     begin in parallel with key acquisition. This parameter specifies whether the renderer is
+   *     permitted to play clear regions of encrypted media files before {@code drmSessionManager}
+   *     has obtained the keys necessary to decrypt encrypted regions of the media.
+   * @param audioSink The sink to which audio will be output.
+   */
+  public SimpleDecoderAudioRenderer(Handler eventHandler, AudioRendererEventListener eventListener,
       DrmSessionManager<ExoMediaCrypto> drmSessionManager, boolean playClearSamplesWithoutKeys,
-      AudioProcessor... audioProcessors) {
+      AudioSink audioSink) {
     super(C.TRACK_TYPE_AUDIO);
     this.drmSessionManager = drmSessionManager;
     this.playClearSamplesWithoutKeys = playClearSamplesWithoutKeys;
     eventDispatcher = new EventDispatcher(eventHandler, eventListener);
-    audioTrack = new AudioTrack(audioCapabilities, audioProcessors, new AudioTrackListener());
+    this.audioSink = audioSink;
+    audioSink.setListener(new AudioSinkListener());
     formatHolder = new FormatHolder();
     flagsOnlyBuffer = DecoderInputBuffer.newFlagsOnlyInstance();
     decoderReinitializationState = REINITIALIZATION_STATE_NONE;
@ -184,8 +204,8 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
   public void render(long positionUs, long elapsedRealtimeUs) throws ExoPlaybackException {
     if (outputStreamEnded) {
       try {
-        audioTrack.playToEndOfStream();
-      } catch (AudioTrack.WriteException e) {
+        audioSink.playToEndOfStream();
+      } catch (AudioSink.WriteException e) {
         throw ExoPlaybackException.createForRenderer(e, getIndex());
       }
       return;
@ -220,8 +240,8 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
       while (drainOutputBuffer()) {}
       while (feedInputBuffer()) {}
       TraceUtil.endSection();
-    } catch (AudioDecoderException | AudioTrack.ConfigurationException
-        | AudioTrack.InitializationException | AudioTrack.WriteException e) {
+    } catch (AudioDecoderException | AudioSink.ConfigurationException
+        | AudioSink.InitializationException | AudioSink.WriteException e) {
       throw ExoPlaybackException.createForRenderer(e, getIndex());
     }
     decoderCounters.ensureUpdated();
@ -234,21 +254,21 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
    * order to spatialize the audio channels. For this use case, any {@link Virtualizer} instances
    * should be released in {@link #onDisabled()} (if not before).
    *
-   * @see AudioTrack.Listener#onAudioSessionId(int)
+   * @see AudioSink.Listener#onAudioSessionId(int)
    */
   protected void onAudioSessionId(int audioSessionId) {
     // Do nothing.
   }

   /**
-   * @see AudioTrack.Listener#onPositionDiscontinuity()
+   * @see AudioSink.Listener#onPositionDiscontinuity()
    */
   protected void onAudioTrackPositionDiscontinuity() {
     // Do nothing.
   }

   /**
-   * @see AudioTrack.Listener#onUnderrun(int, long, long)
+   * @see AudioSink.Listener#onUnderrun(int, long, long)
    */
   protected void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs,
       long elapsedSinceLastFeedMs) {
@ -282,8 +302,8 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
   }

   private boolean drainOutputBuffer() throws ExoPlaybackException, AudioDecoderException,
-      AudioTrack.ConfigurationException, AudioTrack.InitializationException,
-      AudioTrack.WriteException {
+      AudioSink.ConfigurationException, AudioSink.InitializationException,
+      AudioSink.WriteException {
     if (outputBuffer == null) {
       outputBuffer = decoder.dequeueOutputBuffer();
       if (outputBuffer == null) {
@ -309,12 +329,12 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements

     if (audioTrackNeedsConfigure) {
       Format outputFormat = getOutputFormat();
-      audioTrack.configure(outputFormat.sampleMimeType, outputFormat.channelCount,
+      audioSink.configure(outputFormat.sampleMimeType, outputFormat.channelCount,
           outputFormat.sampleRate, outputFormat.pcmEncoding, 0, null, encoderDelay, encoderPadding);
       audioTrackNeedsConfigure = false;
     }

-    if (audioTrack.handleBuffer(outputBuffer.data, outputBuffer.timeUs)) {
+    if (audioSink.handleBuffer(outputBuffer.data, outputBuffer.timeUs)) {
       decoderCounters.renderedOutputBufferCount++;
       outputBuffer.release();
       outputBuffer = null;
@ -394,8 +414,8 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
   private void processEndOfStream() throws ExoPlaybackException {
     outputStreamEnded = true;
     try {
-      audioTrack.playToEndOfStream();
-    } catch (AudioTrack.WriteException e) {
+      audioSink.playToEndOfStream();
+    } catch (AudioSink.WriteException e) {
       throw ExoPlaybackException.createForRenderer(drmSession.getError(), getIndex());
     }
   }
@ -418,19 +438,19 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements

   @Override
   public boolean isEnded() {
-    return outputStreamEnded && audioTrack.isEnded();
+    return outputStreamEnded && audioSink.isEnded();
   }

   @Override
   public boolean isReady() {
-    return audioTrack.hasPendingData()
+    return audioSink.hasPendingData()
         || (inputFormat != null && !waitingForKeys && (isSourceReady() || outputBuffer != null));
   }

   @Override
   public long getPositionUs() {
-    long newCurrentPositionUs = audioTrack.getCurrentPositionUs(isEnded());
-    if (newCurrentPositionUs != AudioTrack.CURRENT_POSITION_NOT_SET) {
+    long newCurrentPositionUs = audioSink.getCurrentPositionUs(isEnded());
+    if (newCurrentPositionUs != AudioSink.CURRENT_POSITION_NOT_SET) {
       currentPositionUs = allowPositionDiscontinuity ? newCurrentPositionUs
           : Math.max(currentPositionUs, newCurrentPositionUs);
       allowPositionDiscontinuity = false;
@ -440,12 +460,12 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements

   @Override
   public PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters) {
-    return audioTrack.setPlaybackParameters(playbackParameters);
+    return audioSink.setPlaybackParameters(playbackParameters);
   }

   @Override
   public PlaybackParameters getPlaybackParameters() {
-    return audioTrack.getPlaybackParameters();
+    return audioSink.getPlaybackParameters();
   }

   @Override
@ -454,15 +474,15 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
     eventDispatcher.enabled(decoderCounters);
     int tunnelingAudioSessionId = getConfiguration().tunnelingAudioSessionId;
     if (tunnelingAudioSessionId != C.AUDIO_SESSION_ID_UNSET) {
-      audioTrack.enableTunnelingV21(tunnelingAudioSessionId);
+      audioSink.enableTunnelingV21(tunnelingAudioSessionId);
     } else {
-      audioTrack.disableTunneling();
+      audioSink.disableTunneling();
     }
   }

   @Override
   protected void onPositionReset(long positionUs, boolean joining) throws ExoPlaybackException {
-    audioTrack.reset();
+    audioSink.reset();
     currentPositionUs = positionUs;
     allowPositionDiscontinuity = true;
     inputStreamEnded = false;
@ -474,12 +494,12 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements

   @Override
   protected void onStarted() {
-    audioTrack.play();
+    audioSink.play();
   }

   @Override
   protected void onStopped() {
-    audioTrack.pause();
+    audioSink.pause();
   }

   @Override
@ -489,7 +509,7 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
     waitingForKeys = false;
     try {
       releaseDecoder();
-      audioTrack.release();
+      audioSink.release();
     } finally {
       try {
         if (drmSession != null) {
@ -599,11 +619,11 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
   public void handleMessage(int messageType, Object message) throws ExoPlaybackException {
     switch (messageType) {
       case C.MSG_SET_VOLUME:
-        audioTrack.setVolume((Float) message);
+        audioSink.setVolume((Float) message);
         break;
       case C.MSG_SET_AUDIO_ATTRIBUTES:
         AudioAttributes audioAttributes = (AudioAttributes) message;
-        audioTrack.setAudioAttributes(audioAttributes);
+        audioSink.setAudioAttributes(audioAttributes);
         break;
       default:
         super.handleMessage(messageType, message);
@ -611,7 +631,7 @@ public abstract class SimpleDecoderAudioRenderer extends BaseRenderer implements
     }
   }

-  private final class AudioTrackListener implements AudioTrack.Listener {
+  private final class AudioSinkListener implements AudioSink.Listener {

     @Override
     public void onAudioSessionId(int audioSessionId) {
@ -55,7 +55,7 @@ import java.nio.ByteOrder;
    *
    * @param trimStartSamples The number of audio samples to trim from the start of audio.
    * @param trimEndSamples The number of audio samples to trim from the end of audio.
-   * @see AudioTrack#configure(String, int, int, int, int, int[], int, int)
+   * @see AudioSink#configure(String, int, int, int, int, int[], int, int)
    */
   public void setTrimSampleCount(int trimStartSamples, int trimEndSamples) {
     this.trimStartSamples = trimStartSamples;
@ -29,7 +29,7 @@ import com.google.android.exoplayer2.Player;
 import com.google.android.exoplayer2.RenderersFactory;
 import com.google.android.exoplayer2.SimpleExoPlayer;
 import com.google.android.exoplayer2.audio.AudioRendererEventListener;
-import com.google.android.exoplayer2.audio.AudioTrack;
+import com.google.android.exoplayer2.audio.DefaultAudioSink;
 import com.google.android.exoplayer2.decoder.DecoderCounters;
 import com.google.android.exoplayer2.drm.DrmSessionManager;
 import com.google.android.exoplayer2.drm.FrameworkMediaCrypto;
@ -53,10 +53,10 @@ public abstract class ExoHostedTest extends Player.DefaultEventListener implemen
     AudioRendererEventListener, VideoRendererEventListener {

   static {
-    // ExoPlayer's AudioTrack class is able to work around spurious timestamps reported by the
-    // platform (by ignoring them). Disable this workaround, since we're interested in testing
-    // that the underlying platform is behaving correctly.
-    AudioTrack.failOnSpuriousAudioTimestamp = true;
+    // DefaultAudioSink is able to work around spurious timestamps reported by the platform (by
+    // ignoring them). Disable this workaround, since we're interested in testing that the
+    // underlying platform is behaving correctly.
+    DefaultAudioSink.failOnSpuriousAudioTimestamp = true;
   }

   public static final long MAX_PLAYING_TIME_DISCREPANCY_MS = 2000;
@ -253,7 +253,7 @@ public abstract class ExoHostedTest extends Player.DefaultEventListener implemen
   }

   @Override
-  public void onAudioTrackUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {
+  public void onAudioSinkUnderrun(int bufferSize, long bufferSizeMs, long elapsedSinceLastFeedMs) {
     Log.e(tag, "audioTrackUnderrun [" + bufferSize + ", " + bufferSizeMs + ", "
         + elapsedSinceLastFeedMs + "]", null);
   }
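Because the AudioRendererEventListener underrun callback is renamed, application code implementing the listener needs a matching update. A sketch, assuming the interface's remaining callbacks are unchanged at this version; the UnderrunLogger class is illustrative, not part of this change.

package com.example.audio; // Illustrative package, not part of this change.

import android.util.Log;
import com.google.android.exoplayer2.Format;
import com.google.android.exoplayer2.audio.AudioRendererEventListener;
import com.google.android.exoplayer2.decoder.DecoderCounters;

final class UnderrunLogger implements AudioRendererEventListener {

  private static final String TAG = "UnderrunLogger";

  // Renamed from onAudioTrackUnderrun; parameters and semantics are unchanged.
  @Override
  public void onAudioSinkUnderrun(int bufferSize, long bufferSizeMs,
      long elapsedSinceLastFeedMs) {
    Log.w(TAG, "underrun [" + bufferSize + ", " + bufferSizeMs + ", "
        + elapsedSinceLastFeedMs + "]");
  }

  // The remaining callbacks are not renamed by this change.
  @Override
  public void onAudioEnabled(DecoderCounters counters) {}

  @Override
  public void onAudioSessionId(int audioSessionId) {}

  @Override
  public void onAudioDecoderInitialized(String decoderName, long initializedTimestampMs,
      long initializationDurationMs) {}

  @Override
  public void onAudioInputFormatChanged(Format format) {}

  @Override
  public void onAudioDisabled(DecoderCounters counters) {}
}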