Invert the ownership scheme between SampleQueue and SampleMetadataQueue
Saves around 200 lines of code. High-level overview: SampleQueue is renamed to SampleDataQueue, and SampleMetadataQueue is renamed to SampleQueue. This CL should not introduce behavior changes. The only significant changes in synchronization are: (1) the synchronized keyword is added to isReady, which appears to be necessary; and (2) the synchronized keyword is added to SampleQueue.sampleMetadata — before this change, SampleQueue.sampleMetadata could acquire the SampleMetadataQueue lock three times in a single method call. Other miscellaneous improvements: all private methods are moved to the bottom of the class, and release() is moved to the correct category. PiperOrigin-RevId: 283795844
This commit is contained in:
parent
9974670cc7
commit
14897fb6df
@ -0,0 +1,466 @@
|
||||
/*
|
||||
* Copyright (C) 2019 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package com.google.android.exoplayer2.source;
|
||||
|
||||
import androidx.annotation.Nullable;
|
||||
import com.google.android.exoplayer2.C;
|
||||
import com.google.android.exoplayer2.decoder.DecoderInputBuffer;
|
||||
import com.google.android.exoplayer2.extractor.ExtractorInput;
|
||||
import com.google.android.exoplayer2.extractor.TrackOutput.CryptoData;
|
||||
import com.google.android.exoplayer2.source.SampleQueue.SampleExtrasHolder;
|
||||
import com.google.android.exoplayer2.upstream.Allocation;
|
||||
import com.google.android.exoplayer2.upstream.Allocator;
|
||||
import com.google.android.exoplayer2.util.ParsableByteArray;
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
/** A queue of media sample data. */
|
||||
/* package */ class SampleDataQueue {
|
||||
|
||||
  // Initial size (in bytes) of the scratch buffer used to parse encryption metadata and
  // supplemental-data size prefixes. ParsableByteArray grows it on demand via reset(int).
  private static final int INITIAL_SCRATCH_SIZE = 32;

  private final Allocator allocator;
  // Fixed length of each Allocation, as reported by the Allocator.
  private final int allocationLength;
  private final ParsableByteArray scratch;

  // References into the linked list of allocations.
  private AllocationNode firstAllocationNode;
  private AllocationNode readAllocationNode;
  private AllocationNode writeAllocationNode;

  // Accessed only by the loading thread (or the consuming thread when there is no loading thread).
  private long totalBytesWritten;
|
||||
|
||||
public SampleDataQueue(Allocator allocator) {
|
||||
this.allocator = allocator;
|
||||
allocationLength = allocator.getIndividualAllocationLength();
|
||||
scratch = new ParsableByteArray(INITIAL_SCRATCH_SIZE);
|
||||
firstAllocationNode = new AllocationNode(/* startPosition= */ 0, allocationLength);
|
||||
readAllocationNode = firstAllocationNode;
|
||||
writeAllocationNode = firstAllocationNode;
|
||||
}
|
||||
|
||||
// Called by the consuming thread, but only when there is no loading thread.
|
||||
|
||||
/** Clears all sample data. */
|
||||
public void reset() {
|
||||
clearAllocationNodes(firstAllocationNode);
|
||||
firstAllocationNode = new AllocationNode(0, allocationLength);
|
||||
readAllocationNode = firstAllocationNode;
|
||||
writeAllocationNode = firstAllocationNode;
|
||||
totalBytesWritten = 0;
|
||||
allocator.trim();
|
||||
}
|
||||
|
||||
  /**
   * Discards sample data bytes from the write side of the queue.
   *
   * @param totalBytesWritten The reduced total number of bytes written after the samples have been
   *     discarded, or 0 if the queue is now empty.
   */
  public void discardUpstreamSampleBytes(long totalBytesWritten) {
    this.totalBytesWritten = totalBytesWritten;
    if (this.totalBytesWritten == 0
        || this.totalBytesWritten == firstAllocationNode.startPosition) {
      // Nothing is retained: release every node and restart with a single uninitialized node at
      // the new write position.
      clearAllocationNodes(firstAllocationNode);
      firstAllocationNode = new AllocationNode(this.totalBytesWritten, allocationLength);
      readAllocationNode = firstAllocationNode;
      writeAllocationNode = firstAllocationNode;
    } else {
      // Find the last node containing at least 1 byte of data that we need to keep.
      AllocationNode lastNodeToKeep = firstAllocationNode;
      while (this.totalBytesWritten > lastNodeToKeep.endPosition) {
        lastNodeToKeep = lastNodeToKeep.next;
      }
      // Discard all subsequent nodes.
      AllocationNode firstNodeToDiscard = lastNodeToKeep.next;
      clearAllocationNodes(firstNodeToDiscard);
      // Reset the successor of the last node to be an uninitialized node.
      lastNodeToKeep.next = new AllocationNode(lastNodeToKeep.endPosition, allocationLength);
      // Update writeAllocationNode and readAllocationNode as necessary. If the new write position
      // lands exactly on the kept node's end, writing continues in the fresh successor node.
      writeAllocationNode =
          this.totalBytesWritten == lastNodeToKeep.endPosition
              ? lastNodeToKeep.next
              : lastNodeToKeep;
      if (readAllocationNode == firstNodeToDiscard) {
        readAllocationNode = lastNodeToKeep.next;
      }
    }
  }
|
||||
|
||||
// Called by the consuming thread.
|
||||
|
||||
  /** Rewinds the read position to the first sample in the queue. */
  public void rewind() {
    // The read cursor simply re-targets the first retained node; no data is copied or released.
    readAllocationNode = firstAllocationNode;
  }
|
||||
|
||||
  /**
   * Reads data from the rolling buffer to populate a decoder input buffer.
   *
   * @param buffer The buffer to populate.
   * @param extrasHolder The extras holder whose offset should be read and subsequently adjusted.
   */
  public void readToBuffer(DecoderInputBuffer buffer, SampleExtrasHolder extrasHolder) {
    // Read encryption data if the sample is encrypted. This consumes the leading bytes of the
    // sample and adjusts extrasHolder.offset/size accordingly.
    if (buffer.isEncrypted()) {
      readEncryptionData(buffer, extrasHolder);
    }
    // Read sample data, extracting supplemental data into a separate buffer if needed.
    if (buffer.hasSupplementalData()) {
      // If there is supplemental data, the sample data is prefixed by its size (4 bytes).
      scratch.reset(4);
      readData(extrasHolder.offset, scratch.data, 4);
      int sampleSize = scratch.readUnsignedIntToInt();
      extrasHolder.offset += 4;
      extrasHolder.size -= 4;

      // Write the sample data.
      buffer.ensureSpaceForWrite(sampleSize);
      readData(extrasHolder.offset, buffer.data, sampleSize);
      extrasHolder.offset += sampleSize;
      extrasHolder.size -= sampleSize;

      // Write the remaining data as supplemental data.
      buffer.resetSupplementalData(extrasHolder.size);
      readData(extrasHolder.offset, buffer.supplementalData, extrasHolder.size);
    } else {
      // Write the sample data.
      buffer.ensureSpaceForWrite(extrasHolder.size);
      readData(extrasHolder.offset, buffer.data, extrasHolder.size);
    }
  }
|
||||
|
||||
  /**
   * Advances the read position to the specified absolute position.
   *
   * @param absolutePosition The new absolute read position. May be {@link C#POSITION_UNSET}, in
   *     which case calling this method is a no-op.
   */
  public void discardDownstreamTo(long absolutePosition) {
    if (absolutePosition == C.POSITION_UNSET) {
      return;
    }
    while (absolutePosition >= firstAllocationNode.endPosition) {
      // Advance firstAllocationNode to the specified absolute position. Also clear nodes that are
      // advanced past, and return their underlying allocations to the allocator.
      allocator.release(firstAllocationNode.allocation);
      firstAllocationNode = firstAllocationNode.clear();
    }
    if (readAllocationNode.startPosition < firstAllocationNode.startPosition) {
      // We discarded the node referenced by readAllocationNode. We need to advance it to the first
      // remaining node.
      readAllocationNode = firstAllocationNode;
    }
  }
|
||||
|
||||
// Called by the loading thread.
|
||||
|
||||
  /** Returns the total number of bytes written to the queue since creation or the last reset. */
  public long getTotalBytesWritten() {
    return totalBytesWritten;
  }
|
||||
|
||||
  /**
   * Appends data to the queue by reading from an {@link ExtractorInput}.
   *
   * @param input The input from which to read.
   * @param length The maximum number of bytes to append.
   * @param allowEndOfInput Whether reaching the end of the input is allowed; if false an
   *     {@link EOFException} is thrown instead of returning {@link C#RESULT_END_OF_INPUT}.
   * @return The number of bytes appended, or {@link C#RESULT_END_OF_INPUT} if the input has ended
   *     and {@code allowEndOfInput} is true. May be less than {@code length}, since a single read
   *     never crosses an allocation boundary.
   * @throws EOFException If the input ended and {@code allowEndOfInput} is false.
   */
  public int sampleData(ExtractorInput input, int length, boolean allowEndOfInput)
      throws IOException, InterruptedException {
    // Clamp length to the space left in the current write allocation (initializing it if needed).
    length = preAppend(length);
    int bytesAppended =
        input.read(
            writeAllocationNode.allocation.data,
            writeAllocationNode.translateOffset(totalBytesWritten),
            length);
    if (bytesAppended == C.RESULT_END_OF_INPUT) {
      if (allowEndOfInput) {
        return C.RESULT_END_OF_INPUT;
      }
      throw new EOFException();
    }
    postAppend(bytesAppended);
    return bytesAppended;
  }
|
||||
|
||||
  /**
   * Appends {@code length} bytes from the given buffer to the queue, splitting the copy across
   * allocation boundaries as necessary. Unlike the {@link ExtractorInput} overload, this method
   * always consumes exactly {@code length} bytes.
   *
   * @param buffer The buffer from which bytes are read.
   * @param length The number of bytes to append.
   */
  public void sampleData(ParsableByteArray buffer, int length) {
    while (length > 0) {
      // preAppend clamps to the current allocation, so we may loop several times.
      int bytesAppended = preAppend(length);
      buffer.readBytes(
          writeAllocationNode.allocation.data,
          writeAllocationNode.translateOffset(totalBytesWritten),
          bytesAppended);
      length -= bytesAppended;
      postAppend(bytesAppended);
    }
  }
|
||||
|
||||
// Private methods.
|
||||
|
||||
  /**
   * Reads encryption data for the current sample.
   *
   * <p>The encryption data is written into {@link DecoderInputBuffer#cryptoInfo}, and {@link
   * SampleExtrasHolder#size} is adjusted to subtract the number of bytes that were read. The same
   * value is added to {@link SampleExtrasHolder#offset}.
   *
   * @param buffer The buffer into which the encryption data should be written.
   * @param extrasHolder The extras holder whose offset should be read and subsequently adjusted.
   */
  private void readEncryptionData(DecoderInputBuffer buffer, SampleExtrasHolder extrasHolder) {
    long offset = extrasHolder.offset;

    // Read the signal byte. The high bit signals subsample encryption; the low 7 bits hold the
    // initialization vector size.
    scratch.reset(1);
    readData(offset, scratch.data, 1);
    offset++;
    byte signalByte = scratch.data[0];
    boolean subsampleEncryption = (signalByte & 0x80) != 0;
    int ivSize = signalByte & 0x7F;

    // Read the initialization vector.
    if (buffer.cryptoInfo.iv == null) {
      buffer.cryptoInfo.iv = new byte[16];
    }
    readData(offset, buffer.cryptoInfo.iv, ivSize);
    offset += ivSize;

    // Read the subsample count, if present (2 bytes, unsigned).
    int subsampleCount;
    if (subsampleEncryption) {
      scratch.reset(2);
      readData(offset, scratch.data, 2);
      offset += 2;
      subsampleCount = scratch.readUnsignedShort();
    } else {
      subsampleCount = 1;
    }

    // Write the clear and encrypted subsample sizes, reusing existing arrays when large enough.
    int[] clearDataSizes = buffer.cryptoInfo.numBytesOfClearData;
    if (clearDataSizes == null || clearDataSizes.length < subsampleCount) {
      clearDataSizes = new int[subsampleCount];
    }
    int[] encryptedDataSizes = buffer.cryptoInfo.numBytesOfEncryptedData;
    if (encryptedDataSizes == null || encryptedDataSizes.length < subsampleCount) {
      encryptedDataSizes = new int[subsampleCount];
    }
    if (subsampleEncryption) {
      // Each subsample entry is 6 bytes: 2-byte clear size followed by 4-byte encrypted size.
      int subsampleDataLength = 6 * subsampleCount;
      scratch.reset(subsampleDataLength);
      readData(offset, scratch.data, subsampleDataLength);
      offset += subsampleDataLength;
      scratch.setPosition(0);
      for (int i = 0; i < subsampleCount; i++) {
        clearDataSizes[i] = scratch.readUnsignedShort();
        encryptedDataSizes[i] = scratch.readUnsignedIntToInt();
      }
    } else {
      // Single fully-encrypted subsample: everything after the metadata we've consumed so far.
      clearDataSizes[0] = 0;
      encryptedDataSizes[0] = extrasHolder.size - (int) (offset - extrasHolder.offset);
    }

    // Populate the cryptoInfo.
    CryptoData cryptoData = extrasHolder.cryptoData;
    buffer.cryptoInfo.set(
        subsampleCount,
        clearDataSizes,
        encryptedDataSizes,
        cryptoData.encryptionKey,
        buffer.cryptoInfo.iv,
        cryptoData.cryptoMode,
        cryptoData.encryptedBlocks,
        cryptoData.clearBlocks);

    // Adjust the offset and size to take into account the bytes read.
    int bytesRead = (int) (offset - extrasHolder.offset);
    extrasHolder.offset += bytesRead;
    extrasHolder.size -= bytesRead;
  }
|
||||
|
||||
  /**
   * Reads data from the front of the rolling buffer.
   *
   * @param absolutePosition The absolute position from which data should be read.
   * @param target The buffer into which data should be written.
   * @param length The number of bytes to read.
   */
  private void readData(long absolutePosition, ByteBuffer target, int length) {
    advanceReadTo(absolutePosition);
    int remaining = length;
    while (remaining > 0) {
      // Copy at most up to the end of the current allocation node, then step to the next one.
      int toCopy = Math.min(remaining, (int) (readAllocationNode.endPosition - absolutePosition));
      Allocation allocation = readAllocationNode.allocation;
      target.put(allocation.data, readAllocationNode.translateOffset(absolutePosition), toCopy);
      remaining -= toCopy;
      absolutePosition += toCopy;
      if (absolutePosition == readAllocationNode.endPosition) {
        readAllocationNode = readAllocationNode.next;
      }
    }
  }
|
||||
|
||||
  /**
   * Reads data from the front of the rolling buffer.
   *
   * @param absolutePosition The absolute position from which data should be read.
   * @param target The array into which data should be written.
   * @param length The number of bytes to read.
   */
  private void readData(long absolutePosition, byte[] target, int length) {
    advanceReadTo(absolutePosition);
    int remaining = length;
    while (remaining > 0) {
      // Copy at most up to the end of the current allocation node, then step to the next one.
      int toCopy = Math.min(remaining, (int) (readAllocationNode.endPosition - absolutePosition));
      Allocation allocation = readAllocationNode.allocation;
      System.arraycopy(
          allocation.data,
          readAllocationNode.translateOffset(absolutePosition),
          target,
          length - remaining,
          toCopy);
      remaining -= toCopy;
      absolutePosition += toCopy;
      if (absolutePosition == readAllocationNode.endPosition) {
        readAllocationNode = readAllocationNode.next;
      }
    }
  }
|
||||
|
||||
/**
|
||||
* Advances the read position to the specified absolute position.
|
||||
*
|
||||
* @param absolutePosition The position to which {@link #readAllocationNode} should be advanced.
|
||||
*/
|
||||
private void advanceReadTo(long absolutePosition) {
|
||||
while (absolutePosition >= readAllocationNode.endPosition) {
|
||||
readAllocationNode = readAllocationNode.next;
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Clears allocation nodes starting from {@code fromNode}.
   *
   * @param fromNode The node from which to clear.
   */
  private void clearAllocationNodes(AllocationNode fromNode) {
    if (!fromNode.wasInitialized) {
      return;
    }
    // Bulk release allocations for performance (it's significantly faster when using
    // DefaultAllocator because the allocator's lock only needs to be acquired and released once)
    // [Internal: See b/29542039].
    // Node positions advance in fixed allocationLength steps, so the count of initialized nodes
    // from fromNode up to (and possibly including) writeAllocationNode can be computed directly.
    int allocationCount =
        (writeAllocationNode.wasInitialized ? 1 : 0)
            + ((int) (writeAllocationNode.startPosition - fromNode.startPosition)
                / allocationLength);
    Allocation[] allocationsToRelease = new Allocation[allocationCount];
    AllocationNode currentNode = fromNode;
    for (int i = 0; i < allocationsToRelease.length; i++) {
      allocationsToRelease[i] = currentNode.allocation;
      currentNode = currentNode.clear();
    }
    allocator.release(allocationsToRelease);
  }
|
||||
|
||||
  /**
   * Called before writing sample data to {@link #writeAllocationNode}. May cause {@link
   * #writeAllocationNode} to be initialized.
   *
   * @param length The number of bytes that the caller wishes to write.
   * @return The number of bytes that the caller is permitted to write, which may be less than
   *     {@code length}.
   */
  private int preAppend(int length) {
    if (!writeAllocationNode.wasInitialized) {
      // Lazily obtain an allocation and link in an (uninitialized) successor node.
      writeAllocationNode.initialize(
          allocator.allocate(),
          new AllocationNode(writeAllocationNode.endPosition, allocationLength));
    }
    // Never permit a write to span past the end of the current allocation.
    return Math.min(length, (int) (writeAllocationNode.endPosition - totalBytesWritten));
  }
|
||||
|
||||
  /**
   * Called after writing sample data. May cause {@link #writeAllocationNode} to be advanced.
   *
   * @param length The number of bytes that were written.
   */
  private void postAppend(int length) {
    totalBytesWritten += length;
    // If the current allocation is now exactly full, move on to the (pre-linked) successor.
    if (totalBytesWritten == writeAllocationNode.endPosition) {
      writeAllocationNode = writeAllocationNode.next;
    }
  }
|
||||
|
||||
/** A node in a linked list of {@link Allocation}s held by the output. */
|
||||
private static final class AllocationNode {
|
||||
|
||||
/** The absolute position of the start of the data (inclusive). */
|
||||
public final long startPosition;
|
||||
/** The absolute position of the end of the data (exclusive). */
|
||||
public final long endPosition;
|
||||
/** Whether the node has been initialized. Remains true after {@link #clear()}. */
|
||||
public boolean wasInitialized;
|
||||
/** The {@link Allocation}, or {@code null} if the node is not initialized. */
|
||||
@Nullable public Allocation allocation;
|
||||
/**
|
||||
* The next {@link AllocationNode} in the list, or {@code null} if the node has not been
|
||||
* initialized. Remains set after {@link #clear()}.
|
||||
*/
|
||||
@Nullable public AllocationNode next;
|
||||
|
||||
/**
|
||||
* @param startPosition See {@link #startPosition}.
|
||||
* @param allocationLength The length of the {@link Allocation} with which this node will be
|
||||
* initialized.
|
||||
*/
|
||||
public AllocationNode(long startPosition, int allocationLength) {
|
||||
this.startPosition = startPosition;
|
||||
this.endPosition = startPosition + allocationLength;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes the node.
|
||||
*
|
||||
* @param allocation The node's {@link Allocation}.
|
||||
* @param next The next {@link AllocationNode}.
|
||||
*/
|
||||
public void initialize(Allocation allocation, AllocationNode next) {
|
||||
this.allocation = allocation;
|
||||
this.next = next;
|
||||
wasInitialized = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the offset into the {@link #allocation}'s {@link Allocation#data} that corresponds to
|
||||
* the specified absolute position.
|
||||
*
|
||||
* @param absolutePosition The absolute position.
|
||||
* @return The corresponding offset into the allocation's data.
|
||||
*/
|
||||
public int translateOffset(long absolutePosition) {
|
||||
return (int) (absolutePosition - startPosition) + allocation.offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clears {@link #allocation} and {@link #next}.
|
||||
*
|
||||
* @return The cleared next {@link AllocationNode}.
|
||||
*/
|
||||
public AllocationNode clear() {
|
||||
allocation = null;
|
||||
AllocationNode temp = next;
|
||||
next = null;
|
||||
return temp;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,721 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package com.google.android.exoplayer2.source;
|
||||
|
||||
import android.os.Looper;
|
||||
import androidx.annotation.Nullable;
|
||||
import com.google.android.exoplayer2.C;
|
||||
import com.google.android.exoplayer2.Format;
|
||||
import com.google.android.exoplayer2.FormatHolder;
|
||||
import com.google.android.exoplayer2.decoder.DecoderInputBuffer;
|
||||
import com.google.android.exoplayer2.drm.DrmInitData;
|
||||
import com.google.android.exoplayer2.drm.DrmSession;
|
||||
import com.google.android.exoplayer2.drm.DrmSessionManager;
|
||||
import com.google.android.exoplayer2.extractor.TrackOutput.CryptoData;
|
||||
import com.google.android.exoplayer2.util.Assertions;
|
||||
import com.google.android.exoplayer2.util.MimeTypes;
|
||||
import com.google.android.exoplayer2.util.Util;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* A queue of metadata describing the contents of a media buffer.
|
||||
*/
|
||||
/* package */ final class SampleMetadataQueue {
|
||||
|
||||
  /**
   * A holder for sample metadata not held by {@link DecoderInputBuffer}.
   */
  public static final class SampleExtrasHolder {

    // Size of the sample data, in bytes.
    public int size;
    // Absolute position of the sample data in the rolling buffer.
    public long offset;
    // Encryption data for the sample, if encrypted.
    public CryptoData cryptoData;

  }
|
||||
|
||||
  // Number of additional samples the metadata arrays grow by when full.
  private static final int SAMPLE_CAPACITY_INCREMENT = 1000;

  private final DrmSessionManager<?> drmSessionManager;

  @Nullable private Format downstreamFormat;
  @Nullable private DrmSession<?> currentDrmSession;

  private int capacity;
  // Parallel per-sample metadata arrays. Indexing goes through getRelativeIndex, so these appear
  // to form a circular buffer of `length` entries starting at `relativeFirstIndex` — confirm
  // against getRelativeIndex (not visible in this chunk).
  private int[] sourceIds;
  private long[] offsets;
  private int[] sizes;
  private int[] flags;
  private long[] timesUs;
  private CryptoData[] cryptoDatas;
  private Format[] formats;

  private int length;
  private int absoluteFirstIndex;
  private int relativeFirstIndex;
  private int readPosition;

  private long largestDiscardedTimestampUs;
  private long largestQueuedTimestampUs;
  private boolean isLastSampleQueued;
  private boolean upstreamKeyframeRequired;
  private boolean upstreamFormatRequired;
  private Format upstreamFormat;
  private Format upstreamCommittedFormat;
  private int upstreamSourceId;
|
||||
|
||||
  /**
   * Creates the queue with an initial capacity of {@link #SAMPLE_CAPACITY_INCREMENT} samples.
   *
   * @param drmSessionManager The {@link DrmSessionManager} used by the queue.
   */
  public SampleMetadataQueue(DrmSessionManager<?> drmSessionManager) {
    this.drmSessionManager = drmSessionManager;
    capacity = SAMPLE_CAPACITY_INCREMENT;
    sourceIds = new int[capacity];
    offsets = new long[capacity];
    timesUs = new long[capacity];
    flags = new int[capacity];
    sizes = new int[capacity];
    cryptoDatas = new CryptoData[capacity];
    formats = new Format[capacity];
    // MIN_VALUE sentinels mean "no samples queued/discarded yet".
    largestDiscardedTimestampUs = Long.MIN_VALUE;
    largestQueuedTimestampUs = Long.MIN_VALUE;
    // A format and a keyframe must both be received before any sample can be queued.
    upstreamFormatRequired = true;
    upstreamKeyframeRequired = true;
  }
|
||||
|
||||
// Called by the consuming thread, but only when there is no loading thread.
|
||||
|
||||
  /**
   * Clears all sample metadata from the queue.
   *
   * @param resetUpstreamFormat Whether the upstream format should be cleared. If set to false,
   *     samples queued after the reset (and before a subsequent call to {@link #format(Format)})
   *     are assumed to have the current upstream format. If set to true, {@link #format(Format)}
   *     must be called after the reset before any more samples can be queued.
   */
  public void reset(boolean resetUpstreamFormat) {
    length = 0;
    absoluteFirstIndex = 0;
    relativeFirstIndex = 0;
    readPosition = 0;
    upstreamKeyframeRequired = true;
    largestDiscardedTimestampUs = Long.MIN_VALUE;
    largestQueuedTimestampUs = Long.MIN_VALUE;
    isLastSampleQueued = false;
    upstreamCommittedFormat = null;
    if (resetUpstreamFormat) {
      upstreamFormat = null;
      upstreamFormatRequired = true;
    }
  }
|
||||
|
||||
  /**
   * Returns the current absolute write index.
   */
  public int getWriteIndex() {
    // Absolute indices only ever grow; the write index is one past the last queued sample.
    return absoluteFirstIndex + length;
  }
|
||||
|
||||
  /**
   * Discards samples from the write side of the queue.
   *
   * @param discardFromIndex The absolute index of the first sample to be discarded.
   * @return The reduced total number of bytes written after the samples have been discarded, or 0
   *     if the queue is now empty.
   */
  public long discardUpstreamSamples(int discardFromIndex) {
    int discardCount = getWriteIndex() - discardFromIndex;
    // Discarding past the read position is not permitted.
    Assertions.checkArgument(0 <= discardCount && discardCount <= (length - readPosition));
    length -= discardCount;
    largestQueuedTimestampUs = Math.max(largestDiscardedTimestampUs, getLargestTimestamp(length));
    // The last sample remains "queued" only if nothing was actually discarded.
    isLastSampleQueued = discardCount == 0 && isLastSampleQueued;
    if (length == 0) {
      return 0;
    } else {
      // The new write position is the end of the last retained sample's data.
      int relativeLastWriteIndex = getRelativeIndex(length - 1);
      return offsets[relativeLastWriteIndex] + sizes[relativeLastWriteIndex];
    }
  }
|
||||
|
||||
  /** Sets the source id to be attached to subsequently queued samples. */
  public void sourceId(int sourceId) {
    upstreamSourceId = sourceId;
  }
|
||||
|
||||
// Called by the consuming thread.
|
||||
|
||||
  /**
   * Throws an error that's preventing data from being read. Does nothing if no such error exists.
   *
   * @throws IOException The underlying error.
   */
  public void maybeThrowError() throws IOException {
    // TODO: Avoid throwing if the DRM error is not preventing a read operation.
    if (currentDrmSession != null && currentDrmSession.getState() == DrmSession.STATE_ERROR) {
      throw Assertions.checkNotNull(currentDrmSession.getError());
    }
  }
|
||||
|
||||
  /** Releases any owned {@link DrmSession} references. */
  public void releaseDrmSessionReferences() {
    if (currentDrmSession != null) {
      currentDrmSession.release();
      currentDrmSession = null;
      // Clear downstream format to avoid violating the assumption that downstreamFormat.drmInitData
      // != null implies currentSession != null
      downstreamFormat = null;
    }
  }
|
||||
|
||||
  /** Returns the current absolute start index. */
  public int getFirstIndex() {
    return absoluteFirstIndex;
  }
|
||||
|
||||
  /**
   * Returns the current absolute read index.
   */
  public int getReadIndex() {
    // readPosition is relative to the first retained sample.
    return absoluteFirstIndex + readPosition;
  }
|
||||
|
||||
/**
|
||||
* Peeks the source id of the next sample to be read, or the current upstream source id if the
|
||||
* queue is empty or if the read position is at the end of the queue.
|
||||
*
|
||||
* @return The source id.
|
||||
*/
|
||||
public synchronized int peekSourceId() {
|
||||
int relativeReadIndex = getRelativeIndex(readPosition);
|
||||
return hasNextSample() ? sourceIds[relativeReadIndex] : upstreamSourceId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the upstream {@link Format} in which samples are being queued.
|
||||
*/
|
||||
public synchronized Format getUpstreamFormat() {
|
||||
return upstreamFormatRequired ? null : upstreamFormat;
|
||||
}
|
||||
|
||||
  /**
   * Returns the largest sample timestamp that has been queued since the last call to
   * {@link #reset(boolean)}.
   * <p>
   * Samples that were discarded by calling {@link #discardUpstreamSamples(int)} are not
   * considered as having been queued. Samples that were dequeued from the front of the queue are
   * considered as having been queued.
   *
   * @return The largest sample timestamp that has been queued, or {@link Long#MIN_VALUE} if no
   *     samples have been queued.
   */
  public synchronized long getLargestQueuedTimestampUs() {
    return largestQueuedTimestampUs;
  }
|
||||
|
||||
  /**
   * Returns whether the last sample of the stream has knowingly been queued. A return value of
   * {@code false} means that the last sample had not been queued or that it's unknown whether the
   * last sample has been queued.
   *
   * <p>Samples that were discarded by calling {@link #discardUpstreamSamples(int)} are not
   * considered as having been queued. Samples that were dequeued from the front of the queue are
   * considered as having been queued.
   */
  public synchronized boolean isLastSampleQueued() {
    return isLastSampleQueued;
  }
|
||||
|
||||
/** Returns the timestamp of the first sample, or {@link Long#MIN_VALUE} if the queue is empty. */
|
||||
public synchronized long getFirstTimestampUs() {
|
||||
return length == 0 ? Long.MIN_VALUE : timesUs[relativeFirstIndex];
|
||||
}
|
||||
|
||||
  /**
   * Rewinds the read position to the first sample retained in the queue.
   */
  public synchronized void rewind() {
    // readPosition is relative to the first retained sample, so 0 means "first retained sample".
    readPosition = 0;
  }
|
||||
|
||||
/**
|
||||
* Returns whether there is data available for reading.
|
||||
*
|
||||
* <p>Note: If the stream has ended then a buffer with the end of stream flag can always be read
|
||||
* from {@link #read}. Hence an ended stream is always ready.
|
||||
*
|
||||
* @param loadingFinished Whether no more samples will be written to the sample queue. When true,
|
||||
* this method returns true if the sample queue is empty, because an empty sample queue means
|
||||
* the end of stream has been reached. When false, this method returns false if the sample
|
||||
* queue is empty.
|
||||
*/
|
||||
public boolean isReady(boolean loadingFinished) {
|
||||
if (!hasNextSample()) {
|
||||
return loadingFinished
|
||||
|| isLastSampleQueued
|
||||
|| (upstreamFormat != null && upstreamFormat != downstreamFormat);
|
||||
}
|
||||
int relativeReadIndex = getRelativeIndex(readPosition);
|
||||
if (formats[relativeReadIndex] != downstreamFormat) {
|
||||
// A format can be read.
|
||||
return true;
|
||||
}
|
||||
return mayReadSample(relativeReadIndex);
|
||||
}
|
||||
|
||||
  /**
   * Attempts to read from the queue.
   *
   * @param formatHolder A {@link FormatHolder} to populate in the case of reading a format.
   * @param buffer A {@link DecoderInputBuffer} to populate in the case of reading a sample or the
   *     end of the stream. If a sample is read then the buffer is populated with information about
   *     the sample, but not its data. The size and absolute position of the data in the rolling
   *     buffer is stored in {@code extrasHolder}, along with an encryption id if present and the
   *     absolute position of the first byte that may still be required after the current sample has
   *     been read. If a {@link DecoderInputBuffer#isFlagsOnly() flags-only} buffer is passed, only
   *     the buffer flags may be populated by this method and the read position of the queue will
   *     not change. May be null if the caller requires that the format of the stream be read even
   *     if it's not changing.
   * @param formatRequired Whether the caller requires that the format of the stream be read even if
   *     it's not changing. A sample will never be read if set to true, however it is still possible
   *     for the end of stream or nothing to be read.
   * @param loadingFinished True if an empty queue should be considered the end of the stream.
   * @param extrasHolder The holder into which extra sample information should be written.
   * @return The result, which can be {@link C#RESULT_NOTHING_READ}, {@link C#RESULT_FORMAT_READ} or
   *     {@link C#RESULT_BUFFER_READ}.
   */
  @SuppressWarnings("ReferenceEquality")
  public synchronized int read(
      FormatHolder formatHolder,
      DecoderInputBuffer buffer,
      boolean formatRequired,
      boolean loadingFinished,
      SampleExtrasHolder extrasHolder) {
    if (!hasNextSample()) {
      // Empty queue: either signal end of stream, emit a pending format change, or report that
      // nothing could be read.
      if (loadingFinished || isLastSampleQueued) {
        buffer.setFlags(C.BUFFER_FLAG_END_OF_STREAM);
        return C.RESULT_BUFFER_READ;
      } else if (upstreamFormat != null && (formatRequired || upstreamFormat != downstreamFormat)) {
        onFormatResult(Assertions.checkNotNull(upstreamFormat), formatHolder);
        return C.RESULT_FORMAT_READ;
      } else {
        return C.RESULT_NOTHING_READ;
      }
    }

    // A format change (or explicit format request) takes priority over reading the sample itself.
    int relativeReadIndex = getRelativeIndex(readPosition);
    if (formatRequired || formats[relativeReadIndex] != downstreamFormat) {
      onFormatResult(formats[relativeReadIndex], formatHolder);
      return C.RESULT_FORMAT_READ;
    }

    if (!mayReadSample(relativeReadIndex)) {
      return C.RESULT_NOTHING_READ;
    }

    buffer.setFlags(flags[relativeReadIndex]);
    buffer.timeUs = timesUs[relativeReadIndex];
    if (buffer.isFlagsOnly()) {
      // Flags-only reads must not advance the read position.
      return C.RESULT_BUFFER_READ;
    }

    extrasHolder.size = sizes[relativeReadIndex];
    extrasHolder.offset = offsets[relativeReadIndex];
    extrasHolder.cryptoData = cryptoDatas[relativeReadIndex];

    readPosition++;
    return C.RESULT_BUFFER_READ;
  }
|
||||
|
||||
/**
|
||||
* Attempts to advance the read position to the sample before or at the specified time.
|
||||
*
|
||||
* @param timeUs The time to advance to.
|
||||
* @param toKeyframe If true then attempts to advance to the keyframe before or at the specified
|
||||
* time, rather than to any sample before or at that time.
|
||||
* @param allowTimeBeyondBuffer Whether the operation can succeed if {@code timeUs} is beyond the
|
||||
* end of the queue, by advancing the read position to the last sample (or keyframe) in the
|
||||
* queue.
|
||||
* @return The number of samples that were skipped if the operation was successful, which may be
|
||||
* equal to 0, or {@link SampleQueue#ADVANCE_FAILED} if the operation was not successful. A
|
||||
* successful advance is one in which the read position was unchanged or advanced, and is now
|
||||
* at a sample meeting the specified criteria.
|
||||
*/
|
||||
public synchronized int advanceTo(long timeUs, boolean toKeyframe,
|
||||
boolean allowTimeBeyondBuffer) {
|
||||
int relativeReadIndex = getRelativeIndex(readPosition);
|
||||
if (!hasNextSample() || timeUs < timesUs[relativeReadIndex]
|
||||
|| (timeUs > largestQueuedTimestampUs && !allowTimeBeyondBuffer)) {
|
||||
return SampleQueue.ADVANCE_FAILED;
|
||||
}
|
||||
int offset = findSampleBefore(relativeReadIndex, length - readPosition, timeUs, toKeyframe);
|
||||
if (offset == -1) {
|
||||
return SampleQueue.ADVANCE_FAILED;
|
||||
}
|
||||
readPosition += offset;
|
||||
return offset;
|
||||
}
|
||||
|
||||
/**
|
||||
* Advances the read position to the end of the queue.
|
||||
*
|
||||
* @return The number of samples that were skipped.
|
||||
*/
|
||||
public synchronized int advanceToEnd() {
|
||||
int skipCount = length - readPosition;
|
||||
readPosition = length;
|
||||
return skipCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempts to set the read position to the specified sample index.
|
||||
*
|
||||
* @param sampleIndex The sample index.
|
||||
* @return Whether the read position was set successfully. False is returned if the specified
|
||||
* index is smaller than the index of the first sample in the queue, or larger than the index
|
||||
* of the next sample that will be written.
|
||||
*/
|
||||
public synchronized boolean setReadPosition(int sampleIndex) {
|
||||
if (absoluteFirstIndex <= sampleIndex && sampleIndex <= absoluteFirstIndex + length) {
|
||||
readPosition = sampleIndex - absoluteFirstIndex;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Discards up to but not including the sample immediately before or at the specified time.
|
||||
*
|
||||
* @param timeUs The time to discard up to.
|
||||
* @param toKeyframe If true then discards samples up to the keyframe before or at the specified
|
||||
* time, rather than just any sample before or at that time.
|
||||
* @param stopAtReadPosition If true then samples are only discarded if they're before the read
|
||||
* position. If false then samples at and beyond the read position may be discarded, in which
|
||||
* case the read position is advanced to the first remaining sample.
|
||||
* @return The corresponding offset up to which data should be discarded, or
|
||||
* {@link C#POSITION_UNSET} if no discarding of data is necessary.
|
||||
*/
|
||||
public synchronized long discardTo(long timeUs, boolean toKeyframe, boolean stopAtReadPosition) {
|
||||
if (length == 0 || timeUs < timesUs[relativeFirstIndex]) {
|
||||
return C.POSITION_UNSET;
|
||||
}
|
||||
int searchLength = stopAtReadPosition && readPosition != length ? readPosition + 1 : length;
|
||||
int discardCount = findSampleBefore(relativeFirstIndex, searchLength, timeUs, toKeyframe);
|
||||
if (discardCount == -1) {
|
||||
return C.POSITION_UNSET;
|
||||
}
|
||||
return discardSamples(discardCount);
|
||||
}
|
||||
|
||||
/**
|
||||
* Discards samples up to but not including the read position.
|
||||
*
|
||||
* @return The corresponding offset up to which data should be discarded, or
|
||||
* {@link C#POSITION_UNSET} if no discarding of data is necessary.
|
||||
*/
|
||||
public synchronized long discardToRead() {
|
||||
if (readPosition == 0) {
|
||||
return C.POSITION_UNSET;
|
||||
}
|
||||
return discardSamples(readPosition);
|
||||
}
|
||||
|
||||
/**
|
||||
* Discards all samples in the queue. The read position is also advanced.
|
||||
*
|
||||
* @return The corresponding offset up to which data should be discarded, or
|
||||
* {@link C#POSITION_UNSET} if no discarding of data is necessary.
|
||||
*/
|
||||
public synchronized long discardToEnd() {
|
||||
if (length == 0) {
|
||||
return C.POSITION_UNSET;
|
||||
}
|
||||
return discardSamples(length);
|
||||
}
|
||||
|
||||
// Called by the loading thread.

/**
 * Sets the format most recently supplied by the upstream source.
 *
 * @param format The upstream format, or null if it is not yet known.
 * @return Whether {@code upstreamFormat} changed as a result of the call. Returns false when
 *     {@code format} is null or equal to the current upstream format.
 */
public synchronized boolean format(Format format) {
  if (format == null) {
    // No format is available yet; one must be provided before samples can be committed
    // (see the assertion in commitSample).
    upstreamFormatRequired = true;
    return false;
  }
  upstreamFormatRequired = false;
  if (Util.areEqual(format, upstreamFormat)) {
    // The format is unchanged. If format and upstreamFormat are different objects, we keep the
    // current upstreamFormat so we can detect format changes in read() using cheap referential
    // equality.
    return false;
  } else if (Util.areEqual(format, upstreamCommittedFormat)) {
    // The format has changed back to the format of the last committed sample. If they are
    // different objects, we revert back to using upstreamCommittedFormat as the upstreamFormat so
    // we can detect format changes in read() using cheap referential equality.
    upstreamFormat = upstreamCommittedFormat;
    return true;
  } else {
    upstreamFormat = format;
    return true;
  }
}
|
||||
|
||||
/**
 * Commits metadata for a sample whose data has been written upstream of this queue.
 *
 * <p>If a keyframe is still required (upstreamKeyframeRequired), non-keyframe samples are
 * silently dropped until one arrives. A format must have been set before committing (see the
 * assertion below).
 *
 * @param timeUs The presentation timestamp of the sample, in microseconds.
 * @param sampleFlags Flags that accompany the sample. See {@code C.BUFFER_FLAG_*}.
 * @param offset The offset of the sample data. Presumably the absolute position in the rolling
 *     data buffer, matching the {@code extrasHolder.offset} read back in read() — confirm
 *     against the caller.
 * @param size The size of the sample data, in bytes.
 * @param cryptoData The {@link CryptoData} associated with the sample.
 */
public synchronized void commitSample(long timeUs, @C.BufferFlags int sampleFlags, long offset,
    int size, CryptoData cryptoData) {
  if (upstreamKeyframeRequired) {
    if ((sampleFlags & C.BUFFER_FLAG_KEY_FRAME) == 0) {
      // Drop samples until the first keyframe arrives.
      return;
    }
    upstreamKeyframeRequired = false;
  }
  Assertions.checkState(!upstreamFormatRequired);

  isLastSampleQueued = (sampleFlags & C.BUFFER_FLAG_LAST_SAMPLE) != 0;
  largestQueuedTimestampUs = Math.max(largestQueuedTimestampUs, timeUs);

  // Write the sample's metadata at the end of the circular buffer.
  int relativeEndIndex = getRelativeIndex(length);
  timesUs[relativeEndIndex] = timeUs;
  offsets[relativeEndIndex] = offset;
  sizes[relativeEndIndex] = size;
  flags[relativeEndIndex] = sampleFlags;
  cryptoDatas[relativeEndIndex] = cryptoData;
  formats[relativeEndIndex] = upstreamFormat;
  sourceIds[relativeEndIndex] = upstreamSourceId;
  // Remember the format of the last committed sample so format() can detect a revert.
  upstreamCommittedFormat = upstreamFormat;

  length++;
  if (length == capacity) {
    // Increase the capacity. The copy below unwraps the circular buffer, so the new arrays
    // start with the oldest sample at relative index 0.
    int newCapacity = capacity + SAMPLE_CAPACITY_INCREMENT;
    int[] newSourceIds = new int[newCapacity];
    long[] newOffsets = new long[newCapacity];
    long[] newTimesUs = new long[newCapacity];
    int[] newFlags = new int[newCapacity];
    int[] newSizes = new int[newCapacity];
    CryptoData[] newCryptoDatas = new CryptoData[newCapacity];
    Format[] newFormats = new Format[newCapacity];
    // Copy the segment from the first sample up to the end of the old arrays...
    int beforeWrap = capacity - relativeFirstIndex;
    System.arraycopy(offsets, relativeFirstIndex, newOffsets, 0, beforeWrap);
    System.arraycopy(timesUs, relativeFirstIndex, newTimesUs, 0, beforeWrap);
    System.arraycopy(flags, relativeFirstIndex, newFlags, 0, beforeWrap);
    System.arraycopy(sizes, relativeFirstIndex, newSizes, 0, beforeWrap);
    System.arraycopy(cryptoDatas, relativeFirstIndex, newCryptoDatas, 0, beforeWrap);
    System.arraycopy(formats, relativeFirstIndex, newFormats, 0, beforeWrap);
    System.arraycopy(sourceIds, relativeFirstIndex, newSourceIds, 0, beforeWrap);
    // ...then the wrapped-around segment from the start of the old arrays.
    int afterWrap = relativeFirstIndex;
    System.arraycopy(offsets, 0, newOffsets, beforeWrap, afterWrap);
    System.arraycopy(timesUs, 0, newTimesUs, beforeWrap, afterWrap);
    System.arraycopy(flags, 0, newFlags, beforeWrap, afterWrap);
    System.arraycopy(sizes, 0, newSizes, beforeWrap, afterWrap);
    System.arraycopy(cryptoDatas, 0, newCryptoDatas, beforeWrap, afterWrap);
    System.arraycopy(formats, 0, newFormats, beforeWrap, afterWrap);
    System.arraycopy(sourceIds, 0, newSourceIds, beforeWrap, afterWrap);
    offsets = newOffsets;
    timesUs = newTimesUs;
    flags = newFlags;
    sizes = newSizes;
    cryptoDatas = newCryptoDatas;
    formats = newFormats;
    sourceIds = newSourceIds;
    relativeFirstIndex = 0;
    length = capacity;
    capacity = newCapacity;
  }
}
|
||||
|
||||
/**
|
||||
* Attempts to discard samples from the end of the queue to allow samples starting from the
|
||||
* specified timestamp to be spliced in. Samples will not be discarded prior to the read position.
|
||||
*
|
||||
* @param timeUs The timestamp at which the splice occurs.
|
||||
* @return Whether the splice was successful.
|
||||
*/
|
||||
public synchronized boolean attemptSplice(long timeUs) {
|
||||
if (length == 0) {
|
||||
return timeUs > largestDiscardedTimestampUs;
|
||||
}
|
||||
long largestReadTimestampUs = Math.max(largestDiscardedTimestampUs,
|
||||
getLargestTimestamp(readPosition));
|
||||
if (largestReadTimestampUs >= timeUs) {
|
||||
return false;
|
||||
}
|
||||
int retainCount = length;
|
||||
int relativeSampleIndex = getRelativeIndex(length - 1);
|
||||
while (retainCount > readPosition && timesUs[relativeSampleIndex] >= timeUs) {
|
||||
retainCount--;
|
||||
relativeSampleIndex--;
|
||||
if (relativeSampleIndex == -1) {
|
||||
relativeSampleIndex = capacity - 1;
|
||||
}
|
||||
}
|
||||
discardUpstreamSamples(absoluteFirstIndex + retainCount);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Internal methods.
|
||||
|
||||
private boolean hasNextSample() {
|
||||
return readPosition != length;
|
||||
}
|
||||
|
||||
/**
 * Sets the downstream format, performs DRM resource management, and populates the {@code
 * outputFormatHolder}.
 *
 * <p>NOTE(review): relies on being called on a thread with a {@link Looper} — {@code
 * Looper.myLooper()} is checked non-null below.
 *
 * @param newFormat The new downstream format.
 * @param outputFormatHolder The output {@link FormatHolder}.
 */
private void onFormatResult(Format newFormat, FormatHolder outputFormatHolder) {
  outputFormatHolder.format = newFormat;
  boolean isFirstFormat = downstreamFormat == null;
  DrmInitData oldDrmInitData = isFirstFormat ? null : downstreamFormat.drmInitData;
  downstreamFormat = newFormat;
  if (drmSessionManager == DrmSessionManager.DUMMY) {
    // Avoid attempting to acquire a session using the dummy DRM session manager. It's likely that
    // the media source creation has not yet been migrated and the renderer can acquire the
    // session for the read DRM init data.
    // TODO: Remove once renderers are migrated [Internal ref: b/122519809].
    return;
  }
  DrmInitData newDrmInitData = newFormat.drmInitData;
  outputFormatHolder.includesDrmSession = true;
  outputFormatHolder.drmSession = currentDrmSession;
  if (!isFirstFormat && Util.areEqual(oldDrmInitData, newDrmInitData)) {
    // Nothing to do.
    return;
  }
  // Ensure we acquire the new session before releasing the previous one in case the same session
  // is being used for both DrmInitData.
  DrmSession<?> previousSession = currentDrmSession;
  Looper playbackLooper = Assertions.checkNotNull(Looper.myLooper());
  currentDrmSession =
      newDrmInitData != null
          ? drmSessionManager.acquireSession(playbackLooper, newDrmInitData)
          // No DRM init data: acquire a placeholder session for the track type.
          : drmSessionManager.acquirePlaceholderSession(
              playbackLooper, MimeTypes.getTrackType(newFormat.sampleMimeType));
  outputFormatHolder.drmSession = currentDrmSession;

  if (previousSession != null) {
    previousSession.release();
  }
}
|
||||
|
||||
/**
|
||||
* Returns whether it's possible to read the next sample.
|
||||
*
|
||||
* @param relativeReadIndex The relative read index of the next sample.
|
||||
* @return Whether it's possible to read the next sample.
|
||||
*/
|
||||
private boolean mayReadSample(int relativeReadIndex) {
|
||||
if (drmSessionManager == DrmSessionManager.DUMMY) {
|
||||
// TODO: Remove once renderers are migrated [Internal ref: b/122519809].
|
||||
// For protected content it's likely that the DrmSessionManager is still being injected into
|
||||
// the renderers. We assume that the renderers will be able to acquire a DrmSession if needed.
|
||||
return true;
|
||||
}
|
||||
return currentDrmSession == null
|
||||
|| currentDrmSession.getState() == DrmSession.STATE_OPENED_WITH_KEYS
|
||||
|| ((flags[relativeReadIndex] & C.BUFFER_FLAG_ENCRYPTED) == 0
|
||||
&& currentDrmSession.playClearSamplesWithoutKeys());
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the sample in the specified range that's before or at the specified time. If {@code
|
||||
* keyframe} is {@code true} then the sample is additionally required to be a keyframe.
|
||||
*
|
||||
* @param relativeStartIndex The relative index from which to start searching.
|
||||
* @param length The length of the range being searched.
|
||||
* @param timeUs The specified time.
|
||||
* @param keyframe Whether only keyframes should be considered.
|
||||
* @return The offset from {@code relativeFirstIndex} to the found sample, or -1 if no matching
|
||||
* sample was found.
|
||||
*/
|
||||
private int findSampleBefore(int relativeStartIndex, int length, long timeUs, boolean keyframe) {
|
||||
// This could be optimized to use a binary search, however in practice callers to this method
|
||||
// normally pass times near to the start of the search region. Hence it's unclear whether
|
||||
// switching to a binary search would yield any real benefit.
|
||||
int sampleCountToTarget = -1;
|
||||
int searchIndex = relativeStartIndex;
|
||||
for (int i = 0; i < length && timesUs[searchIndex] <= timeUs; i++) {
|
||||
if (!keyframe || (flags[searchIndex] & C.BUFFER_FLAG_KEY_FRAME) != 0) {
|
||||
// We've found a suitable sample.
|
||||
sampleCountToTarget = i;
|
||||
}
|
||||
searchIndex++;
|
||||
if (searchIndex == capacity) {
|
||||
searchIndex = 0;
|
||||
}
|
||||
}
|
||||
return sampleCountToTarget;
|
||||
}
|
||||
|
||||
/**
|
||||
* Discards the specified number of samples.
|
||||
*
|
||||
* @param discardCount The number of samples to discard.
|
||||
* @return The corresponding offset up to which data should be discarded.
|
||||
*/
|
||||
private long discardSamples(int discardCount) {
|
||||
largestDiscardedTimestampUs = Math.max(largestDiscardedTimestampUs,
|
||||
getLargestTimestamp(discardCount));
|
||||
length -= discardCount;
|
||||
absoluteFirstIndex += discardCount;
|
||||
relativeFirstIndex += discardCount;
|
||||
if (relativeFirstIndex >= capacity) {
|
||||
relativeFirstIndex -= capacity;
|
||||
}
|
||||
readPosition -= discardCount;
|
||||
if (readPosition < 0) {
|
||||
readPosition = 0;
|
||||
}
|
||||
if (length == 0) {
|
||||
int relativeLastDiscardIndex = (relativeFirstIndex == 0 ? capacity : relativeFirstIndex) - 1;
|
||||
return offsets[relativeLastDiscardIndex] + sizes[relativeLastDiscardIndex];
|
||||
} else {
|
||||
return offsets[relativeFirstIndex];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Finds the largest timestamp of any sample from the start of the queue up to the specified
|
||||
* length, assuming that the timestamps prior to a keyframe are always less than the timestamp of
|
||||
* the keyframe itself, and of subsequent frames.
|
||||
*
|
||||
* @param length The length of the range being searched.
|
||||
* @return The largest timestamp, or {@link Long#MIN_VALUE} if {@code length == 0}.
|
||||
*/
|
||||
private long getLargestTimestamp(int length) {
|
||||
if (length == 0) {
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
long largestTimestampUs = Long.MIN_VALUE;
|
||||
int relativeSampleIndex = getRelativeIndex(length - 1);
|
||||
for (int i = 0; i < length; i++) {
|
||||
largestTimestampUs = Math.max(largestTimestampUs, timesUs[relativeSampleIndex]);
|
||||
if ((flags[relativeSampleIndex] & C.BUFFER_FLAG_KEY_FRAME) != 0) {
|
||||
break;
|
||||
}
|
||||
relativeSampleIndex--;
|
||||
if (relativeSampleIndex == -1) {
|
||||
relativeSampleIndex = capacity - 1;
|
||||
}
|
||||
}
|
||||
return largestTimestampUs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the relative index for a given offset from the start of the queue.
|
||||
*
|
||||
* @param offset The offset, which must be in the range [0, length].
|
||||
*/
|
||||
private int getRelativeIndex(int offset) {
|
||||
int relativeIndex = relativeFirstIndex + offset;
|
||||
return relativeIndex < capacity ? relativeIndex : relativeIndex - capacity;
|
||||
}
|
||||
|
||||
}
|
File diff suppressed because it is too large
Load Diff
Loading…
x
Reference in New Issue
Block a user