Distinguish dense vs sparse tracks for buffering purposes
- For audio/video tracks, we should report the minimum of the largest
  queued timestamps. This ensures that buffering continues rather than
  stops in the case of bad sample interleaving.
- "Sparse" tracks should be ignored for this calculation, otherwise the
  buffered position can end up getting stuck at some small value (e.g. if
  there's a text track containing a single sample with a small timestamp).
- In the edge case that there are only "sparse" tracks, we probably want
  to take the maximum largest queued timestamp instead.
-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=130094266
commit d04fde1a53
parent c380ba2dc1
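Before the diff, here is a minimal standalone sketch of the selection rule
described above. The class, method, and parameter names are illustrative
only and are not part of the ExoPlayer API:

    // Sketch of the dense-vs-sparse buffered-position rule; all names
    // here are hypothetical, not ExoPlayer identifiers.
    final class LargestQueuedTimestampSketch {

      // largestQueuedTimestampsUs[i] is the largest timestamp queued on
      // track i; isAudioVideoTrack[i] marks the "dense" (audio/video) tracks.
      static long largestQueuedTimestampUs(
          long[] largestQueuedTimestampsUs, boolean[] isAudioVideoTrack) {
        boolean haveAudioVideoTracks = false;
        for (boolean isAudioVideo : isAudioVideoTrack) {
          haveAudioVideoTracks |= isAudioVideo;
        }
        long result = Long.MAX_VALUE;
        for (int i = 0; i < largestQueuedTimestampsUs.length; i++) {
          // Sparse tracks (e.g. text) are skipped unless no dense track exists.
          if (isAudioVideoTrack[i] || !haveAudioVideoTracks) {
            result = Math.min(result, largestQueuedTimestampsUs[i]);
          }
        }
        // Long.MIN_VALUE signals that no relevant samples are queued yet.
        return result == Long.MAX_VALUE ? Long.MIN_VALUE : result;
      }
    }

Note that, as implemented, the sparse-only fallback still takes the minimum
over all tracks; the "maximum" behaviour mentioned in the last bullet of the
commit message is left as a possible follow-up.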
@@ -41,6 +41,7 @@ import com.google.android.exoplayer2.upstream.Loader;
 import com.google.android.exoplayer2.upstream.Loader.Loadable;
 import com.google.android.exoplayer2.util.Assertions;
 import com.google.android.exoplayer2.util.ConditionVariable;
+import com.google.android.exoplayer2.util.MimeTypes;
 import com.google.android.exoplayer2.util.Util;
 import java.io.EOFException;
 import java.io.IOException;
@@ -130,6 +131,8 @@ public final class ExtractorMediaSource implements MediaPeriod, MediaSource,
   private int enabledTrackCount;
   private DefaultTrackOutput[] sampleQueues;
   private TrackGroupArray tracks;
+  private boolean[] tracksAreAudioVideoFlags;
+  private boolean haveAudioVideoTracks;
   private long durationUs;
   private boolean[] trackEnabledStates;
   private long length;
@@ -504,10 +507,16 @@ public final class ExtractorMediaSource implements MediaPeriod, MediaSource,
     loadCondition.close();
     int trackCount = sampleQueues.length;
     TrackGroup[] trackArray = new TrackGroup[trackCount];
+    tracksAreAudioVideoFlags = new boolean[trackCount];
     trackEnabledStates = new boolean[trackCount];
     durationUs = seekMap.getDurationUs();
     for (int i = 0; i < trackCount; i++) {
-      trackArray[i] = new TrackGroup(sampleQueues[i].getUpstreamFormat());
+      Format format = sampleQueues[i].getUpstreamFormat();
+      trackArray[i] = new TrackGroup(format);
+      String sampleMimeType = format.sampleMimeType;
+      tracksAreAudioVideoFlags[i] = MimeTypes.isVideo(sampleMimeType)
+          || MimeTypes.isAudio(sampleMimeType);
+      haveAudioVideoTracks |= tracksAreAudioVideoFlags[i];
     }
     tracks = new TrackGroupArray(trackArray);
     prepared = true;
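As an aside on the classification above: MimeTypes.isVideo and
MimeTypes.isAudio are existing ExoPlayer utilities that inspect the base
type of the sample MIME type. For example (illustrative values only,
assuming the MimeTypes import added in the first hunk):

    MimeTypes.isVideo("video/avc");             // true  -> dense track
    MimeTypes.isAudio("audio/mp4a-latm");       // true  -> dense track
    // A subtitle track is neither audio nor video, so it is sparse:
    MimeTypes.isVideo("application/x-subrip");  // false
    MimeTypes.isAudio("application/x-subrip");  // false -> sparse track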
@@ -578,12 +587,14 @@ public final class ExtractorMediaSource implements MediaPeriod, MediaSource,
   }
 
   private long getLargestQueuedTimestampUs() {
-    long largestQueuedTimestampUs = Long.MIN_VALUE;
-    for (DefaultTrackOutput sampleQueue : sampleQueues) {
-      largestQueuedTimestampUs = Math.max(largestQueuedTimestampUs,
-          sampleQueue.getLargestQueuedTimestampUs());
+    long largestQueuedTimestampUs = Long.MAX_VALUE;
+    for (int i = 0; i < sampleQueues.length; i++) {
+      if (tracksAreAudioVideoFlags[i] || !haveAudioVideoTracks) {
+        largestQueuedTimestampUs = Math.min(largestQueuedTimestampUs,
+            sampleQueues[i].getLargestQueuedTimestampUs());
+      }
     }
-    return largestQueuedTimestampUs;
+    return largestQueuedTimestampUs == Long.MAX_VALUE ? Long.MIN_VALUE : largestQueuedTimestampUs;
   }
 
   private boolean isPendingReset() {
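The Long.MAX_VALUE/Long.MIN_VALUE handling exists because the result feeds
the buffered position reported to the player. A sketch of such a caller
(not the actual ExoPlayer implementation, which is outside this diff;
lastSeekPositionUs is a hypothetical fallback field):

    public long getBufferedPositionUs() {
      long largestQueuedTimestampUs = getLargestQueuedTimestampUs();
      // Long.MIN_VALUE means nothing is queued on any dense track, so fall
      // back to a last-known position rather than reporting the sentinel.
      return largestQueuedTimestampUs == Long.MIN_VALUE
          ? lastSeekPositionUs
          : largestQueuedTimestampUs;
    }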