mirror of
https://github.com/androidx/media.git
synced 2025-05-06 23:20:42 +08:00
Remove durationUs from MediaFormat.
Duration was originally included in MediaFormat to match the framework class, but it actually doesn't make much sense. In many containers there's no such thing as per-stream duration, and in any case we don't really care. Setting the duration on each format required excessive piping. This change moves duration into SeekMap instead, which seems to make a lot more sense because it's at the container level, and because being able to seek is generally couplied with knowing how long the stream is. This change is also a step toward merging Format and MediaFormat into a single class (because Format doesn't have a duration), which is coming soon. ------------- Created by MOE: https://github.com/google/moe MOE_MIGRATED_REVID=114428500
This commit is contained in:
parent
ebf87a3619
commit
782817d524
@ -45,18 +45,17 @@ public final class MediaFormatTest extends TestCase {
|
||||
initData.add(initData2);
|
||||
|
||||
testConversionToFrameworkFormatV16(MediaFormat.createVideoFormat(
|
||||
null, "video/xyz", 5000, 102400, 1000L, 1280, 720, initData));
|
||||
null, "video/xyz", 5000, 102400, 1280, 720, initData));
|
||||
testConversionToFrameworkFormatV16(MediaFormat.createVideoFormat(
|
||||
null, "video/xyz", 5000, MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, 1280, 720, null));
|
||||
null, "video/xyz", 5000, MediaFormat.NO_VALUE, 1280, 720, null));
|
||||
testConversionToFrameworkFormatV16(MediaFormat.createAudioFormat(
|
||||
null, "audio/xyz", 500, 128, 1000L, 5, 44100, initData, null));
|
||||
null, "audio/xyz", 500, 128, 5, 44100, initData, null));
|
||||
testConversionToFrameworkFormatV16(MediaFormat.createAudioFormat(
|
||||
null, "audio/xyz", 500, MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, 5, 44100, null, null));
|
||||
null, "audio/xyz", 500, MediaFormat.NO_VALUE, 5, 44100, null, null));
|
||||
testConversionToFrameworkFormatV16(
|
||||
MediaFormat.createTextFormat(null, "text/xyz", MediaFormat.NO_VALUE, 1000L, "eng"));
|
||||
MediaFormat.createTextFormat(null, "text/xyz", MediaFormat.NO_VALUE, "eng"));
|
||||
testConversionToFrameworkFormatV16(
|
||||
MediaFormat.createTextFormat(null, "text/xyz", MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US,
|
||||
null));
|
||||
MediaFormat.createTextFormat(null, "text/xyz", MediaFormat.NO_VALUE, null));
|
||||
}
|
||||
|
||||
@SuppressLint("InlinedApi")
|
||||
@ -78,11 +77,6 @@ public final class MediaFormatTest extends TestCase {
|
||||
byte[] frameworkData = Arrays.copyOf(frameworkBuffer.array(), frameworkBuffer.limit());
|
||||
assertTrue(Arrays.equals(originalData, frameworkData));
|
||||
}
|
||||
if (in.durationUs == C.UNKNOWN_TIME_US) {
|
||||
assertFalse(out.containsKey(android.media.MediaFormat.KEY_DURATION));
|
||||
} else {
|
||||
assertEquals(in.durationUs, out.getLong(android.media.MediaFormat.KEY_DURATION));
|
||||
}
|
||||
}
|
||||
|
||||
@TargetApi(16)
|
||||
|
@ -23,7 +23,6 @@ import com.google.android.exoplayer.ParserException;
|
||||
import com.google.android.exoplayer.drm.DrmInitData;
|
||||
import com.google.android.exoplayer.drm.DrmInitData.SchemeInitData;
|
||||
import com.google.android.exoplayer.extractor.ChunkIndex;
|
||||
import com.google.android.exoplayer.extractor.SeekMap;
|
||||
import com.google.android.exoplayer.extractor.webm.StreamBuilder.ContentEncodingSettings;
|
||||
import com.google.android.exoplayer.testutil.FakeExtractorOutput;
|
||||
import com.google.android.exoplayer.testutil.FakeTrackOutput;
|
||||
@ -98,8 +97,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testReadSegmentTwice() throws IOException, InterruptedException {
|
||||
@ -114,8 +113,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareOpus() throws IOException, InterruptedException {
|
||||
@ -129,8 +128,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareVorbis() throws IOException, InterruptedException {
|
||||
@ -144,8 +143,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_VORBIS);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_VORBIS);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareH264() throws IOException, InterruptedException {
|
||||
@ -158,8 +157,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertH264VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertH264VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareTwoTracks() throws IOException, InterruptedException {
|
||||
@ -175,9 +174,9 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
|
||||
assertTracksEnded();
|
||||
assertEquals(2, extractorOutput.numberOfTracks);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareThreeTracks() throws IOException, InterruptedException {
|
||||
@ -195,9 +194,9 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
assertTracksEnded();
|
||||
// Even though the input stream has 3 tracks, only 2 of them are supported and will be reported.
|
||||
assertEquals(2, extractorOutput.numberOfTracks);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareFourTracks() throws IOException, InterruptedException {
|
||||
@ -216,11 +215,11 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
|
||||
assertTracksEnded();
|
||||
assertEquals(4, extractorOutput.numberOfTracks);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_VORBIS);
|
||||
assertVp9VideoFormat(SECOND_VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertAudioFormat(SECOND_AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_VORBIS);
|
||||
assertVp9VideoFormat(SECOND_VIDEO_TRACK_NUMBER);
|
||||
assertAudioFormat(SECOND_AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
}
|
||||
|
||||
public void testPrepareContentEncodingEncryption() throws IOException, InterruptedException {
|
||||
@ -234,8 +233,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 1);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 1);
|
||||
DrmInitData drmInitData = extractorOutput.drmInitData;
|
||||
assertNotNull(drmInitData);
|
||||
SchemeInitData widevineInitData = drmInitData.get(WIDEVINE_UUID);
|
||||
@ -256,8 +255,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertIndex(DEFAULT_TIMECODE_SCALE, 3);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSeekMap(DEFAULT_TIMECODE_SCALE, 3);
|
||||
}
|
||||
|
||||
public void testPrepareCustomTimecodeScaleBeforeDuration()
|
||||
@ -286,15 +285,25 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, timecodeScale);
|
||||
assertIndex(timecodeScale, 3);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSeekMap(timecodeScale, 3);
|
||||
}
|
||||
|
||||
public void testPrepareNoCuesElement() throws IOException, InterruptedException {
|
||||
testPrepareNoCuesElement(DEFAULT_TIMECODE_SCALE);
|
||||
}
|
||||
|
||||
public void testPrepareNoCuesElementCustomTimecodeScale()
|
||||
throws IOException, InterruptedException {
|
||||
testPrepareNoCuesElement(1000);
|
||||
}
|
||||
|
||||
private void testPrepareNoCuesElement(int timecodeScale) throws IOException,
|
||||
InterruptedException {
|
||||
byte[] media = createFrameData(100);
|
||||
byte[] data = new StreamBuilder()
|
||||
.setHeader(WEBM_DOC_TYPE)
|
||||
.setInfo(DEFAULT_TIMECODE_SCALE, TEST_DURATION_TIMECODE)
|
||||
.setInfo(timecodeScale, TEST_DURATION_TIMECODE)
|
||||
.addVp9Track(VIDEO_TRACK_NUMBER, TEST_WIDTH, TEST_HEIGHT, null)
|
||||
.addSimpleBlockMedia(1 /* trackNumber */, 0 /* clusterTimecode */, 0 /* blockTimecode */,
|
||||
true /* keyframe */, false /* invisible */, media)
|
||||
@ -303,7 +312,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertIndexUnseekable();
|
||||
assertSeekMapUnseekable(timecodeScale);
|
||||
}
|
||||
|
||||
public void testAcceptsWebmDocType() throws IOException, InterruptedException {
|
||||
@ -433,7 +442,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, media, 0, true, false, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
|
||||
@ -453,7 +462,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, unstrippedSampleBytes, 0, true, false, null,
|
||||
getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
@ -474,7 +483,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, unstrippedSampleBytes, 0, true, false, null,
|
||||
getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
@ -497,8 +506,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
|
||||
assertTracksEnded();
|
||||
assertEquals(2, extractorOutput.numberOfTracks);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSample(0, media, 0, true, false, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
assertSample(0, media, 0, true, false, null, getTrackOutput(AUDIO_TRACK_NUMBER));
|
||||
}
|
||||
@ -524,8 +533,8 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
|
||||
assertTracksEnded();
|
||||
assertEquals(2, extractorOutput.numberOfTracks);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSample(0, media, 0, true, false, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
assertSample(0, media, 0, true, false, null, getTrackOutput(AUDIO_TRACK_NUMBER));
|
||||
}
|
||||
@ -544,7 +553,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSample(0, media, 0, true, false, null, getTrackOutput(AUDIO_TRACK_NUMBER));
|
||||
}
|
||||
|
||||
@ -561,7 +570,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, media, 0, false, false, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
|
||||
@ -580,7 +589,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, media, 0, true, false, TEST_ENCRYPTION_KEY_ID,
|
||||
getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
@ -620,7 +629,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, media, 25000, false, true, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
|
||||
@ -638,7 +647,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, timecodeScale);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, media, 25, false, false, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
|
||||
@ -655,7 +664,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE);
|
||||
assertVp9VideoFormat(VIDEO_TRACK_NUMBER);
|
||||
assertSample(0, media, 1000, true, true, null, getTrackOutput(VIDEO_TRACK_NUMBER));
|
||||
}
|
||||
|
||||
@ -673,7 +682,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
for (int i = 0; i < 20; i++) {
|
||||
long expectedTimeUs = i * TEST_DEFAULT_DURATION_NS / 1000;
|
||||
assertSample(i, Arrays.copyOfRange(media, i * 5, i * 5 + 5), expectedTimeUs, true, false,
|
||||
@ -695,7 +704,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
TestUtil.consumeTestData(extractor, data);
|
||||
|
||||
assertTracksEnded();
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, DEFAULT_TIMECODE_SCALE, MimeTypes.AUDIO_OPUS);
|
||||
assertAudioFormat(AUDIO_TRACK_NUMBER, MimeTypes.AUDIO_OPUS);
|
||||
assertSample(0, Arrays.copyOfRange(media, 0, 256), 0 * TEST_DEFAULT_DURATION_NS / 1000, true,
|
||||
false, null, getTrackOutput(AUDIO_TRACK_NUMBER));
|
||||
assertSample(1, Arrays.copyOfRange(media, 256, 257), 1 * TEST_DEFAULT_DURATION_NS / 1000, true,
|
||||
@ -712,28 +721,22 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
assertTrue(extractorOutput.tracksEnded);
|
||||
}
|
||||
|
||||
private void assertVp9VideoFormat(int trackNumber, int timecodeScale) {
|
||||
private void assertVp9VideoFormat(int trackNumber) {
|
||||
MediaFormat format = getTrackOutput(trackNumber).format;
|
||||
assertEquals(Util.scaleLargeTimestamp(TEST_DURATION_TIMECODE, timecodeScale, 1000),
|
||||
format.durationUs);
|
||||
assertEquals(TEST_WIDTH, format.width);
|
||||
assertEquals(TEST_HEIGHT, format.height);
|
||||
assertEquals(MimeTypes.VIDEO_VP9, format.mimeType);
|
||||
}
|
||||
|
||||
private void assertH264VideoFormat(int trackNumber, int timecodeScale) {
|
||||
private void assertH264VideoFormat(int trackNumber) {
|
||||
MediaFormat format = getTrackOutput(trackNumber).format;
|
||||
assertEquals(Util.scaleLargeTimestamp(TEST_DURATION_TIMECODE, timecodeScale, 1000),
|
||||
format.durationUs);
|
||||
assertEquals(TEST_WIDTH, format.width);
|
||||
assertEquals(TEST_HEIGHT, format.height);
|
||||
assertEquals(MimeTypes.VIDEO_H264, format.mimeType);
|
||||
}
|
||||
|
||||
private void assertAudioFormat(int trackNumber, int timecodeScale, String expectedMimeType) {
|
||||
private void assertAudioFormat(int trackNumber, String expectedMimeType) {
|
||||
MediaFormat format = getTrackOutput(trackNumber).format;
|
||||
assertEquals(Util.scaleLargeTimestamp(TEST_DURATION_TIMECODE, timecodeScale, 1000),
|
||||
format.durationUs);
|
||||
assertEquals(TEST_CHANNEL_COUNT, format.channelCount);
|
||||
assertEquals(TEST_SAMPLE_RATE, format.sampleRate);
|
||||
assertEquals(expectedMimeType, format.mimeType);
|
||||
@ -752,7 +755,7 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
private void assertIndex(int timecodeScale, int cuePointCount) {
|
||||
private void assertSeekMap(int timecodeScale, int cuePointCount) {
|
||||
ChunkIndex index = (ChunkIndex) extractorOutput.seekMap;
|
||||
assertEquals(cuePointCount, index.length);
|
||||
for (int i = 0; i < cuePointCount - 1; i++) {
|
||||
@ -767,10 +770,14 @@ public final class WebmExtractorTest extends InstrumentationTestCase {
|
||||
index.timesUs[lastIndex]);
|
||||
assertEquals(Util.scaleLargeTimestamp(lastDurationTimecode, timecodeScale, 1000),
|
||||
index.durationsUs[lastIndex]);
|
||||
assertEquals(Util.scaleLargeTimestamp(TEST_DURATION_TIMECODE, timecodeScale, 1000),
|
||||
extractorOutput.seekMap.getDurationUs());
|
||||
}
|
||||
|
||||
private void assertIndexUnseekable() {
|
||||
assertEquals(SeekMap.UNSEEKABLE, extractorOutput.seekMap);
|
||||
private void assertSeekMapUnseekable(long timecodeScale) {
|
||||
assertFalse(extractorOutput.seekMap.isSeekable());
|
||||
long expectedDurationUs = Util.scaleLargeTimestamp(TEST_DURATION_TIMECODE, timecodeScale, 1000);
|
||||
assertEquals(expectedDurationUs, extractorOutput.seekMap.getDurationUs());
|
||||
}
|
||||
|
||||
private void assertSample(int index, byte[] expectedMedia, long timeUs, boolean keyframe,
|
||||
|
@ -134,12 +134,11 @@ public final class FrameworkSampleSource implements SampleSource {
|
||||
pendingResets = new boolean[trackStates.length];
|
||||
tracks = new TrackGroup[trackStates.length];
|
||||
for (int i = 0; i < trackStates.length; i++) {
|
||||
MediaFormat format = createMediaFormat(extractor.getTrackFormat(i));
|
||||
tracks[i] = new TrackGroup(format);
|
||||
long trackDurationUs = format.durationUs;
|
||||
if (trackDurationUs > durationUs) {
|
||||
durationUs = trackDurationUs;
|
||||
android.media.MediaFormat format = extractor.getTrackFormat(i);
|
||||
if (format.containsKey(android.media.MediaFormat.KEY_DURATION)) {
|
||||
durationUs = Math.max(durationUs, format.getLong(android.media.MediaFormat.KEY_DURATION));
|
||||
}
|
||||
tracks[i] = new TrackGroup(createMediaFormat(format));
|
||||
}
|
||||
prepared = true;
|
||||
return true;
|
||||
@ -314,12 +313,10 @@ public final class FrameworkSampleSource implements SampleSource {
|
||||
initializationData.add(data);
|
||||
buffer.flip();
|
||||
}
|
||||
long durationUs = format.containsKey(android.media.MediaFormat.KEY_DURATION)
|
||||
? format.getLong(android.media.MediaFormat.KEY_DURATION) : C.UNKNOWN_TIME_US;
|
||||
MediaFormat mediaFormat = new MediaFormat(null, mimeType, MediaFormat.NO_VALUE, maxInputSize,
|
||||
durationUs, width, height, rotationDegrees, MediaFormat.NO_VALUE, channelCount, sampleRate,
|
||||
language, MediaFormat.OFFSET_SAMPLE_RELATIVE, initializationData, false,
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE);
|
||||
width, height, rotationDegrees, MediaFormat.NO_VALUE, channelCount, sampleRate, language,
|
||||
MediaFormat.OFFSET_SAMPLE_RELATIVE, initializationData, false, MediaFormat.NO_VALUE,
|
||||
MediaFormat.NO_VALUE);
|
||||
mediaFormat.setFrameworkFormatV16(format);
|
||||
return mediaFormat;
|
||||
}
|
||||
|
@ -57,10 +57,6 @@ public final class MediaFormat {
|
||||
* if unknown or not applicable.
|
||||
*/
|
||||
public final int maxInputSize;
|
||||
/**
|
||||
* The duration in microseconds, or {@link C#UNKNOWN_TIME_US} if the duration is unknown.
|
||||
*/
|
||||
public final long durationUs;
|
||||
/**
|
||||
* Initialization data that must be provided to the decoder. Will not be null, but may be empty
|
||||
* if initialization data is not required.
|
||||
@ -138,61 +134,56 @@ public final class MediaFormat {
|
||||
private android.media.MediaFormat frameworkMediaFormat;
|
||||
|
||||
public static MediaFormat createVideoFormat(String trackId, String mimeType, int bitrate,
|
||||
int maxInputSize, long durationUs, int width, int height, List<byte[]> initializationData) {
|
||||
return createVideoFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
int maxInputSize, int width, int height, List<byte[]> initializationData) {
|
||||
return createVideoFormat(trackId, mimeType, bitrate, maxInputSize, width, height,
|
||||
initializationData, NO_VALUE, NO_VALUE);
|
||||
}
|
||||
|
||||
public static MediaFormat createVideoFormat(String trackId, String mimeType, int bitrate,
|
||||
int maxInputSize, long durationUs, int width, int height, List<byte[]> initializationData,
|
||||
int rotationDegrees, float pixelWidthHeightRatio) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
rotationDegrees, pixelWidthHeightRatio, NO_VALUE, NO_VALUE, null, OFFSET_SAMPLE_RELATIVE,
|
||||
initializationData, false, NO_VALUE, NO_VALUE);
|
||||
int maxInputSize, int width, int height, List<byte[]> initializationData, int rotationDegrees,
|
||||
float pixelWidthHeightRatio) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, width, height, rotationDegrees,
|
||||
pixelWidthHeightRatio, NO_VALUE, NO_VALUE, null, OFFSET_SAMPLE_RELATIVE, initializationData,
|
||||
false, NO_VALUE, NO_VALUE);
|
||||
}
|
||||
|
||||
public static MediaFormat createAudioFormat(String trackId, String mimeType, int bitrate,
|
||||
int maxInputSize, long durationUs, int channelCount, int sampleRate,
|
||||
List<byte[]> initializationData, String language) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, channelCount, sampleRate, language, OFFSET_SAMPLE_RELATIVE,
|
||||
initializationData, false, NO_VALUE, NO_VALUE);
|
||||
int maxInputSize, int channelCount, int sampleRate, List<byte[]> initializationData,
|
||||
String language) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, NO_VALUE, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, channelCount, sampleRate, language, OFFSET_SAMPLE_RELATIVE, initializationData,
|
||||
false, NO_VALUE, NO_VALUE);
|
||||
}
|
||||
|
||||
public static MediaFormat createTextFormat(String trackId, String mimeType, int bitrate,
|
||||
long durationUs, String language) {
|
||||
return createTextFormat(trackId, mimeType, bitrate, durationUs, language,
|
||||
OFFSET_SAMPLE_RELATIVE);
|
||||
String language) {
|
||||
return createTextFormat(trackId, mimeType, bitrate, language, OFFSET_SAMPLE_RELATIVE);
|
||||
}
|
||||
|
||||
public static MediaFormat createTextFormat(String trackId, String mimeType, int bitrate,
|
||||
long durationUs, String language, long subsampleOffsetUs) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, NO_VALUE, durationUs, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE, language, subsampleOffsetUs, null, false, NO_VALUE,
|
||||
NO_VALUE);
|
||||
String language, long subsampleOffsetUs) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, NO_VALUE, language, subsampleOffsetUs, null, false, NO_VALUE, NO_VALUE);
|
||||
}
|
||||
|
||||
public static MediaFormat createFormatForMimeType(String trackId, String mimeType, int bitrate,
|
||||
long durationUs) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, NO_VALUE, durationUs, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE, null, OFFSET_SAMPLE_RELATIVE, null, false, NO_VALUE,
|
||||
public static MediaFormat createFormatForMimeType(String trackId, String mimeType, int bitrate) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, NO_VALUE, null, OFFSET_SAMPLE_RELATIVE, null, false, NO_VALUE,
|
||||
NO_VALUE);
|
||||
}
|
||||
|
||||
public static MediaFormat createId3Format() {
|
||||
return createFormatForMimeType(null, MimeTypes.APPLICATION_ID3, MediaFormat.NO_VALUE,
|
||||
C.UNKNOWN_TIME_US);
|
||||
return createFormatForMimeType(null, MimeTypes.APPLICATION_ID3, MediaFormat.NO_VALUE);
|
||||
}
|
||||
|
||||
/* package */ MediaFormat(String trackId, String mimeType, int bitrate, int maxInputSize,
|
||||
long durationUs, int width, int height, int rotationDegrees, float pixelWidthHeightRatio,
|
||||
int channelCount, int sampleRate, String language, long subsampleOffsetUs,
|
||||
List<byte[]> initializationData, boolean adaptive, int maxWidth, int maxHeight) {
|
||||
int width, int height, int rotationDegrees, float pixelWidthHeightRatio, int channelCount,
|
||||
int sampleRate, String language, long subsampleOffsetUs, List<byte[]> initializationData,
|
||||
boolean adaptive, int maxWidth, int maxHeight) {
|
||||
this.trackId = trackId;
|
||||
this.mimeType = Assertions.checkNotEmpty(mimeType);
|
||||
this.bitrate = bitrate;
|
||||
this.maxInputSize = maxInputSize;
|
||||
this.durationUs = durationUs;
|
||||
this.width = width;
|
||||
this.height = height;
|
||||
this.rotationDegrees = rotationDegrees;
|
||||
@ -209,39 +200,33 @@ public final class MediaFormat {
|
||||
}
|
||||
|
||||
public MediaFormat copyWithMaxInputSize(int maxInputSize) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
rotationDegrees, pixelWidthHeightRatio, channelCount, sampleRate, language,
|
||||
subsampleOffsetUs, initializationData, adaptive, maxWidth, maxHeight);
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, width, height, rotationDegrees,
|
||||
pixelWidthHeightRatio, channelCount, sampleRate, language, subsampleOffsetUs,
|
||||
initializationData, adaptive, maxWidth, maxHeight);
|
||||
}
|
||||
|
||||
public MediaFormat copyWithMaxVideoDimensions(int maxWidth, int maxHeight) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
rotationDegrees, pixelWidthHeightRatio, channelCount, sampleRate, language,
|
||||
subsampleOffsetUs, initializationData, adaptive, maxWidth, maxHeight);
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, width, height, rotationDegrees,
|
||||
pixelWidthHeightRatio, channelCount, sampleRate, language, subsampleOffsetUs,
|
||||
initializationData, adaptive, maxWidth, maxHeight);
|
||||
}
|
||||
|
||||
public MediaFormat copyWithSubsampleOffsetUs(long subsampleOffsetUs) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
rotationDegrees, pixelWidthHeightRatio, channelCount, sampleRate, language,
|
||||
subsampleOffsetUs, initializationData, adaptive, maxWidth, maxHeight);
|
||||
}
|
||||
|
||||
public MediaFormat copyWithDurationUs(long durationUs) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
rotationDegrees, pixelWidthHeightRatio, channelCount, sampleRate, language,
|
||||
subsampleOffsetUs, initializationData, adaptive, maxWidth, maxHeight);
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, width, height, rotationDegrees,
|
||||
pixelWidthHeightRatio, channelCount, sampleRate, language, subsampleOffsetUs,
|
||||
initializationData, adaptive, maxWidth, maxHeight);
|
||||
}
|
||||
|
||||
public MediaFormat copyWithFixedTrackInfo(String trackId, int bitrate, int width, int height,
|
||||
String language) {
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, durationUs, width, height,
|
||||
rotationDegrees, pixelWidthHeightRatio, channelCount, sampleRate, language,
|
||||
subsampleOffsetUs, initializationData, adaptive, NO_VALUE, NO_VALUE);
|
||||
return new MediaFormat(trackId, mimeType, bitrate, maxInputSize, width, height, rotationDegrees,
|
||||
pixelWidthHeightRatio, channelCount, sampleRate, language, subsampleOffsetUs,
|
||||
initializationData, adaptive, NO_VALUE, NO_VALUE);
|
||||
}
|
||||
|
||||
public MediaFormat copyAsAdaptive(String trackId) {
|
||||
return new MediaFormat(trackId, mimeType, NO_VALUE, NO_VALUE, durationUs, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE, null, OFFSET_SAMPLE_RELATIVE, null, true, maxWidth,
|
||||
return new MediaFormat(trackId, mimeType, NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE, NO_VALUE,
|
||||
NO_VALUE, NO_VALUE, NO_VALUE, null, OFFSET_SAMPLE_RELATIVE, null, true, maxWidth,
|
||||
maxHeight);
|
||||
}
|
||||
|
||||
@ -266,9 +251,6 @@ public final class MediaFormat {
|
||||
for (int i = 0; i < initializationData.size(); i++) {
|
||||
format.setByteBuffer("csd-" + i, ByteBuffer.wrap(initializationData.get(i)));
|
||||
}
|
||||
if (durationUs != C.UNKNOWN_TIME_US) {
|
||||
format.setLong(android.media.MediaFormat.KEY_DURATION, durationUs);
|
||||
}
|
||||
frameworkMediaFormat = format;
|
||||
}
|
||||
return frameworkMediaFormat;
|
||||
@ -290,8 +272,8 @@ public final class MediaFormat {
|
||||
public String toString() {
|
||||
return "MediaFormat(" + trackId + ", " + mimeType + ", " + bitrate + ", " + maxInputSize
|
||||
+ ", " + width + ", " + height + ", " + rotationDegrees + ", " + pixelWidthHeightRatio
|
||||
+ ", " + channelCount + ", " + sampleRate + ", " + language + ", " + durationUs + ", "
|
||||
+ adaptive + ", " + maxWidth + ", " + maxHeight + ")";
|
||||
+ ", " + channelCount + ", " + sampleRate + ", " + language + ", " + ", " + adaptive + ", "
|
||||
+ maxWidth + ", " + maxHeight + ")";
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -306,7 +288,6 @@ public final class MediaFormat {
|
||||
result = 31 * result + height;
|
||||
result = 31 * result + rotationDegrees;
|
||||
result = 31 * result + Float.floatToRawIntBits(pixelWidthHeightRatio);
|
||||
result = 31 * result + (int) durationUs;
|
||||
result = 31 * result + (adaptive ? 1231 : 1237);
|
||||
result = 31 * result + maxWidth;
|
||||
result = 31 * result + maxHeight;
|
||||
|
@ -51,6 +51,7 @@ public final class SingleSampleSource implements SampleSource, TrackStream, Load
|
||||
private final Uri uri;
|
||||
private final DataSource dataSource;
|
||||
private final MediaFormat format;
|
||||
private final long durationUs;
|
||||
private final int minLoadableRetryCount;
|
||||
private final TrackGroup tracks;
|
||||
|
||||
@ -64,15 +65,16 @@ public final class SingleSampleSource implements SampleSource, TrackStream, Load
|
||||
private int currentLoadableExceptionCount;
|
||||
private long currentLoadableExceptionTimestamp;
|
||||
|
||||
public SingleSampleSource(Uri uri, DataSource dataSource, MediaFormat format) {
|
||||
this(uri, dataSource, format, DEFAULT_MIN_LOADABLE_RETRY_COUNT);
|
||||
public SingleSampleSource(Uri uri, DataSource dataSource, MediaFormat format, long durationUs) {
|
||||
this(uri, dataSource, format, durationUs, DEFAULT_MIN_LOADABLE_RETRY_COUNT);
|
||||
}
|
||||
|
||||
public SingleSampleSource(Uri uri, DataSource dataSource, MediaFormat format,
|
||||
public SingleSampleSource(Uri uri, DataSource dataSource, MediaFormat format, long durationUs,
|
||||
int minLoadableRetryCount) {
|
||||
this.uri = uri;
|
||||
this.dataSource = dataSource;
|
||||
this.format = format;
|
||||
this.durationUs = durationUs;
|
||||
this.minLoadableRetryCount = minLoadableRetryCount;
|
||||
tracks = new TrackGroup(format);
|
||||
sampleData = new byte[INITIAL_SAMPLE_SIZE];
|
||||
@ -100,7 +102,7 @@ public final class SingleSampleSource implements SampleSource, TrackStream, Load
|
||||
|
||||
@Override
|
||||
public long getDurationUs() {
|
||||
return format.durationUs;
|
||||
return durationUs;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -152,12 +152,11 @@ public class ChunkSampleSource implements SampleSource, TrackStream, Loader.Call
|
||||
if (!chunkSource.prepare()) {
|
||||
return false;
|
||||
}
|
||||
durationUs = C.UNKNOWN_TIME_US;
|
||||
durationUs = chunkSource.getDurationUs();
|
||||
TrackGroup trackGroup = chunkSource.getTracks();
|
||||
if (trackGroup.length > 0) {
|
||||
MediaFormat firstTrackFormat = trackGroup.getFormat(0);
|
||||
loader = new Loader("Loader:" + firstTrackFormat.mimeType);
|
||||
durationUs = firstTrackFormat.durationUs;
|
||||
}
|
||||
state = STATE_PREPARED;
|
||||
return true;
|
||||
|
@ -15,6 +15,7 @@
|
||||
*/
|
||||
package com.google.android.exoplayer.chunk;
|
||||
|
||||
import com.google.android.exoplayer.C;
|
||||
import com.google.android.exoplayer.TrackGroup;
|
||||
|
||||
import java.io.IOException;
|
||||
@ -50,6 +51,16 @@ public interface ChunkSource {
|
||||
*/
|
||||
boolean prepare() throws IOException;
|
||||
|
||||
/**
|
||||
* Gets the duration of the source in microseconds.
|
||||
* <p>
|
||||
* This method should only be called after the source has been prepared.
|
||||
*
|
||||
* @return The duration of the source in microseconds, or {@link C#UNKNOWN_TIME_US} if the
|
||||
* duration is unknown.
|
||||
*/
|
||||
long getDurationUs();
|
||||
|
||||
/**
|
||||
* Gets the group of tracks provided by the source.
|
||||
* <p>
|
||||
|
@ -121,6 +121,7 @@ public class DashChunkSource implements ChunkSource {
|
||||
|
||||
private boolean manifestFetcherEnabled;
|
||||
private boolean live;
|
||||
private long durationUs;
|
||||
private MediaPresentationDescription currentManifest;
|
||||
private MediaPresentationDescription processedManifest;
|
||||
private int nextPeriodHolderIndex;
|
||||
@ -248,12 +249,18 @@ public class DashChunkSource implements ChunkSource {
|
||||
return false;
|
||||
} else {
|
||||
live = currentManifest.dynamic;
|
||||
durationUs = live ? C.UNKNOWN_TIME_US : currentManifest.duration * 1000;
|
||||
selectTracks(currentManifest, 0);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getDurationUs() {
|
||||
return durationUs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final TrackGroup getTracks() {
|
||||
return trackGroup;
|
||||
@ -509,7 +516,7 @@ public class DashChunkSource implements ChunkSource {
|
||||
MediaFormat[] trackMediaFormats = new MediaFormat[representations.size()];
|
||||
int trackCount = 0;
|
||||
for (int j = 0; j < trackMediaFormats.length; j++) {
|
||||
trackMediaFormats[trackCount] = getMediaFormat(manifest, representations.get(j).format);
|
||||
trackMediaFormats[trackCount] = getMediaFormat(representations.get(j).format);
|
||||
if (trackMediaFormats[trackCount] != null) {
|
||||
trackFormats[trackCount++] = representations.get(j).format;
|
||||
}
|
||||
@ -523,15 +530,14 @@ public class DashChunkSource implements ChunkSource {
|
||||
trackFormats = new Format[0];
|
||||
}
|
||||
|
||||
private MediaFormat getMediaFormat(MediaPresentationDescription manifest,
|
||||
Format representationFormat) {
|
||||
private MediaFormat getMediaFormat(Format representationFormat) {
|
||||
String mediaMimeType = getMediaMimeType(representationFormat);
|
||||
if (mediaMimeType == null) {
|
||||
Log.w(TAG, "Skipped track " + representationFormat.id + " (unknown media mime type)");
|
||||
return null;
|
||||
}
|
||||
MediaFormat trackFormat = getTrackFormat(adaptationSetType, representationFormat,
|
||||
mediaMimeType, manifest.dynamic ? C.UNKNOWN_TIME_US : manifest.duration * 1000);
|
||||
mediaMimeType);
|
||||
if (trackFormat == null) {
|
||||
Log.w(TAG, "Skipped track " + representationFormat.id + " (unknown media format)");
|
||||
return null;
|
||||
@ -545,17 +551,17 @@ public class DashChunkSource implements ChunkSource {
|
||||
}
|
||||
|
||||
private static MediaFormat getTrackFormat(int adaptationSetType, Format format,
|
||||
String mediaMimeType, long durationUs) {
|
||||
String mediaMimeType) {
|
||||
switch (adaptationSetType) {
|
||||
case AdaptationSet.TYPE_VIDEO:
|
||||
return MediaFormat.createVideoFormat(format.id, mediaMimeType, format.bitrate,
|
||||
MediaFormat.NO_VALUE, durationUs, format.width, format.height, null);
|
||||
MediaFormat.NO_VALUE, format.width, format.height, null);
|
||||
case AdaptationSet.TYPE_AUDIO:
|
||||
return MediaFormat.createAudioFormat(format.id, mediaMimeType, format.bitrate,
|
||||
MediaFormat.NO_VALUE, durationUs, format.audioChannels, format.audioSamplingRate, null,
|
||||
MediaFormat.NO_VALUE, format.audioChannels, format.audioSamplingRate, null,
|
||||
format.language);
|
||||
case AdaptationSet.TYPE_TEXT:
|
||||
return MediaFormat.createTextFormat(format.id, mediaMimeType, format.bitrate, durationUs,
|
||||
return MediaFormat.createTextFormat(format.id, mediaMimeType, format.bitrate,
|
||||
format.language);
|
||||
default:
|
||||
return null;
|
||||
|
@ -47,18 +47,23 @@ public final class ChunkIndex implements SeekMap {
|
||||
*/
|
||||
public final long[] timesUs;
|
||||
|
||||
private final long durationUs;
|
||||
|
||||
/**
|
||||
* @param durationUs The duration of the stream.
|
||||
* @param sizes The chunk sizes, in bytes.
|
||||
* @param offsets The chunk byte offsets.
|
||||
* @param durationsUs The chunk durations, in microseconds.
|
||||
* @param timesUs The start time of each chunk, in microseconds.
|
||||
*/
|
||||
public ChunkIndex(int[] sizes, long[] offsets, long[] durationsUs, long[] timesUs) {
|
||||
this.length = sizes.length;
|
||||
public ChunkIndex(long durationUs, int[] sizes, long[] offsets, long[] durationsUs,
|
||||
long[] timesUs) {
|
||||
this.durationUs = durationUs;
|
||||
this.sizes = sizes;
|
||||
this.offsets = offsets;
|
||||
this.durationsUs = durationsUs;
|
||||
this.timesUs = timesUs;
|
||||
length = sizes.length;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -78,6 +83,11 @@ public final class ChunkIndex implements SeekMap {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getDurationUs() {
|
||||
return durationUs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPosition(long timeUs) {
|
||||
return offsets[getChunkIndex(timeUs)];
|
||||
|
@ -16,7 +16,6 @@
|
||||
package com.google.android.exoplayer.extractor;
|
||||
|
||||
import com.google.android.exoplayer.C;
|
||||
import com.google.android.exoplayer.MediaFormat;
|
||||
import com.google.android.exoplayer.MediaFormatHolder;
|
||||
import com.google.android.exoplayer.ParserException;
|
||||
import com.google.android.exoplayer.SampleHolder;
|
||||
@ -267,13 +266,9 @@ public final class ExtractorSampleSource implements SampleSource, ExtractorOutpu
|
||||
trackEnabledStates = new boolean[trackCount];
|
||||
pendingResets = new boolean[trackCount];
|
||||
pendingMediaFormat = new boolean[trackCount];
|
||||
durationUs = C.UNKNOWN_TIME_US;
|
||||
durationUs = seekMap.getDurationUs();
|
||||
for (int i = 0; i < trackCount; i++) {
|
||||
MediaFormat format = sampleQueues.valueAt(i).getFormat();
|
||||
tracks[i] = new TrackGroup(format);
|
||||
if (format.durationUs > durationUs) {
|
||||
durationUs = format.durationUs;
|
||||
}
|
||||
tracks[i] = new TrackGroup(sampleQueues.valueAt(i).getFormat());
|
||||
}
|
||||
prepared = true;
|
||||
return true;
|
||||
|
@ -15,6 +15,8 @@
|
||||
*/
|
||||
package com.google.android.exoplayer.extractor;
|
||||
|
||||
import com.google.android.exoplayer.C;
|
||||
|
||||
/**
|
||||
* Maps seek positions (in microseconds) to corresponding positions (byte offsets) in the stream.
|
||||
*/
|
||||
@ -23,19 +25,34 @@ public interface SeekMap {
|
||||
/**
|
||||
* A {@link SeekMap} that does not support seeking.
|
||||
*/
|
||||
public static final SeekMap UNSEEKABLE = new SeekMap() {
|
||||
final class Unseekable implements SeekMap {
|
||||
|
||||
private final long durationUs;
|
||||
|
||||
/**
|
||||
* @param durationUs The duration of the stream in microseconds, or {@link C#UNKNOWN_TIME_US} if
|
||||
* the duration is unknown.
|
||||
*/
|
||||
public Unseekable(long durationUs) {
|
||||
this.durationUs = durationUs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isSeekable() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getDurationUs() {
|
||||
return durationUs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPosition(long timeUs) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Whether or not the seeking is supported.
|
||||
@ -47,6 +64,14 @@ public interface SeekMap {
|
||||
*/
|
||||
boolean isSeekable();
|
||||
|
||||
/**
|
||||
* Returns the duration of the stream in microseconds.
|
||||
*
|
||||
* @return The duration of the stream in microseconds, or {@link C#UNKNOWN_TIME_US} if the
|
||||
* duration is unknown.
|
||||
*/
|
||||
long getDurationUs();
|
||||
|
||||
/**
|
||||
* Maps a seek position in microseconds to a corresponding position (byte offset) in the stream
|
||||
* from which data can be provided to the extractor.
|
||||
|
@ -87,8 +87,8 @@ import java.util.Collections;
|
||||
Pair<Integer, Integer> audioParams = CodecSpecificDataUtil.parseAacAudioSpecificConfig(
|
||||
audioSpecifiConfig);
|
||||
MediaFormat mediaFormat = MediaFormat.createAudioFormat(null, MimeTypes.AUDIO_AAC,
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, getDurationUs(), audioParams.second,
|
||||
audioParams.first, Collections.singletonList(audioSpecifiConfig), null);
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, audioParams.second, audioParams.first,
|
||||
Collections.singletonList(audioSpecifiConfig), null);
|
||||
output.format(mediaFormat);
|
||||
hasOutputFormat = true;
|
||||
} else if (packetType == AAC_PACKET_TYPE_AAC_RAW) {
|
||||
|
@ -15,7 +15,6 @@
|
||||
*/
|
||||
package com.google.android.exoplayer.extractor.flv;
|
||||
|
||||
import com.google.android.exoplayer.C;
|
||||
import com.google.android.exoplayer.extractor.Extractor;
|
||||
import com.google.android.exoplayer.extractor.ExtractorInput;
|
||||
import com.google.android.exoplayer.extractor.ExtractorOutput;
|
||||
@ -237,14 +236,6 @@ public final class FlvExtractor implements Extractor, SeekMap {
|
||||
videoReader.consume(prepareTagData(input), tagTimestampUs);
|
||||
} else if (tagType == TAG_TYPE_SCRIPT_DATA && metadataReader != null) {
|
||||
metadataReader.consume(prepareTagData(input), tagTimestampUs);
|
||||
if (metadataReader.getDurationUs() != C.UNKNOWN_TIME_US) {
|
||||
if (audioReader != null) {
|
||||
audioReader.setDurationUs(metadataReader.getDurationUs());
|
||||
}
|
||||
if (videoReader != null) {
|
||||
videoReader.setDurationUs(metadataReader.getDurationUs());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
input.skipFully(tagDataSize);
|
||||
wasConsumed = false;
|
||||
@ -273,6 +264,11 @@ public final class FlvExtractor implements Extractor, SeekMap {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getDurationUs() {
|
||||
return metadataReader.getDurationUs();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getPosition(long timeUs) {
|
||||
return 0;
|
||||
|
@ -43,11 +43,18 @@ import java.util.Map;
|
||||
private static final int AMF_TYPE_STRICT_ARRAY = 10;
|
||||
private static final int AMF_TYPE_DATE = 11;
|
||||
|
||||
private long durationUs;
|
||||
|
||||
/**
|
||||
* @param output A {@link TrackOutput} to which samples should be written.
|
||||
*/
|
||||
public ScriptTagPayloadReader(TrackOutput output) {
|
||||
super(output);
|
||||
durationUs = C.UNKNOWN_TIME_US;
|
||||
}
|
||||
|
||||
public long getDurationUs() {
|
||||
return durationUs;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -82,7 +89,7 @@ import java.util.Map;
|
||||
if (metadata.containsKey(KEY_DURATION)) {
|
||||
double durationSeconds = (double) metadata.get(KEY_DURATION);
|
||||
if (durationSeconds > 0.0) {
|
||||
setDurationUs((long) (durationSeconds * C.MICROS_PER_SECOND));
|
||||
durationUs = (long) (durationSeconds * C.MICROS_PER_SECOND);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -15,7 +15,6 @@
|
||||
*/
|
||||
package com.google.android.exoplayer.extractor.flv;
|
||||
|
||||
import com.google.android.exoplayer.C;
|
||||
import com.google.android.exoplayer.ParserException;
|
||||
import com.google.android.exoplayer.extractor.TrackOutput;
|
||||
import com.google.android.exoplayer.util.ParsableByteArray;
|
||||
@ -38,32 +37,11 @@ import com.google.android.exoplayer.util.ParsableByteArray;
|
||||
|
||||
protected final TrackOutput output;
|
||||
|
||||
private long durationUs;
|
||||
|
||||
/**
|
||||
* @param output A {@link TrackOutput} to which samples should be written.
|
||||
*/
|
||||
protected TagPayloadReader(TrackOutput output) {
|
||||
this.output = output;
|
||||
this.durationUs = C.UNKNOWN_TIME_US;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets duration in microseconds.
|
||||
*
|
||||
* @param durationUs duration in microseconds.
|
||||
*/
|
||||
public final void setDurationUs(long durationUs) {
|
||||
this.durationUs = durationUs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the duration in microseconds.
|
||||
*
|
||||
* @return The duration in microseconds.
|
||||
*/
|
||||
public final long getDurationUs() {
|
||||
return durationUs;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -96,9 +96,8 @@ import java.util.List;
|
||||
|
||||
// Construct and output the format.
|
||||
MediaFormat mediaFormat = MediaFormat.createVideoFormat(null, MimeTypes.VIDEO_H264,
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, getDurationUs(), avcData.width,
|
||||
avcData.height, avcData.initializationData, MediaFormat.NO_VALUE,
|
||||
avcData.pixelWidthAspectRatio);
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, avcData.width, avcData.height,
|
||||
avcData.initializationData, MediaFormat.NO_VALUE, avcData.pixelWidthAspectRatio);
|
||||
output.format(mediaFormat);
|
||||
hasOutputFormat = true;
|
||||
} else if (packetType == AVC_PACKET_TYPE_AVC_NALU) {
|
||||
|
@ -120,8 +120,8 @@ public final class Mp3Extractor implements Extractor {
|
||||
setupSeeker(input);
|
||||
extractorOutput.seekMap(seeker);
|
||||
trackOutput.format(MediaFormat.createAudioFormat(null, synchronizedHeader.mimeType,
|
||||
MediaFormat.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, seeker.getDurationUs(),
|
||||
synchronizedHeader.channels, synchronizedHeader.sampleRate, null, null));
|
||||
MediaFormat.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, synchronizedHeader.channels,
|
||||
synchronizedHeader.sampleRate, null, null));
|
||||
}
|
||||
return readSample(input);
|
||||
}
|
||||
@ -320,9 +320,6 @@ public final class Mp3Extractor implements Extractor {
|
||||
*/
|
||||
long getTimeUs(long position);
|
||||
|
||||
/** Returns the duration of the source, in microseconds. */
|
||||
long getDurationUs();
|
||||
|
||||
}
|
||||
|
||||
/* package */ static final class Metadata {
|
||||
|
@ -67,7 +67,7 @@ import java.util.List;
|
||||
|
||||
Pair<Long, String> mdhdData = parseMdhd(mdia.getLeafAtomOfType(Atom.TYPE_mdhd).data);
|
||||
StsdData stsdData = parseStsd(stbl.getLeafAtomOfType(Atom.TYPE_stsd).data, tkhdData.id,
|
||||
durationUs, tkhdData.rotationDegrees, mdhdData.second, isQuickTime);
|
||||
tkhdData.rotationDegrees, mdhdData.second, isQuickTime);
|
||||
Pair<long[], long[]> edtsData = parseEdts(trak.getContainerAtomOfType(Atom.TYPE_edts));
|
||||
return stsdData.mediaFormat == null ? null
|
||||
: new Track(tkhdData.id, trackType, mdhdData.first, movieTimescale, durationUs,
|
||||
@ -438,14 +438,13 @@ import java.util.List;
|
||||
*
|
||||
* @param stsd The stsd atom to parse.
|
||||
* @param trackId The track's identifier in its container.
|
||||
* @param durationUs The duration of the track in microseconds.
|
||||
* @param rotationDegrees The rotation of the track in degrees.
|
||||
* @param language The language of the track.
|
||||
* @param isQuickTime True for QuickTime media. False otherwise.
|
||||
* @return An object containing the parsed data.
|
||||
*/
|
||||
private static StsdData parseStsd(ParsableByteArray stsd, int trackId, long durationUs,
|
||||
int rotationDegrees, String language, boolean isQuickTime) {
|
||||
private static StsdData parseStsd(ParsableByteArray stsd, int trackId, int rotationDegrees,
|
||||
String language, boolean isQuickTime) {
|
||||
stsd.setPosition(Atom.FULL_HEADER_SIZE);
|
||||
int numberOfEntries = stsd.readInt();
|
||||
StsdData out = new StsdData(numberOfEntries);
|
||||
@ -458,27 +457,27 @@ import java.util.List;
|
||||
|| childAtomType == Atom.TYPE_encv || childAtomType == Atom.TYPE_mp4v
|
||||
|| childAtomType == Atom.TYPE_hvc1 || childAtomType == Atom.TYPE_hev1
|
||||
|| childAtomType == Atom.TYPE_s263) {
|
||||
parseVideoSampleEntry(stsd, childStartPosition, childAtomSize, trackId, durationUs,
|
||||
rotationDegrees, out, i);
|
||||
parseVideoSampleEntry(stsd, childStartPosition, childAtomSize, trackId, rotationDegrees,
|
||||
out, i);
|
||||
} else if (childAtomType == Atom.TYPE_mp4a || childAtomType == Atom.TYPE_enca
|
||||
|| childAtomType == Atom.TYPE_ac_3 || childAtomType == Atom.TYPE_ec_3
|
||||
|| childAtomType == Atom.TYPE_dtsc || childAtomType == Atom.TYPE_dtse
|
||||
|| childAtomType == Atom.TYPE_dtsh || childAtomType == Atom.TYPE_dtsl
|
||||
|| childAtomType == Atom.TYPE_samr || childAtomType == Atom.TYPE_sawb) {
|
||||
parseAudioSampleEntry(stsd, childAtomType, childStartPosition, childAtomSize, trackId,
|
||||
durationUs, language, isQuickTime, out, i);
|
||||
language, isQuickTime, out, i);
|
||||
} else if (childAtomType == Atom.TYPE_TTML) {
|
||||
out.mediaFormat = MediaFormat.createTextFormat(Integer.toString(trackId),
|
||||
MimeTypes.APPLICATION_TTML, MediaFormat.NO_VALUE, durationUs, language);
|
||||
MimeTypes.APPLICATION_TTML, MediaFormat.NO_VALUE, language);
|
||||
} else if (childAtomType == Atom.TYPE_tx3g) {
|
||||
out.mediaFormat = MediaFormat.createTextFormat(Integer.toString(trackId),
|
||||
MimeTypes.APPLICATION_TX3G, MediaFormat.NO_VALUE, durationUs, language);
|
||||
MimeTypes.APPLICATION_TX3G, MediaFormat.NO_VALUE, language);
|
||||
} else if (childAtomType == Atom.TYPE_wvtt) {
|
||||
out.mediaFormat = MediaFormat.createTextFormat(Integer.toString(trackId),
|
||||
MimeTypes.APPLICATION_MP4VTT, MediaFormat.NO_VALUE, durationUs, language);
|
||||
MimeTypes.APPLICATION_MP4VTT, MediaFormat.NO_VALUE, language);
|
||||
} else if (childAtomType == Atom.TYPE_stpp) {
|
||||
out.mediaFormat = MediaFormat.createTextFormat(Integer.toString(trackId),
|
||||
MimeTypes.APPLICATION_TTML, MediaFormat.NO_VALUE, durationUs, language,
|
||||
MimeTypes.APPLICATION_TTML, MediaFormat.NO_VALUE, language,
|
||||
0 /* subsample timing is absolute */);
|
||||
}
|
||||
stsd.setPosition(childStartPosition + childAtomSize);
|
||||
@ -487,7 +486,7 @@ import java.util.List;
|
||||
}
|
||||
|
||||
private static void parseVideoSampleEntry(ParsableByteArray parent, int position, int size,
|
||||
int trackId, long durationUs, int rotationDegrees, StsdData out, int entryIndex) {
|
||||
int trackId, int rotationDegrees, StsdData out, int entryIndex) {
|
||||
parent.setPosition(position + Atom.HEADER_SIZE);
|
||||
|
||||
parent.skipBytes(24);
|
||||
@ -550,7 +549,7 @@ import java.util.List;
|
||||
}
|
||||
|
||||
out.mediaFormat = MediaFormat.createVideoFormat(Integer.toString(trackId), mimeType,
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, durationUs, width, height, initializationData,
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, width, height, initializationData,
|
||||
rotationDegrees, pixelWidthHeightRatio);
|
||||
}
|
||||
|
||||
@ -712,8 +711,7 @@ import java.util.List;
|
||||
}
|
||||
|
||||
private static void parseAudioSampleEntry(ParsableByteArray parent, int atomType, int position,
|
||||
int size, int trackId, long durationUs, String language, boolean isQuickTime, StsdData out,
|
||||
int entryIndex) {
|
||||
int size, int trackId, String language, boolean isQuickTime, StsdData out, int entryIndex) {
|
||||
parent.setPosition(position + Atom.HEADER_SIZE);
|
||||
|
||||
int quickTimeSoundDescriptionVersion = 0;
|
||||
@ -789,19 +787,18 @@ import java.util.List;
|
||||
// TODO: Add support for encryption (by setting out.trackEncryptionBoxes).
|
||||
parent.setPosition(Atom.HEADER_SIZE + childAtomPosition);
|
||||
out.mediaFormat = Ac3Util.parseAc3AnnexFFormat(parent, Integer.toString(trackId),
|
||||
durationUs, language);
|
||||
language);
|
||||
return;
|
||||
} else if (atomType == Atom.TYPE_ec_3 && childAtomType == Atom.TYPE_dec3) {
|
||||
parent.setPosition(Atom.HEADER_SIZE + childAtomPosition);
|
||||
out.mediaFormat = Ac3Util.parseEAc3AnnexFFormat(parent, Integer.toString(trackId),
|
||||
durationUs, language);
|
||||
language);
|
||||
return;
|
||||
} else if ((atomType == Atom.TYPE_dtsc || atomType == Atom.TYPE_dtse
|
||||
|| atomType == Atom.TYPE_dtsh || atomType == Atom.TYPE_dtsl)
|
||||
&& childAtomType == Atom.TYPE_ddts) {
|
||||
out.mediaFormat = MediaFormat.createAudioFormat(Integer.toString(trackId), mimeType,
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, durationUs, channelCount, sampleRate, null,
|
||||
language);
|
||||
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, channelCount, sampleRate, null, language);
|
||||
return;
|
||||
}
|
||||
childAtomPosition += childAtomSize;
|
||||
@ -813,7 +810,7 @@ import java.util.List;
|
||||
}
|
||||
|
||||
out.mediaFormat = MediaFormat.createAudioFormat(Integer.toString(trackId), mimeType,
|
||||
MediaFormat.NO_VALUE, sampleSize, durationUs, channelCount, sampleRate,
|
||||
MediaFormat.NO_VALUE, sampleSize, channelCount, sampleRate,
|
||||
initializationData == null ? null : Collections.singletonList(initializationData),
|
||||
language);
|
||||
}
|
||||
|

@@ -216,7 +216,7 @@ public final class FragmentedMp4Extractor implements Extractor {
if (atomType == Atom.TYPE_mdat) {
endOfMdatPosition = atomPosition + atomSize;
if (!haveOutputSeekMap) {
extractorOutput.seekMap(SeekMap.UNSEEKABLE);
extractorOutput.seekMap(new SeekMap.Unseekable(track.durationUs));
haveOutputSeekMap = true;
}
if (fragmentRun.sampleEncryptionDataNeedsFill) {
@@ -271,7 +271,7 @@ public final class FragmentedMp4Extractor implements Extractor {
if (!containerAtoms.isEmpty()) {
containerAtoms.peek().add(leaf);
} else if (leaf.type == Atom.TYPE_sidx) {
ChunkIndex segmentIndex = parseSidx(leaf.data, inputPosition);
ChunkIndex segmentIndex = parseSidx(track.durationUs, leaf.data, inputPosition);
extractorOutput.seekMap(segmentIndex);
haveOutputSeekMap = true;
}
@@ -623,7 +623,7 @@ public final class FragmentedMp4Extractor implements Extractor {
/**
* Parses a sidx atom (defined in 14496-12).
*/
private static ChunkIndex parseSidx(ParsableByteArray atom, long inputPosition)
private static ChunkIndex parseSidx(long durationUs, ParsableByteArray atom, long inputPosition)
throws ParserException {
atom.setPosition(Atom.HEADER_SIZE);
int fullAtom = atom.readInt();
@@ -674,7 +674,7 @@ public final class FragmentedMp4Extractor implements Extractor {
offset += sizes[i];
}

return new ChunkIndex(sizes, offsets, durationsUs, timesUs);
return new ChunkIndex(durationUs, sizes, offsets, durationsUs, timesUs);
}

private void readEncryptionData(ExtractorInput input) throws IOException, InterruptedException {
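
When no sidx atom is found before the first mdat, the extractor above still reports the stream duration, now carried by the SeekMap rather than by each MediaFormat. A minimal sketch of that pattern, with the helper name and the output/durationUs parameters as hypothetical stand-ins for the extractor's own fields:

  // Publishes an unseekable SeekMap that still carries the container duration.
  static void publishUnseekableSeekMap(ExtractorOutput output, long durationUs) {
    output.seekMap(new SeekMap.Unseekable(durationUs));
  }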

@@ -15,6 +15,7 @@
*/
package com.google.android.exoplayer.extractor.mp4;

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.extractor.Extractor;
import com.google.android.exoplayer.extractor.ExtractorInput;
import com.google.android.exoplayer.extractor.ExtractorOutput;
@@ -72,6 +73,7 @@ public final class Mp4Extractor implements Extractor, SeekMap {
// Extractor outputs.
private ExtractorOutput extractorOutput;
private Mp4Track[] tracks;
private long durationUs;
private boolean isQuickTime;

public Mp4Extractor() {
@@ -136,6 +138,11 @@ public final class Mp4Extractor implements Extractor, SeekMap {
return true;
}

@Override
public long getDurationUs() {
return durationUs;
}

@Override
public long getPosition(long timeUs) {
long earliestSamplePosition = Long.MAX_VALUE;
@@ -270,6 +277,7 @@ public final class Mp4Extractor implements Extractor, SeekMap {

/** Updates the stored track metadata to reflect the contents of the specified moov atom. */
private void processMoovAtom(ContainerAtom moov) {
long durationUs = C.UNKNOWN_TIME_US;
List<Mp4Track> tracks = new ArrayList<>();
long earliestSampleOffset = Long.MAX_VALUE;
for (int i = 0; i < moov.containerChildren.size(); i++) {
@@ -296,6 +304,8 @@ public final class Mp4Extractor implements Extractor, SeekMap {
// Allow ten source samples per output sample, like the platform extractor.
int maxInputSize = trackSampleTable.maximumSize + 3 * 10;
mp4Track.trackOutput.format(track.mediaFormat.copyWithMaxInputSize(maxInputSize));

durationUs = Math.max(durationUs, track.durationUs);
tracks.add(mp4Track);

long firstSampleOffset = trackSampleTable.offsets[0];
@@ -303,6 +313,7 @@ public final class Mp4Extractor implements Extractor, SeekMap {
earliestSampleOffset = firstSampleOffset;
}
}
this.durationUs = durationUs;
this.tracks = tracks.toArray(new Mp4Track[0]);
extractorOutput.endTracks();
extractorOutput.seekMap(this);
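
Mp4Extractor now derives a container-level duration by taking the maximum of the per-track durations and serves it through its own SeekMap implementation. Assuming SeekMap exposes getDurationUs() as the override above suggests, a caller-side sketch looks like this (the helper name is hypothetical; seekMap is whatever was passed to ExtractorOutput.seekMap):

  // Returns true if the extractor was able to determine an overall duration.
  static boolean hasKnownDuration(SeekMap seekMap) {
    return seekMap.getDurationUs() != C.UNKNOWN_TIME_US;
  }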

@@ -25,6 +25,7 @@ import com.google.android.exoplayer.extractor.PositionHolder;
import com.google.android.exoplayer.extractor.SeekMap;
import com.google.android.exoplayer.extractor.TrackOutput;
import com.google.android.exoplayer.extractor.ogg.VorbisUtil.Mode;
import com.google.android.exoplayer.extractor.ogg.VorbisUtil.VorbisIdHeader;
import com.google.android.exoplayer.util.MimeTypes;
import com.google.android.exoplayer.util.ParsableByteArray;

@@ -75,7 +76,7 @@ public final class OggVorbisExtractor implements Extractor {
public void init(ExtractorOutput output) {
trackOutput = output.track(0);
output.endTracks();
output.seekMap(SeekMap.UNSEEKABLE);
output.seekMap(new SeekMap.Unseekable(C.UNKNOWN_TIME_US));
}

@Override
@@ -93,17 +94,14 @@ public final class OggVorbisExtractor implements Extractor {

if (vorbisSetup == null) {
vorbisSetup = readSetupHeaders(input, scratch);
ArrayList<byte[]> codecInitialisationData = new ArrayList<>();
codecInitialisationData.clear();
codecInitialisationData.add(vorbisSetup.idHeader.data);
codecInitialisationData.add(vorbisSetup.setupHeaderData);

long duration = input.getLength() == C.LENGTH_UNBOUNDED ? C.UNKNOWN_TIME_US
: input.getLength() * 8000000 / vorbisSetup.idHeader.getApproximateBitrate();
VorbisIdHeader idHeader = vorbisSetup.idHeader;
ArrayList<byte[]> codecInitializationData = new ArrayList<>();
codecInitializationData.clear();
codecInitializationData.add(idHeader.data);
codecInitializationData.add(vorbisSetup.setupHeaderData);
trackOutput.format(MediaFormat.createAudioFormat(null, MimeTypes.AUDIO_VORBIS,
this.vorbisSetup.idHeader.bitrateNominal, OGG_MAX_SEGMENT_SIZE * 255, duration,
this.vorbisSetup.idHeader.channels, (int) this.vorbisSetup.idHeader.sampleRate,
codecInitialisationData, null));
idHeader.bitrateNominal, OGG_MAX_SEGMENT_SIZE * 255, idHeader.channels,
idHeader.sampleRate, codecInitializationData, null));
}
if (oggReader.readPacket(input, scratch)) {
// if this is an audio packet...

@@ -75,7 +75,7 @@ import java.util.Arrays;

long version = headerData.readLittleEndianUnsignedInt();
int channels = headerData.readUnsignedByte();
long sampleRate = headerData.readLittleEndianUnsignedInt();
int sampleRate = (int) headerData.readLittleEndianUnsignedInt();
int bitrateMax = headerData.readLittleEndianInt();
int bitrateNominal = headerData.readLittleEndianInt();
int bitrateMin = headerData.readLittleEndianInt();
@@ -433,7 +433,7 @@ import java.util.Arrays;

public final long version;
public final int channels;
public final long sampleRate;
public final int sampleRate;
public final int bitrateMax;
public final int bitrateNominal;
public final int bitrateMin;
@@ -442,7 +442,7 @@ import java.util.Arrays;
public final boolean framingFlag;
public final byte[] data;

public VorbisIdHeader(long version, int channels, long sampleRate, int bitrateMax,
public VorbisIdHeader(long version, int channels, int sampleRate, int bitrateMax,
int bitrateNominal, int bitrateMin, int blockSize0, int blockSize1, boolean framingFlag,
byte[] data) {
this.version = version;

@@ -161,8 +161,8 @@ import com.google.android.exoplayer.util.ParsableByteArray;
private void parseHeader() {
if (mediaFormat == null) {
mediaFormat = isEac3
? Ac3Util.parseEac3SyncframeFormat(headerScratchBits, null, C.UNKNOWN_TIME_US, null)
: Ac3Util.parseAc3SyncframeFormat(headerScratchBits, null, C.UNKNOWN_TIME_US, null);
? Ac3Util.parseEac3SyncframeFormat(headerScratchBits, null, null)
: Ac3Util.parseAc3SyncframeFormat(headerScratchBits, null, null);
output.format(mediaFormat);
}
sampleSize = isEac3 ? Ac3Util.parseEAc3SyncframeSize(headerScratchBits.data)

@@ -15,6 +15,7 @@
*/
package com.google.android.exoplayer.extractor.ts;

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.extractor.Extractor;
import com.google.android.exoplayer.extractor.ExtractorInput;
import com.google.android.exoplayer.extractor.ExtractorOutput;
@@ -112,7 +113,7 @@ public final class AdtsExtractor implements Extractor {
public void init(ExtractorOutput output) {
adtsReader = new AdtsReader(output.track(0), output.track(1));
output.endTracks();
output.seekMap(SeekMap.UNSEEKABLE);
output.seekMap(new SeekMap.Unseekable(C.UNKNOWN_TIME_US));
}

@Override

@@ -259,8 +259,8 @@ import java.util.Collections;
audioSpecificConfig);

MediaFormat mediaFormat = MediaFormat.createAudioFormat(null, MimeTypes.AUDIO_AAC,
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, audioParams.second,
audioParams.first, Collections.singletonList(audioSpecificConfig), null);
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, audioParams.second, audioParams.first,
Collections.singletonList(audioSpecificConfig), null);
// In this class a sample is an access unit, but the MediaFormat sample rate specifies the
// number of PCM audio samples per second.
sampleDurationUs = (C.MICROS_PER_SECOND * 1024) / mediaFormat.sampleRate;

@@ -153,7 +153,7 @@ import com.google.android.exoplayer.util.ParsableByteArray;
private void parseHeader() {
byte[] frameData = headerScratchBytes.data;
if (mediaFormat == null) {
mediaFormat = DtsUtil.parseDtsFormat(frameData, null, C.UNKNOWN_TIME_US, null);
mediaFormat = DtsUtil.parseDtsFormat(frameData, null, null);
output.format(mediaFormat);
}
sampleSize = DtsUtil.getDtsFrameSize(frameData);

@@ -184,7 +184,7 @@ import java.util.Collections;
}

MediaFormat format = MediaFormat.createVideoFormat(null, MimeTypes.VIDEO_MPEG2,
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, width, height,
MediaFormat.NO_VALUE, MediaFormat.NO_VALUE, width, height,
Collections.singletonList(csdData), MediaFormat.NO_VALUE, pixelWidthHeightRatio);

long frameDurationUs = 0;

@@ -210,8 +210,8 @@ import java.util.List;
SpsData parsedSpsData = CodecSpecificDataUtil.parseSpsNalUnit(bitArray);

return MediaFormat.createVideoFormat(null, MimeTypes.VIDEO_H264, MediaFormat.NO_VALUE,
MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, parsedSpsData.width, parsedSpsData.height,
initializationData, MediaFormat.NO_VALUE, parsedSpsData.pixelWidthAspectRatio);
MediaFormat.NO_VALUE, parsedSpsData.width, parsedSpsData.height, initializationData,
MediaFormat.NO_VALUE, parsedSpsData.pixelWidthAspectRatio);
}

/**

@@ -298,7 +298,7 @@ import java.util.Collections;
}

return MediaFormat.createVideoFormat(null, MimeTypes.VIDEO_H265, MediaFormat.NO_VALUE,
MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, picWidthInLumaSamples, picHeightInLumaSamples,
MediaFormat.NO_VALUE, picWidthInLumaSamples, picHeightInLumaSamples,
Collections.singletonList(csd), MediaFormat.NO_VALUE, pixelWidthHeightRatio);
}

@@ -161,8 +161,8 @@ import com.google.android.exoplayer.util.ParsableByteArray;
if (!hasOutputFormat) {
frameDurationUs = (C.MICROS_PER_SECOND * header.samplesPerFrame) / header.sampleRate;
MediaFormat mediaFormat = MediaFormat.createAudioFormat(null, header.mimeType,
MediaFormat.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, C.UNKNOWN_TIME_US,
header.channels, header.sampleRate, null, null);
MediaFormat.NO_VALUE, MpegAudioHeader.MAX_FRAME_SIZE_BYTES, header.channels,
header.sampleRate, null, null);
output.format(mediaFormat);
hasOutputFormat = true;
}

@@ -33,7 +33,7 @@ import com.google.android.exoplayer.util.ParsableByteArray;
public SeiReader(TrackOutput output) {
super(output);
output.format(MediaFormat.createTextFormat(null, MimeTypes.APPLICATION_EIA608,
MediaFormat.NO_VALUE, C.UNKNOWN_TIME_US, null));
MediaFormat.NO_VALUE, null));
}

@Override

@@ -15,6 +15,7 @@
*/
package com.google.android.exoplayer.extractor.ts;

import com.google.android.exoplayer.C;
import com.google.android.exoplayer.extractor.DummyTrackOutput;
import com.google.android.exoplayer.extractor.Extractor;
import com.google.android.exoplayer.extractor.ExtractorInput;
@@ -106,7 +107,7 @@ public final class TsExtractor implements Extractor {
@Override
public void init(ExtractorOutput output) {
this.output = output;
output.seekMap(SeekMap.UNSEEKABLE);
output.seekMap(new SeekMap.Unseekable(C.UNKNOWN_TIME_US));
}

@Override

@@ -384,7 +384,7 @@ public final class WebmExtractor implements Extractor {
} else {
// We don't know where the Cues element is located. It's most likely omitted. Allow
// playback, but disable seeking.
extractorOutput.seekMap(SeekMap.UNSEEKABLE);
extractorOutput.seekMap(new SeekMap.Unseekable(durationUs));
sentSeekMap = true;
}
}
@@ -464,7 +464,7 @@ public final class WebmExtractor implements Extractor {
return;
case ID_TRACK_ENTRY:
if (tracks.get(currentTrack.number) == null && isCodecSupported(currentTrack.codecId)) {
currentTrack.initializeOutput(extractorOutput, currentTrack.number, durationUs);
currentTrack.initializeOutput(extractorOutput, currentTrack.number);
tracks.put(currentTrack.number, currentTrack);
} else {
// We've seen this track entry before, or the codec is unsupported. Do nothing.
@@ -966,8 +966,8 @@ public final class WebmExtractor implements Extractor {
/**
* Builds a {@link SeekMap} from the recently gathered Cues information.
*
* @return The built {@link SeekMap}. May be {@link SeekMap#UNSEEKABLE} if cues information was
* missing or incomplete.
* @return The built {@link SeekMap}. The returned {@link SeekMap} may be unseekable if cues
* information was missing or incomplete.
*/
private SeekMap buildSeekMap() {
if (segmentContentPosition == UNKNOWN || durationUs == C.UNKNOWN_TIME_US
@@ -976,7 +976,7 @@ public final class WebmExtractor implements Extractor {
// Cues information is missing or incomplete.
cueTimesUs = null;
cueClusterPositions = null;
return SeekMap.UNSEEKABLE;
return new SeekMap.Unseekable(durationUs);
}
int cuePointsSize = cueTimesUs.size();
int[] sizes = new int[cuePointsSize];
@@ -996,7 +996,7 @@ public final class WebmExtractor implements Extractor {
durationsUs[cuePointsSize - 1] = durationUs - timesUs[cuePointsSize - 1];
cueTimesUs = null;
cueClusterPositions = null;
return new ChunkIndex(sizes, offsets, durationsUs, timesUs);
return new ChunkIndex(durationUs, sizes, offsets, durationsUs, timesUs);
}

/**
@@ -1150,8 +1150,7 @@ public final class WebmExtractor implements Extractor {
/**
* Initializes the track with an output.
*/
public void initializeOutput(ExtractorOutput output, int trackId, long durationUs)
throws ParserException {
public void initializeOutput(ExtractorOutput output, int trackId) throws ParserException {
String mimeType;
int maxInputSize = MediaFormat.NO_VALUE;
List<byte[]> initializationData = null;
@@ -1237,14 +1236,14 @@ public final class WebmExtractor implements Extractor {
// into the trackId passed when creating the formats.
if (MimeTypes.isAudio(mimeType)) {
format = MediaFormat.createAudioFormat(Integer.toString(trackId), mimeType,
MediaFormat.NO_VALUE, maxInputSize, durationUs, channelCount, sampleRate,
MediaFormat.NO_VALUE, maxInputSize, channelCount, sampleRate,
initializationData, language);
} else if (MimeTypes.isVideo(mimeType)) {
format = MediaFormat.createVideoFormat(Integer.toString(trackId), mimeType,
MediaFormat.NO_VALUE, maxInputSize, durationUs, width, height, initializationData);
MediaFormat.NO_VALUE, maxInputSize, width, height, initializationData);
} else if (MimeTypes.APPLICATION_SUBRIP.equals(mimeType)) {
format = MediaFormat.createTextFormat(Integer.toString(trackId), mimeType,
MediaFormat.NO_VALUE, durationUs, language);
MediaFormat.NO_VALUE, language);
} else {
throw new ParserException("Unexpected MIME type.");
}
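
buildSeekMap() above now threads the container duration into the ChunkIndex it returns, with the duration leading the constructor's argument list ahead of the per-cue arrays. A minimal sketch with a single hypothetical cue entry (the helper name and array values are placeholders):

  // Sketch only: a one-cue index whose sole cue spans the whole (placeholder) duration.
  static ChunkIndex singleCueIndex(long durationUs) {
    int[] sizes = {1024};
    long[] offsets = {0};
    long[] durationsUs = {durationUs};
    long[] timesUs = {0};
    return new ChunkIndex(durationUs, sizes, offsets, durationsUs, timesUs);
  }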

@@ -75,7 +75,7 @@ import java.util.regex.Pattern;
@Override
public void init(ExtractorOutput output) {
this.output = output;
output.seekMap(SeekMap.UNSEEKABLE);
output.seekMap(new SeekMap.Unseekable(C.UNKNOWN_TIME_US));
}

@Override
@@ -160,7 +160,7 @@ import java.util.regex.Pattern;
private TrackOutput buildTrackOutput(long subsampleOffsetUs) {
TrackOutput trackOutput = output.track(0);
trackOutput.format(MediaFormat.createTextFormat("id", MimeTypes.TEXT_VTT, MediaFormat.NO_VALUE,
C.UNKNOWN_TIME_US, "en", subsampleOffsetUs));
"en", subsampleOffsetUs));
output.endTracks();
return trackOutput;
}

@@ -72,6 +72,7 @@ public class SmoothStreamingChunkSource implements ChunkSource {

private boolean manifestFetcherEnabled;
private boolean live;
private long durationUs;
private TrackEncryptionBox[] trackEncryptionBoxes;
private DrmInitData.Mapped drmInitData;
private SmoothStreamingManifest currentManifest;
@@ -146,6 +147,7 @@ public class SmoothStreamingChunkSource implements ChunkSource {
return false;
} else {
live = currentManifest.isLive;
durationUs = currentManifest.durationUs;
ProtectionElement protectionElement = currentManifest.protectionElement;
if (protectionElement != null) {
byte[] keyId = getProtectionElementKeyId(protectionElement.data);
@@ -164,6 +166,11 @@ public class SmoothStreamingChunkSource implements ChunkSource {
return true;
}

@Override
public long getDurationUs() {
return durationUs;
}

@Override
public final TrackGroup getTracks() {
return trackGroup;
@@ -373,7 +380,7 @@ public class SmoothStreamingChunkSource implements ChunkSource {
switch (element.type) {
case StreamElement.TYPE_VIDEO:
mediaFormat = MediaFormat.createVideoFormat(format.id, format.mimeType, format.bitrate,
MediaFormat.NO_VALUE, durationUs, format.width, format.height, Arrays.asList(csdArray));
MediaFormat.NO_VALUE, format.width, format.height, Arrays.asList(csdArray));
mp4TrackType = Track.TYPE_vide;
break;
case StreamElement.TYPE_AUDIO:
@@ -385,13 +392,13 @@ public class SmoothStreamingChunkSource implements ChunkSource {
format.audioSamplingRate, format.audioChannels));
}
mediaFormat = MediaFormat.createAudioFormat(format.id, format.mimeType, format.bitrate,
MediaFormat.NO_VALUE, durationUs, format.audioChannels, format.audioSamplingRate, csd,
MediaFormat.NO_VALUE, format.audioChannels, format.audioSamplingRate, csd,
format.language);
mp4TrackType = Track.TYPE_soun;
break;
case StreamElement.TYPE_TEXT:
mediaFormat = MediaFormat.createTextFormat(format.id, format.mimeType, format.bitrate,
durationUs, format.language);
format.language);
mp4TrackType = Track.TYPE_text;
break;
default:
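
SmoothStreamingChunkSource now caches the manifest duration and, judging by the @Override above, exposes it through getDurationUs() on the chunk source itself rather than stamping it onto every MediaFormat. A consumer-side sketch; the helper name and the chunkSource parameter are hypothetical stand-ins for a prepared instance:

  // Reads the presentation duration from the chunk source once the manifest has been loaded.
  static long presentationDurationUs(SmoothStreamingChunkSource chunkSource) {
    return chunkSource.getDurationUs();
  }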

@@ -65,12 +65,11 @@ public final class Ac3Util {
*
* @param data The AC3SpecificBox to parse.
* @param trackId The track identifier to set on the format, or null.
* @param durationUs The duration to set on the format, in microseconds.
* @param language The language to set on the format.
* @return The AC-3 format parsed from data in the header.
*/
public static MediaFormat parseAc3AnnexFFormat(ParsableByteArray data, String trackId,
long durationUs, String language) {
String language) {
int fscod = (data.readUnsignedByte() & 0xC0) >> 6;
int sampleRate = SAMPLE_RATE_BY_FSCOD[fscod];
int nextByte = data.readUnsignedByte();
@@ -79,7 +78,7 @@ public final class Ac3Util {
channelCount++;
}
return MediaFormat.createAudioFormat(trackId, MimeTypes.AUDIO_AC3, MediaFormat.NO_VALUE,
MediaFormat.NO_VALUE, durationUs, channelCount, sampleRate, null, language);
MediaFormat.NO_VALUE, channelCount, sampleRate, null, language);
}

/**
@@ -88,12 +87,11 @@ public final class Ac3Util {
*
* @param data The EC3SpecificBox to parse.
* @param trackId The track identifier to set on the format, or null.
* @param durationUs The duration to set on the format, in microseconds.
* @param language The language to set on the format.
* @return The E-AC-3 format parsed from data in the header.
*/
public static MediaFormat parseEAc3AnnexFFormat(ParsableByteArray data, String trackId,
long durationUs, String language) {
String language) {
data.skipBytes(2); // data_rate, num_ind_sub

// Read only the first substream.
@@ -106,7 +104,7 @@ public final class Ac3Util {
channelCount++;
}
return MediaFormat.createAudioFormat(trackId, MimeTypes.AUDIO_E_AC3, MediaFormat.NO_VALUE,
MediaFormat.NO_VALUE, durationUs, channelCount, sampleRate, null, language);
MediaFormat.NO_VALUE, channelCount, sampleRate, null, language);
}

/**
@@ -115,12 +113,11 @@ public final class Ac3Util {
*
* @param data The data to parse, positioned at the start of the syncframe.
* @param trackId The track identifier to set on the format, or null.
* @param durationUs The duration to set on the format, in microseconds.
* @param language The language to set on the format.
* @return The AC-3 format parsed from data in the header.
*/
public static MediaFormat parseAc3SyncframeFormat(ParsableBitArray data, String trackId,
long durationUs, String language) {
String language) {
data.skipBits(16 + 16); // syncword, crc1
int fscod = data.readBits(2);
data.skipBits(6 + 5 + 3); // frmsizecod, bsid, bsmod
@@ -136,7 +133,7 @@ public final class Ac3Util {
}
boolean lfeon = data.readBit();
return MediaFormat.createAudioFormat(trackId, MimeTypes.AUDIO_AC3, MediaFormat.NO_VALUE,
MediaFormat.NO_VALUE, durationUs, CHANNEL_COUNT_BY_ACMOD[acmod] + (lfeon ? 1 : 0),
MediaFormat.NO_VALUE, CHANNEL_COUNT_BY_ACMOD[acmod] + (lfeon ? 1 : 0),
SAMPLE_RATE_BY_FSCOD[fscod], null, language);
}

@@ -146,12 +143,11 @@ public final class Ac3Util {
*
* @param data The data to parse, positioned at the start of the syncframe.
* @param trackId The track identifier to set on the format, or null.
* @param durationUs The duration to set on the format, in microseconds.
* @param language The language to set on the format.
* @return The E-AC-3 format parsed from data in the header.
*/
public static MediaFormat parseEac3SyncframeFormat(ParsableBitArray data, String trackId,
long durationUs, String language) {
String language) {
data.skipBits(16 + 2 + 3 + 11); // syncword, strmtype, substreamid, frmsiz
int sampleRate;
int fscod = data.readBits(2);
@@ -164,8 +160,8 @@ public final class Ac3Util {
int acmod = data.readBits(3);
boolean lfeon = data.readBit();
return MediaFormat.createAudioFormat(trackId, MimeTypes.AUDIO_E_AC3, MediaFormat.NO_VALUE,
MediaFormat.NO_VALUE, durationUs, CHANNEL_COUNT_BY_ACMOD[acmod] + (lfeon ? 1 : 0),
sampleRate, null, language);
MediaFormat.NO_VALUE, CHANNEL_COUNT_BY_ACMOD[acmod] + (lfeon ? 1 : 0), sampleRate, null,
language);
}

/**
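
The Ac3Util helpers above have lost their durationUs parameter; they now take only the data to parse plus an optional track id and language. A minimal sketch of the trimmed syncframe call, mirroring the Ac3Reader call site earlier in this diff (the wrapper name is hypothetical):

  // Both trackId and language may be null, as at the Ac3Reader call site.
  static MediaFormat parseSyncframe(ParsableBitArray syncframeBits) {
    return Ac3Util.parseAc3SyncframeFormat(syncframeBits, null /* trackId */, null /* language */);
  }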

@@ -53,12 +53,10 @@ public final class DtsUtil {
*
* @param frame The DTS frame to parse.
* @param trackId The track identifier to set on the format, or null.
* @param durationUs The duration to set on the format, in microseconds.
* @param language The language to set on the format.
* @return The DTS format parsed from data in the header.
*/
public static MediaFormat parseDtsFormat(byte[] frame, String trackId, long durationUs,
String language) {
public static MediaFormat parseDtsFormat(byte[] frame, String trackId, String language) {
ParsableBitArray frameBits = SCRATCH_BITS;
frameBits.reset(frame);
frameBits.skipBits(4 * 8 + 1 + 5 + 1 + 7 + 14); // SYNC, FTYPE, SHORT, CPF, NBLKS, FSIZE
@@ -72,7 +70,7 @@ public final class DtsUtil {
frameBits.skipBits(10); // MIX, DYNF, TIMEF, AUXF, HDCD, EXT_AUDIO_ID, EXT_AUDIO, ASPF
channelCount += frameBits.readBits(2) > 0 ? 1 : 0; // LFF
return MediaFormat.createAudioFormat(trackId, MimeTypes.AUDIO_DTS, bitrate,
MediaFormat.NO_VALUE, durationUs, channelCount, sampleRate, null, language);
MediaFormat.NO_VALUE, channelCount, sampleRate, null, language);
}

/**