Mirror of https://github.com/androidx/media.git
Split CompositionPlayerSeekTest.playSequenceAndGetTimestampsUs
This method is already quite complex and I will need to add more complexity, so split it into multiple smaller methods. This CL is a refactoring. There are no functional changes.

PiperOrigin-RevId: 713266816
Commit: 30038079c4 (parent: 79d41aac7e)
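Note on the test utility involved: the diff below counts frames with a ResettableCountDownLatch and re-arms it via reset(Integer.MAX_VALUE) after the seek-triggered flush. For readers who do not have that class at hand, a minimal stand-in exposing just the surface used here (countDown, getCount, await, reset) could look like the following sketch; it is an illustrative approximation, not the actual media3 implementation.

import java.util.concurrent.CountDownLatch;

// Illustrative stand-in only; the project presumably provides the real ResettableCountDownLatch
// in its test sources. Threads already awaiting the previous latch are not re-parked by reset(),
// which is good enough for a sketch.
final class ResettableCountDownLatchSketch {
  private volatile CountDownLatch delegate;

  ResettableCountDownLatchSketch(int count) {
    delegate = new CountDownLatch(count);
  }

  void countDown() {
    delegate.countDown();
  }

  long getCount() {
    return delegate.getCount();
  }

  void await() throws InterruptedException {
    delegate.await();
  }

  // Re-arms the latch with a fresh count, mirroring the reset(Integer.MAX_VALUE) call in flush().
  void reset(int count) {
    delegate = new CountDownLatch(count);
  }
}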
@@ -579,59 +579,15 @@ public class CompositionPlayerSeekTest {
       int numberOfFramesBeforeSeeking,
       long seekTimeMs)
       throws Exception {
-    ResettableCountDownLatch framesReceivedLatch =
+    ResettableCountDownLatch frameCountBeforeBlockLatch =
         new ResettableCountDownLatch(numberOfFramesBeforeSeeking);
-    AtomicBoolean shaderProgramShouldBlockInput = new AtomicBoolean();
     InputTimestampRecordingShaderProgram inputTimestampRecordingShaderProgram =
-        new InputTimestampRecordingShaderProgram() {
-
-          @Override
-          public void queueInputFrame(
-              GlObjectsProvider glObjectsProvider,
-              GlTextureInfo inputTexture,
-              long presentationTimeUs) {
-            super.queueInputFrame(glObjectsProvider, inputTexture, presentationTimeUs);
-            framesReceivedLatch.countDown();
-            if (framesReceivedLatch.getCount() == 0) {
-              shaderProgramShouldBlockInput.set(true);
-            }
-          }
-
-          @Override
-          public void releaseOutputFrame(GlTextureInfo outputTexture) {
-            // The input listener capacity is reported in the super method, block input by skip
-            // reporting input capacity.
-            if (shaderProgramShouldBlockInput.get()) {
-              return;
-            }
-            super.releaseOutputFrame(outputTexture);
-          }
-
-          @Override
-          public void flush() {
-            super.flush();
-            if (framesReceivedLatch.getCount() == 0) {
-              // The flush is caused by the seek operation. We do this check because the shader
-              // program can be flushed for other reasons, for example at the transition between 2
-              // renderers.
-              shaderProgramShouldBlockInput.set(false);
-              framesReceivedLatch.reset(Integer.MAX_VALUE);
-            }
-          }
-        };
-
-    List<EditedMediaItem> editedMediaItems = new ArrayList<>();
-    for (int i = 0; i < mediaItems.size(); i++) {
-      editedMediaItems.add(
-          createEditedMediaItem(
-              mediaItems.get(i),
-              durationsUs.get(i),
-              /* videoEffect= */ (GlEffect)
-                  (context, useHdr) -> inputTimestampRecordingShaderProgram));
-    }
+        createInputTimestampRecordingShaderProgram(frameCountBeforeBlockLatch);
+    Effect videoEffect = (GlEffect) (context, useHdr) -> inputTimestampRecordingShaderProgram;
+    List<EditedMediaItem> editedMediaItems =
+        createEditedMediaItems(mediaItems, durationsUs, videoEffect);

     CountDownLatch videoGraphEnded = new CountDownLatch(1);
-
     getInstrumentation()
         .runOnMainSync(
             () -> {
@@ -653,7 +609,7 @@ public class CompositionPlayerSeekTest {
             });

     // Wait until the number of frames are received, block further input on the shader program.
-    framesReceivedLatch.await();
+    frameCountBeforeBlockLatch.await();
     getInstrumentation().runOnMainSync(() -> compositionPlayer.seekTo(seekTimeMs));
     playerTestListener.waitUntilPlayerEnded();

@@ -661,6 +617,72 @@ public class CompositionPlayerSeekTest {
     return inputTimestampRecordingShaderProgram.getInputTimestampsUs();
   }

+  /**
+   * Creates an {@link InputTimestampRecordingShaderProgram} that blocks input after receiving the
+   * number of frames specified by the provided {@link ResettableCountDownLatch}.
+   *
+   * <p>Input is unblocked when the shader program is flushed.
+   */
+  private static InputTimestampRecordingShaderProgram createInputTimestampRecordingShaderProgram(
+      ResettableCountDownLatch frameCountBeforeBlockLatch) {
+    AtomicBoolean shaderProgramShouldBlockInput = new AtomicBoolean();
+    return new InputTimestampRecordingShaderProgram() {
+
+      @Override
+      public void queueInputFrame(
+          GlObjectsProvider glObjectsProvider,
+          GlTextureInfo inputTexture,
+          long presentationTimeUs) {
+        super.queueInputFrame(glObjectsProvider, inputTexture, presentationTimeUs);
+        frameCountBeforeBlockLatch.countDown();
+        if (frameCountBeforeBlockLatch.getCount() == 0) {
+          shaderProgramShouldBlockInput.set(true);
+        }
+      }
+
+      @Override
+      public void releaseOutputFrame(GlTextureInfo outputTexture) {
+        // The input listener capacity is reported in the super method, block input by skip
+        // reporting input capacity.
+        if (shaderProgramShouldBlockInput.get()) {
+          return;
+        }
+        super.releaseOutputFrame(outputTexture);
+      }
+
+      @Override
+      public void flush() {
+        super.flush();
+        if (frameCountBeforeBlockLatch.getCount() == 0) {
+          // The flush is caused by the seek operation. We do this check because the shader
+          // program can be flushed for other reasons, for example at the transition between 2
+          // renderers.
+          shaderProgramShouldBlockInput.set(false);
+          frameCountBeforeBlockLatch.reset(Integer.MAX_VALUE);
+        }
+      }
+    };
+  }
+
+  /**
+   * Returns a list of {@linkplain EditedMediaItem EditedMediaItems}.
+   *
+   * @param mediaItems The {@linkplain MediaItem MediaItems} that should be wrapped.
+   * @param durationsUs The durations of the {@linkplain EditedMediaItem EditedMediaItems}, in
+   *     microseconds.
+   * @param videoEffect The {@link Effect} to apply to each {@link EditedMediaItem}.
+   * @return A list of {@linkplain EditedMediaItem EditedMediaItems}.
+   */
+  private static List<EditedMediaItem> createEditedMediaItems(
+      List<MediaItem> mediaItems, List<Long> durationsUs, Effect videoEffect) {
+    List<EditedMediaItem> editedMediaItems = new ArrayList<>();
+    for (int i = 0; i < mediaItems.size(); i++) {
+      editedMediaItems.add(
+          createEditedMediaItem(mediaItems.get(i), durationsUs.get(i), videoEffect));
+    }
+    return editedMediaItems;
+  }
+
   private static EditedMediaItem createEditedMediaItem(
       MediaItem mediaItem, long durationUs, Effect videoEffect) {
     return new EditedMediaItem.Builder(mediaItem)