Add method to obtain media duration from playout duration
This is the inverse operation of the already existing method.

PiperOrigin-RevId: 633207017
parent 3a3145521b
commit d52a32aadc
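
For illustration only (not part of the commit): a minimal usage sketch of the new method, assuming a SpeedProvider named speedProvider and a processor that has already been configured, fed input, and drained, mirroring the test added below.

    // Hypothetical setup; see the test hunk below for a concrete configuration.
    SpeedChangingAudioProcessor processor = new SpeedChangingAudioProcessor(speedProvider);
    // ... configure(), queueInput(...), and drain output so segment times are recorded ...
    // Maps a playout position back to the corresponding position in the input media.
    long mediaDurationUs = processor.getMediaDurationUs(/* playoutDurationUs= */ 5_000);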
@@ -226,6 +226,41 @@ public final class SpeedChangingAudioProcessor extends BaseAudioProcessor {
     }
   }
 
+  /**
+   * Returns the input media duration for the given playout duration.
+   *
+   * <p>Both durations are counted from the last {@link #reset()} or {@link #flush()} of the audio
+   * processor.
+   *
+   * <p>The {@code playoutDurationUs} must be less than last processed buffer output time.
+   *
+   * @param playoutDurationUs The playout duration in microseconds.
+   * @return The corresponding input duration in microseconds.
+   */
+  public long getMediaDurationUs(long playoutDurationUs) {
+    int floorIndex = outputSegmentStartTimesUs.size() - 1;
+    while (floorIndex > 0 && outputSegmentStartTimesUs.get(floorIndex) > playoutDurationUs) {
+      floorIndex--;
+    }
+    long lastSegmentOutputDurationUs =
+        playoutDurationUs - outputSegmentStartTimesUs.get(floorIndex);
+    long lastSegmentInputDurationUs;
+    if (floorIndex == outputSegmentStartTimesUs.size() - 1) {
+      lastSegmentInputDurationUs = getMediaDurationUsAtCurrentSpeed(lastSegmentOutputDurationUs);
+
+    } else {
+      lastSegmentInputDurationUs =
+          round(
+              lastSegmentOutputDurationUs
+                  * divide(
+                      inputSegmentStartTimesUs.get(floorIndex + 1)
+                          - inputSegmentStartTimesUs.get(floorIndex),
+                      outputSegmentStartTimesUs.get(floorIndex + 1)
+                          - outputSegmentStartTimesUs.get(floorIndex)));
+    }
+    return inputSegmentStartTimesUs.get(floorIndex) + lastSegmentInputDurationUs;
+  }
+
   /**
    * Assuming enough audio has been processed, calculates the time at which the {@code inputTimeUs}
    * is outputted at after the speed changes has been applied.
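
As a worked illustration of the interpolation branch above (segment values borrowed from the test added below, not from this hunk): within a completed segment, the playout offset is rescaled by the segment's input/output duration ratio. The local variables stand in for the entries of inputSegmentStartTimesUs and outputSegmentStartTimesUs at floorIndex and floorIndex + 1, and plain Java arithmetic replaces the class's round()/divide() helpers.

    // Segment at speed 5x: input [20_000, 30_000) us was output as [15_000, 17_000) us.
    long inputStartUs = 20_000;      // inputSegmentStartTimesUs.get(floorIndex)
    long nextInputStartUs = 30_000;  // inputSegmentStartTimesUs.get(floorIndex + 1)
    long outputStartUs = 15_000;     // outputSegmentStartTimesUs.get(floorIndex)
    long nextOutputStartUs = 17_000; // outputSegmentStartTimesUs.get(floorIndex + 1)
    long playoutDurationUs = 16_000;

    long lastSegmentOutputDurationUs = playoutDurationUs - outputStartUs; // 1_000
    double inputPerOutput =
        (double) (nextInputStartUs - inputStartUs) / (nextOutputStartUs - outputStartUs); // 5.0
    long mediaDurationUs =
        inputStartUs + Math.round(lastSegmentOutputDurationUs * inputPerOutput); // 25_000, as asserted in the test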
@@ -293,6 +328,12 @@ public final class SpeedChangingAudioProcessor extends BaseAudioProcessor {
         : mediaDurationUs;
   }
 
+  private long getMediaDurationUsAtCurrentSpeed(long playoutDurationUs) {
+    return isUsingSonic()
+        ? sonicAudioProcessor.getMediaDuration(playoutDurationUs)
+        : playoutDurationUs;
+  }
+
   private long updateLastProcessedInputTime() {
     if (isUsingSonic()) {
       // TODO - b/320242819: Investigate whether bytesRead can be used here rather than
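
A brief illustrative note on the helper above (not from the commit): the open-ended last segment is converted differently depending on whether Sonic is active. When Sonic is bypassed, playout time and media time advance together, so the mapping is the identity.

    // Identity branch: with Sonic bypassed (e.g. speed 1 and unchanged pitch),
    // each microsecond of playout corresponds to one microsecond of input media.
    long playoutDurationUs = 4_000;
    long mediaDurationUs = playoutDurationUs; // mirrors the ": playoutDurationUs" branch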
@@ -493,6 +493,44 @@ public class SpeedChangingAudioProcessorTest {
     assertThat(outputTimesUs).containsExactly(243L);
   }
 
+  @Test
+  public void getMediaDurationUs_returnsCorrectValues() throws Exception {
+    // The speed changes happen every 10ms (441 samples @ 44.1KHz)
+    SpeedProvider speedProvider =
+        TestSpeedProvider.createWithFrameCounts(
+            AUDIO_FORMAT,
+            /* frameCounts= */ new int[] {441, 441, 441, 441},
+            /* speeds= */ new float[] {2, 1, 5, 2});
+    SpeedChangingAudioProcessor speedChangingAudioProcessor =
+        getConfiguredSpeedChangingAudioProcessor(speedProvider);
+    ByteBuffer inputBuffer = getInputBuffer(/* frameCount= */ 441 * 4);
+    while (inputBuffer.position() < inputBuffer.limit()) {
+      speedChangingAudioProcessor.queueInput(inputBuffer);
+    }
+    getAudioProcessorOutput(speedChangingAudioProcessor);
+
+    // input (in ms) (0, 10, 20, 30, 40) ->
+    // output (in ms) (0, 10/2, 10/2 + 10, 10/2 + 10 + 10/5, 10/2 + 10 + 10/5 + 10/2)
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 0))
+        .isEqualTo(0);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 3_000))
+        .isEqualTo(6_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 5_000))
+        .isEqualTo(10_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 10_000))
+        .isEqualTo(15_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 15_000))
+        .isEqualTo(20_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 16_000))
+        .isEqualTo(25_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 17_000))
+        .isEqualTo(30_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 18_000))
+        .isEqualTo(32_000);
+    assertThat(speedChangingAudioProcessor.getMediaDurationUs(/* playoutDurationUs= */ 22_000))
+        .isEqualTo(40_000);
+  }
+
   private static SpeedChangingAudioProcessor getConfiguredSpeedChangingAudioProcessor(
       SpeedProvider speedProvider) throws AudioProcessor.UnhandledAudioFormatException {
     SpeedChangingAudioProcessor speedChangingAudioProcessor =
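
To make the expected values easier to verify (a restatement of the comment inside the test, not new behavior), the output segment boundaries can be derived from the 10 ms input blocks and the speeds {2, 1, 5, 2}:

    // Hypothetical standalone derivation of the output segment start times used above.
    float[] speeds = {2f, 1f, 5f, 2f};
    long inputBlockUs = 10_000;
    long[] outputStartsUs = new long[speeds.length + 1];
    for (int i = 0; i < speeds.length; i++) {
      outputStartsUs[i + 1] = outputStartsUs[i] + Math.round(inputBlockUs / speeds[i]);
    }
    // outputStartsUs == {0, 5_000, 15_000, 17_000, 22_000}, matching the "output (in ms)"
    // comment; e.g. playout 16_000us falls 1_000us into the 5x segment, so the expected
    // media duration is 20_000 + 1_000 * 5 = 25_000us, as asserted above.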