Mirror of https://github.com/androidx/media.git
Disable cache fragmentation except for progressive
DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION is added to indicate to the cache when fragmentation is allowed. The flag is set for progressive requests only. To avoid breaking changes, CacheDataSink defaults to ignoring the flag (and enabling fragmentation) for now; respecting the flag can be enabled manually. DownloaderConstructorHelper opts in to respecting the flag.

Issue: #4253
PiperOrigin-RevId: 229176835
Parent: 86637facdd
Commit: 1b62277a0b
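
For illustration (not part of this commit's diff), here is a minimal sketch of how application code could opt in to respecting the new flag, using only the APIs touched in the changes below. The wrapper class and method names, and the `cache`, `upstreamFactory` and `uri` values, are placeholder assumptions.

import android.net.Uri;
import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.upstream.DataSource;
import com.google.android.exoplayer2.upstream.DataSpec;
import com.google.android.exoplayer2.upstream.FileDataSourceFactory;
import com.google.android.exoplayer2.upstream.cache.Cache;
import com.google.android.exoplayer2.upstream.cache.CacheDataSink;
import com.google.android.exoplayer2.upstream.cache.CacheDataSinkFactory;
import com.google.android.exoplayer2.upstream.cache.CacheDataSource;
import com.google.android.exoplayer2.upstream.cache.CacheDataSourceFactory;

// Hypothetical helper class; shown only to demonstrate the opt-in added by this commit.
final class CacheFragmentationExample {

  static CacheDataSourceFactory buildFactory(Cache cache, DataSource.Factory upstreamFactory) {
    // Respect DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION instead of always fragmenting.
    CacheDataSinkFactory writeSinkFactory =
        new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE)
            .experimental_setRespectCacheFragmentationFlag(true);
    return new CacheDataSourceFactory(
        cache,
        upstreamFactory,
        new FileDataSourceFactory(),
        writeSinkFactory,
        CacheDataSource.FLAG_BLOCK_ON_CACHE,
        /* eventListener= */ null);
  }

  static DataSpec buildFragmentableDataSpec(Uri uri) {
    // Only requests carrying this flag are split across multiple cache files by a
    // CacheDataSink that respects it.
    return new DataSpec(
        uri,
        /* absoluteStreamPosition= */ 0,
        C.LENGTH_UNSET,
        /* key= */ null,
        /* flags= */ DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
  }
}

With such a setup, only DataSpecs that carry FLAG_ALLOW_CACHE_FRAGMENTATION (as ProgressiveDownloader and the progressive media period now set) are fragmented; all other requests are written without being split into fragments.
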
@@ -44,6 +44,8 @@
 * Fix issue with reusing a `ClippingMediaSource` with an inner
   `ExtractorMediaSource` and a non-zero start position
   ([#5351](https://github.com/google/ExoPlayer/issues/5351)).
+* Downloading/Caching: Improve cache performance
+  ([#4253](https://github.com/google/ExoPlayer/issues/4253)).
 
 ### 2.9.3 ###
 
@@ -109,16 +109,18 @@ public final class DownloaderConstructorHelper {
         cacheReadDataSourceFactory != null
             ? cacheReadDataSourceFactory
             : new FileDataSourceFactory();
-    DataSink.Factory writeDataSinkFactory =
-        cacheWriteDataSinkFactory != null
-            ? cacheWriteDataSinkFactory
-            : new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_MAX_CACHE_FILE_SIZE);
+    if (cacheWriteDataSinkFactory == null) {
+      CacheDataSinkFactory factory =
+          new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE);
+      factory.experimental_setRespectCacheFragmentationFlag(true);
+      cacheWriteDataSinkFactory = factory;
+    }
     onlineCacheDataSourceFactory =
         new CacheDataSourceFactory(
             cache,
             upstreamFactory,
             readDataSourceFactory,
-            writeDataSinkFactory,
+            cacheWriteDataSinkFactory,
             CacheDataSource.FLAG_BLOCK_ON_CACHE,
             /* eventListener= */ null,
             cacheKeyFactory);
@@ -53,7 +53,11 @@ public final class ProgressiveDownloader implements Downloader {
       Uri uri, @Nullable String customCacheKey, DownloaderConstructorHelper constructorHelper) {
     this.dataSpec =
         new DataSpec(
-            uri, /* absoluteStreamPosition= */ 0, C.LENGTH_UNSET, customCacheKey, /* flags= */ 0);
+            uri,
+            /* absoluteStreamPosition= */ 0,
+            C.LENGTH_UNSET,
+            customCacheKey,
+            /* flags= */ DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
     this.cache = constructorHelper.getCache();
     this.dataSource = constructorHelper.createCacheDataSource();
     this.cacheKeyFactory = constructorHelper.getCacheKeyFactory();
@@ -988,7 +988,9 @@ import org.checkerframework.checker.nullness.compatqual.NullableType;
           position,
           C.LENGTH_UNSET,
           customCacheKey,
-          DataSpec.FLAG_ALLOW_ICY_METADATA | DataSpec.FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN);
+          DataSpec.FLAG_ALLOW_ICY_METADATA
+              | DataSpec.FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN
+              | DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
     }
 
     private void setLoadPosition(long position, long timeUs) {
@@ -32,14 +32,19 @@ public final class DataSpec {
 
   /**
    * The flags that apply to any request for data. Possible flag values are {@link
-   * #FLAG_ALLOW_GZIP}, {@link #FLAG_ALLOW_ICY_METADATA} and {@link
-   * #FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN}.
+   * #FLAG_ALLOW_GZIP}, {@link #FLAG_ALLOW_ICY_METADATA}, {@link #FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN}
+   * and {@link #FLAG_ALLOW_CACHE_FRAGMENTATION}.
    */
   @Documented
   @Retention(RetentionPolicy.SOURCE)
   @IntDef(
       flag = true,
-      value = {FLAG_ALLOW_GZIP, FLAG_ALLOW_ICY_METADATA, FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN})
+      value = {
+        FLAG_ALLOW_GZIP,
+        FLAG_ALLOW_ICY_METADATA,
+        FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN,
+        FLAG_ALLOW_CACHE_FRAGMENTATION
+      })
   public @interface Flags {}
   /**
    * Allows an underlying network stack to request that the server use gzip compression.
@@ -53,12 +58,17 @@ public final class DataSpec {
    * DataSource#read(byte[], int, int)} will be the decompressed data.
    */
   public static final int FLAG_ALLOW_GZIP = 1;
 
   /** Allows an underlying network stack to request that the stream contain ICY metadata. */
   public static final int FLAG_ALLOW_ICY_METADATA = 1 << 1; // 2
 
   /** Prevents caching if the length cannot be resolved when the {@link DataSource} is opened. */
   public static final int FLAG_DONT_CACHE_IF_LENGTH_UNKNOWN = 1 << 2; // 4
+  /**
+   * Allows fragmentation of this request into multiple cache files, meaning a cache eviction policy
+   * will be able to evict individual fragments of the data. Depending on the cache implementation,
+   * setting this flag may also enable more concurrent access to the data (e.g. reading one fragment
+   * whilst writing another).
+   */
+  public static final int FLAG_ALLOW_CACHE_FRAGMENTATION = 1 << 4; // 8
 
   /**
    * The set of HTTP methods that are supported by ExoPlayer {@link HttpDataSource}s. One of {@link
@@ -37,20 +37,22 @@ import java.io.OutputStream;
  */
 public final class CacheDataSink implements DataSink {
 
-  /** Default {@code maxCacheFileSize} recommended for caching use cases. */
-  public static final long DEFAULT_MAX_CACHE_FILE_SIZE = 5 * 1024 * 1024;
+  /** Default {@code fragmentSize} recommended for caching use cases. */
+  public static final long DEFAULT_FRAGMENT_SIZE = 5 * 1024 * 1024;
   /** Default buffer size in bytes. */
   public static final int DEFAULT_BUFFER_SIZE = 20 * 1024;
 
-  private static final long MIN_RECOMMENDED_MAX_CACHE_FILE_SIZE = 2 * 1024 * 1024;
+  private static final long MIN_RECOMMENDED_FRAGMENT_SIZE = 2 * 1024 * 1024;
   private static final String TAG = "CacheDataSink";
 
   private final Cache cache;
-  private final long maxCacheFileSize;
+  private final long fragmentSize;
   private final int bufferSize;
 
   private boolean syncFileDescriptor;
+  private boolean respectCacheFragmentationFlag;
   private DataSpec dataSpec;
+  private long dataSpecFragmentSize;
   private File file;
   private OutputStream outputStream;
   private FileOutputStream underlyingFileOutputStream;
@@ -73,42 +75,39 @@ public final class CacheDataSink implements DataSink {
    * Constructs an instance using {@link #DEFAULT_BUFFER_SIZE}.
    *
    * @param cache The cache into which data should be written.
-   * @param maxCacheFileSize The maximum size of a cache file, in bytes. If a request results in
-   *     data being written whose size exceeds this value, then the data will be fragmented into
-   *     multiple cache files. If set to {@link C#LENGTH_UNSET} then no fragmentation will occur.
-   *     Using a small value allows for finer-grained cache eviction policies, at the cost of
-   *     increased overhead both on the cache implementation and the file system. Values under
-   *     {@code (2 * 1024 * 1024)} are not recommended.
+   * @param fragmentSize For requests that should be fragmented into multiple cache files, this is
+   *     the maximum size of a cache file in bytes. If set to {@link C#LENGTH_UNSET} then no
+   *     fragmentation will occur. Using a small value allows for finer-grained cache eviction
+   *     policies, at the cost of increased overhead both on the cache implementation and the file
+   *     system. Values under {@code (2 * 1024 * 1024)} are not recommended.
    */
-  public CacheDataSink(Cache cache, long maxCacheFileSize) {
-    this(cache, maxCacheFileSize, DEFAULT_BUFFER_SIZE);
+  public CacheDataSink(Cache cache, long fragmentSize) {
+    this(cache, fragmentSize, DEFAULT_BUFFER_SIZE);
   }
 
   /**
    * @param cache The cache into which data should be written.
-   * @param maxCacheFileSize The maximum size of a cache file, in bytes. If a request results in
-   *     data being written whose size exceeds this value, then the data will be fragmented into
-   *     multiple cache files. If set to {@link C#LENGTH_UNSET} then no fragmentation will occur.
-   *     Using a small value allows for finer-grained cache eviction policies, at the cost of
-   *     increased overhead both on the cache implementation and the file system. Values under
-   *     {@code (2 * 1024 * 1024)} are not recommended.
+   * @param fragmentSize For requests that should be fragmented into multiple cache files, this is
+   *     the maximum size of a cache file in bytes. If set to {@link C#LENGTH_UNSET} then no
+   *     fragmentation will occur. Using a small value allows for finer-grained cache eviction
+   *     policies, at the cost of increased overhead both on the cache implementation and the file
+   *     system. Values under {@code (2 * 1024 * 1024)} are not recommended.
    * @param bufferSize The buffer size in bytes for writing to a cache file. A zero or negative
    *     value disables buffering.
    */
-  public CacheDataSink(Cache cache, long maxCacheFileSize, int bufferSize) {
+  public CacheDataSink(Cache cache, long fragmentSize, int bufferSize) {
     Assertions.checkState(
-        maxCacheFileSize > 0 || maxCacheFileSize == C.LENGTH_UNSET,
-        "maxCacheFileSize must be positive or C.LENGTH_UNSET.");
-    if (maxCacheFileSize != C.LENGTH_UNSET
-        && maxCacheFileSize < MIN_RECOMMENDED_MAX_CACHE_FILE_SIZE) {
+        fragmentSize > 0 || fragmentSize == C.LENGTH_UNSET,
+        "fragmentSize must be positive or C.LENGTH_UNSET.");
+    if (fragmentSize != C.LENGTH_UNSET && fragmentSize < MIN_RECOMMENDED_FRAGMENT_SIZE) {
       Log.w(
           TAG,
-          "maxCacheFileSize is below the minimum recommended value of "
-              + MIN_RECOMMENDED_MAX_CACHE_FILE_SIZE
+          "fragmentSize is below the minimum recommended value of "
+              + MIN_RECOMMENDED_FRAGMENT_SIZE
              + ". This may cause poor cache performance.");
     }
     this.cache = Assertions.checkNotNull(cache);
-    this.maxCacheFileSize = maxCacheFileSize == C.LENGTH_UNSET ? Long.MAX_VALUE : maxCacheFileSize;
+    this.fragmentSize = fragmentSize == C.LENGTH_UNSET ? Long.MAX_VALUE : fragmentSize;
     this.bufferSize = bufferSize;
     syncFileDescriptor = true;
   }
@@ -116,8 +115,7 @@ public final class CacheDataSink implements DataSink {
   /**
    * Sets whether file descriptors are synced when closing output streams.
    *
-   * <p>This method is experimental, and will be renamed or removed in a future release. It should
-   * only be called before the renderer is used.
+   * <p>This method is experimental, and will be renamed or removed in a future release.
    *
    * @param syncFileDescriptor Whether file descriptors are synced when closing output streams.
    */
@@ -125,6 +123,20 @@ public final class CacheDataSink implements DataSink {
     this.syncFileDescriptor = syncFileDescriptor;
   }
 
+  /**
+   * Sets whether this instance respects the {@link DataSpec#FLAG_ALLOW_CACHE_FRAGMENTATION} flag.
+   * If set to {@code false} requests will always be fragmented. If set to {@code true} requests
+   * will be fragmented only if the flag is set.
+   *
+   * <p>This method is experimental, and will be renamed or removed in a future release.
+   *
+   * @param respectCacheFragmentationFlag Whether to respect the {@link
+   *     DataSpec#FLAG_ALLOW_CACHE_FRAGMENTATION} flag.
+   */
+  public void experimental_setRespectCacheFragmentationFlag(boolean respectCacheFragmentationFlag) {
+    this.respectCacheFragmentationFlag = respectCacheFragmentationFlag;
+  }
+
   @Override
   public void open(DataSpec dataSpec) throws CacheDataSinkException {
     if (dataSpec.length == C.LENGTH_UNSET
@@ -133,6 +145,11 @@ public final class CacheDataSink implements DataSink {
       return;
     }
     this.dataSpec = dataSpec;
+    this.dataSpecFragmentSize =
+        !respectCacheFragmentationFlag
+                || dataSpec.isFlagSet(DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION)
+            ? fragmentSize
+            : Long.MAX_VALUE;
     dataSpecBytesWritten = 0;
     try {
       openNextOutputStream();
@@ -149,12 +166,12 @@ public final class CacheDataSink implements DataSink {
     try {
       int bytesWritten = 0;
       while (bytesWritten < length) {
-        if (outputStreamBytesWritten == maxCacheFileSize) {
+        if (outputStreamBytesWritten == dataSpecFragmentSize) {
           closeCurrentOutputStream();
           openNextOutputStream();
         }
-        int bytesToWrite = (int) Math.min(length - bytesWritten,
-            maxCacheFileSize - outputStreamBytesWritten);
+        int bytesToWrite =
+            (int) Math.min(length - bytesWritten, dataSpecFragmentSize - outputStreamBytesWritten);
         outputStream.write(buffer, offset + bytesWritten, bytesToWrite);
         bytesWritten += bytesToWrite;
         outputStreamBytesWritten += bytesToWrite;
@@ -181,7 +198,7 @@ public final class CacheDataSink implements DataSink {
     long length =
         dataSpec.length == C.LENGTH_UNSET
             ? C.LENGTH_UNSET
-            : Math.min(dataSpec.length - dataSpecBytesWritten, maxCacheFileSize);
+            : Math.min(dataSpec.length - dataSpecBytesWritten, dataSpecFragmentSize);
     file =
         cache.startFile(
             dataSpec.key, dataSpec.absoluteStreamPosition + dataSpecBytesWritten, length);
@@ -23,27 +23,50 @@ import com.google.android.exoplayer2.upstream.DataSink;
 public final class CacheDataSinkFactory implements DataSink.Factory {
 
   private final Cache cache;
-  private final long maxCacheFileSize;
+  private final long fragmentSize;
   private final int bufferSize;
 
-  /**
-   * @see CacheDataSink#CacheDataSink(Cache, long)
-   */
-  public CacheDataSinkFactory(Cache cache, long maxCacheFileSize) {
-    this(cache, maxCacheFileSize, CacheDataSink.DEFAULT_BUFFER_SIZE);
+  private boolean syncFileDescriptor;
+  private boolean respectCacheFragmentationFlag;
+
+  /** @see CacheDataSink#CacheDataSink(Cache, long) */
+  public CacheDataSinkFactory(Cache cache, long fragmentSize) {
+    this(cache, fragmentSize, CacheDataSink.DEFAULT_BUFFER_SIZE);
+  }
+
+  /** @see CacheDataSink#CacheDataSink(Cache, long, int) */
+  public CacheDataSinkFactory(Cache cache, long fragmentSize, int bufferSize) {
+    this.cache = cache;
+    this.fragmentSize = fragmentSize;
+    this.bufferSize = bufferSize;
   }
 
   /**
-   * @see CacheDataSink#CacheDataSink(Cache, long, int)
+   * See {@link CacheDataSink#experimental_setSyncFileDescriptor(boolean)}.
+   *
+   * <p>This method is experimental, and will be renamed or removed in a future release.
    */
-  public CacheDataSinkFactory(Cache cache, long maxCacheFileSize, int bufferSize) {
-    this.cache = cache;
-    this.maxCacheFileSize = maxCacheFileSize;
-    this.bufferSize = bufferSize;
+  public CacheDataSinkFactory experimental_setSyncFileDescriptor(boolean syncFileDescriptor) {
+    this.syncFileDescriptor = syncFileDescriptor;
+    return this;
+  }
+
+  /**
+   * See {@link CacheDataSink#experimental_setRespectCacheFragmentationFlag(boolean)}.
+   *
+   * <p>This method is experimental, and will be renamed or removed in a future release.
+   */
+  public CacheDataSinkFactory experimental_setRespectCacheFragmentationFlag(
+      boolean respectCacheFragmentationFlag) {
+    this.respectCacheFragmentationFlag = respectCacheFragmentationFlag;
+    return this;
   }
 
   @Override
   public DataSink createDataSink() {
-    return new CacheDataSink(cache, maxCacheFileSize, bufferSize);
+    CacheDataSink dataSink = new CacheDataSink(cache, fragmentSize, bufferSize);
+    dataSink.experimental_setSyncFileDescriptor(syncFileDescriptor);
+    dataSink.experimental_setRespectCacheFragmentationFlag(respectCacheFragmentationFlag);
+    return dataSink;
   }
 }
@@ -172,7 +172,7 @@ public final class CacheDataSource implements DataSource {
         cache,
         upstream,
         new FileDataSource(),
-        new CacheDataSink(cache, CacheDataSink.DEFAULT_MAX_CACHE_FILE_SIZE),
+        new CacheDataSink(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE),
         flags,
         /* eventListener= */ null);
   }
@@ -50,7 +50,7 @@ public final class CacheDataSourceFactory implements DataSource.Factory {
         cache,
         upstreamFactory,
         new FileDataSourceFactory(),
-        new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_MAX_CACHE_FILE_SIZE),
+        new CacheDataSinkFactory(cache, CacheDataSink.DEFAULT_FRAGMENT_SIZE),
         flags,
         /* eventListener= */ null);
   }
@@ -47,7 +47,7 @@ import org.robolectric.RuntimeEnvironment;
 public final class CacheDataSourceTest {
 
   private static final byte[] TEST_DATA = new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
-  private static final int MAX_CACHE_FILE_SIZE = 3;
+  private static final int CACHE_FRAGMENT_SIZE = 3;
   private static final String DATASPEC_KEY = "dataSpecKey";
 
   private Uri testDataUri;
@@ -81,13 +81,13 @@ public final class CacheDataSourceTest {
   }
 
   @Test
-  public void testMaxCacheFileSize() throws Exception {
+  public void testFragmentSize() throws Exception {
     CacheDataSource cacheDataSource = createCacheDataSource(false, false);
     assertReadDataContentLength(cacheDataSource, boundedDataSpec, false, false);
     for (String key : cache.getKeys()) {
       for (CacheSpan cacheSpan : cache.getCachedSpans(key)) {
-        assertThat(cacheSpan.length <= MAX_CACHE_FILE_SIZE).isTrue();
-        assertThat(cacheSpan.file.length() <= MAX_CACHE_FILE_SIZE).isTrue();
+        assertThat(cacheSpan.length <= CACHE_FRAGMENT_SIZE).isTrue();
+        assertThat(cacheSpan.file.length() <= CACHE_FRAGMENT_SIZE).isTrue();
       }
     }
   }
@@ -548,14 +548,14 @@ public final class CacheDataSourceTest {
         setReadException,
         unknownLength,
         CacheDataSource.FLAG_BLOCK_ON_CACHE,
-        new CacheDataSink(cache, MAX_CACHE_FILE_SIZE),
+        new CacheDataSink(cache, CACHE_FRAGMENT_SIZE),
         cacheKeyFactory);
   }
 
   private CacheDataSource createCacheDataSource(
       boolean setReadException, boolean unknownLength, @CacheDataSource.Flags int flags) {
     return createCacheDataSource(
-        setReadException, unknownLength, flags, new CacheDataSink(cache, MAX_CACHE_FILE_SIZE));
+        setReadException, unknownLength, flags, new CacheDataSink(cache, CACHE_FRAGMENT_SIZE));
   }
 
   private CacheDataSource createCacheDataSource(
@@ -602,6 +602,7 @@ public final class CacheDataSourceTest {
   }
 
   private DataSpec buildDataSpec(long position, long length, @Nullable String key) {
-    return new DataSpec(testDataUri, position, length, key);
+    return new DataSpec(
+        testDataUri, position, length, key, DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
   }
 }
@@ -336,17 +336,17 @@ public final class CacheUtilTest {
     FakeDataSource dataSource = new FakeDataSource(fakeDataSet);
 
     Uri uri = Uri.parse("test_data");
-    DataSpec dataSpec = new DataSpec(uri);
+    DataSpec dataSpec = new DataSpec(uri, DataSpec.FLAG_ALLOW_CACHE_FRAGMENTATION);
     CacheUtil.cache(
         dataSpec,
         cache,
         /* cacheKeyFactory= */ null,
-        // Set maxCacheFileSize to 10 to make sure there are multiple spans.
+        // Set fragmentSize to 10 to make sure there are multiple spans.
         new CacheDataSource(
             cache,
             dataSource,
             new FileDataSource(),
-            new CacheDataSink(cache, /* maxCacheFileSize= */ 10),
+            new CacheDataSink(cache, /* fragmentSize= */ 10),
             /* flags= */ 0,
             /* eventListener= */ null),
         new byte[CacheUtil.DEFAULT_BUFFER_SIZE_BYTES],