/** An interface for writing to and reading from a disk cache. */
public interface DiskCache {

  /** An interface for lazily creating a disk cache. */
  interface Factory {
    /** 250 MB of cache. */
    int DEFAULT_DISK_CACHE_SIZE = 250 * 1024 * 1024;
    String DEFAULT_DISK_CACHE_DIR = "image_manager_disk_cache";

    /** Returns a new disk cache, or {@code null} if no disk cache could be created. */
    @Nullable
    DiskCache build();
  }
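Glide builds the disk cache lazily through this factory. As a hedged illustration of how a caller can swap in a different factory, the sketch below registers one from an AppGlideModule; the module name and the 100 MB limit are illustrative, and it assumes Glide 4.x's GlideBuilder#setDiskCache API.

import android.content.Context;
import androidx.annotation.NonNull;
import com.bumptech.glide.GlideBuilder;
import com.bumptech.glide.annotation.GlideModule;
import com.bumptech.glide.load.engine.cache.ExternalPreferredCacheDiskCacheFactory;
import com.bumptech.glide.module.AppGlideModule;

// Hypothetical module; any @GlideModule-annotated AppGlideModule works.
@GlideModule
public final class CustomCacheGlideModule extends AppGlideModule {
  @Override
  public void applyOptions(@NonNull Context context, @NonNull GlideBuilder builder) {
    // Replace the default factory (and its 250 MB DEFAULT_DISK_CACHE_SIZE)
    // with an external-preferred cache capped at 100 MB.
    builder.setDiskCache(
        new ExternalPreferredCacheDiskCacheFactory(context, 100 * 1024 * 1024));
  }
}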
  /** An interface to actually write data to a key in the disk cache. */
  interface Writer {
    /**
     * Writes data to the file and returns true if the write was successful and should be
     * committed, and false if the write should be aborted.
     *
     * @param file The File the Writer should write to.
     */
    boolean write(@NonNull File file);
  }
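Glide's own writers (for example DataCacheWriter) funnel data through an Encoder, but the contract is easiest to see with a minimal hand-written implementation. The ByteArrayWriter below is a hypothetical sketch: returning true asks the cache to commit the entry, returning false asks it to abort.

import androidx.annotation.NonNull;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Hypothetical Writer that persists an in-memory byte[].
final class ByteArrayWriter implements DiskCache.Writer {
  private final byte[] data;

  ByteArrayWriter(byte[] data) {
    this.data = data;
  }

  @Override
  public boolean write(@NonNull File file) {
    try (OutputStream os = new FileOutputStream(file)) {
      os.write(data);
      return true;  // Commit: the entry becomes readable.
    } catch (IOException e) {
      return false; // Abort: nothing is published for this key.
    }
  }
}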
  /**
   * Get the cache for the value at the given key.
   *
   * <p>Note - This is potentially dangerous, someone may write a new value to the file at any
   * point in time and we won't know about it.
   *
   * @param key The key in the cache.
   * @return The File containing the data for key at the time get is called, or {@code null} if
   *     there is no value for the key.
   */
  @Nullable
  File get(Key key);
  /**
   * Write to a key in the cache. {@link Writer} is used so that the cache implementation can
   * perform actions after the write finishes, like commit (via atomic file rename).
   *
   * @param key The key to write to.
   * @param writer An interface that will write data to the File provided for the key.
   */
  void put(Key key, Writer writer);
  /**
   * Remove the key and value from the cache.
   *
   * @param key The key to remove.
   */
  // Public API.
  @SuppressWarnings("unused")
  void delete(Key key);
/**
 * Creates an {@link com.bumptech.glide.disklrucache.DiskLruCache} based disk cache in the
 * external disk cache directory, falling back to the internal disk cache if no external storage
 * is available. Once it has fallen back to the internal disk cache, it keeps using it from that
 * point on.
 *
 * <p><b>Images can be read by everyone when using the external disk cache.</b>
 */
// Public API.
@SuppressWarnings({"unused", "WeakerAccess"})
public final class ExternalPreferredCacheDiskCacheFactory extends DiskLruCacheFactory {
  @Override
  public File getCacheDirectory() {
    File internalCacheDirectory = getInternalCacheDirectory();

    // Already used internal cache, so keep using that one,
    // thus avoiding using both external and internal with transient errors.
    if ((null != internalCacheDirectory) && internalCacheDirectory.exists()) {
      return internalCacheDirectory;
    }
/**
 * Creates an {@link com.bumptech.glide.disklrucache.DiskLruCache} based disk cache in the
 * specified disk cache directory.
 *
 * <p>If you need to make I/O access before returning the cache directory use the {@link
 * DiskLruCacheFactory#DiskLruCacheFactory(CacheDirectoryGetter, long)} constructor variant.
 */
// Public API.
@SuppressWarnings("unused")
public class DiskLruCacheFactory implements DiskCache.Factory {
  private final long diskCacheSize;
  private final CacheDirectoryGetter cacheDirectoryGetter;
  /** Interface called off the UI thread to get the cache folder. */
  public interface CacheDirectoryGetter {
    File getCacheDirectory();
  }
  public DiskLruCacheFactory(
      final String diskCacheFolder, final String diskCacheName, long diskCacheSize) {
    this(
        new CacheDirectoryGetter() {
          @Override
          public File getCacheDirectory() {
            return new File(diskCacheFolder, diskCacheName);
          }
        },
        diskCacheSize);
  }
  /**
   * When using this constructor, {@link CacheDirectoryGetter#getCacheDirectory()} will be called
   * off the UI thread, allowing I/O access without impacting performance.
   *
   * @param cacheDirectoryGetter Interface called off the UI thread to get the cache folder.
   * @param diskCacheSize Desired maximum size in bytes for the LRU disk cache.
   */
  // Public API.
  @SuppressWarnings("WeakerAccess")
  public DiskLruCacheFactory(CacheDirectoryGetter cacheDirectoryGetter, long diskCacheSize) {
    this.diskCacheSize = diskCacheSize;
    this.cacheDirectoryGetter = cacheDirectoryGetter;
  }
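As a rough usage sketch of the constructor above: the `context`, the "thumbs" subdirectory, and the 50 MB limit are all illustrative values supplied by the caller, not Glide defaults.

DiskCache.Factory factory =
    new DiskLruCacheFactory(
        new DiskLruCacheFactory.CacheDirectoryGetter() {
          @Override
          public File getCacheDirectory() {
            // Resolved off the UI thread, so touching the filesystem here is fine.
            return new File(context.getCacheDir(), "thumbs");
          }
        },
        50L * 1024 * 1024);
DiskCache cache = factory.build(); // May be null if the directory cannot be created.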
  @Override
  public DiskCache build() {
    File cacheDir = cacheDirectoryGetter.getCacheDirectory();

    if (cacheDir == null) {
      return null;
    }

    if (!cacheDir.mkdirs() && (!cacheDir.exists() || !cacheDir.isDirectory())) {
      return null;
    }
  /**
   * Create a new DiskCache in the given directory with a specified max size.
   *
   * @param directory The directory for the disk cache
   * @param maxSize The max size for the disk cache
   * @return The new disk cache with the given arguments
   */
  @SuppressWarnings("deprecation")
  public static DiskCache create(File directory, long maxSize) {
    return new DiskLruCacheWrapper(directory, maxSize);
  }
  /** @deprecated Do not extend this class. */
  @Deprecated
  // Deprecated public API.
  @SuppressWarnings({"WeakerAccess", "DeprecatedIsStillUsed"})
  protected DiskLruCacheWrapper(File directory, long maxSize) {
    this.directory = directory;
    this.maxSize = maxSize;
    this.safeKeyGenerator = new SafeKeyGenerator();
  }
  @Override
  public File get(Key key) {
    String safeKey = safeKeyGenerator.getSafeKey(key);
    if (Log.isLoggable(TAG, Log.VERBOSE)) {
      Log.v(TAG, "Get: Obtained: " + safeKey + " for Key: " + key);
    }
    File result = null;
    try {
      // It is possible that there will be a put in between these two gets. If so, that shouldn't
      // be a problem because we will always put the same value at the same key, so our input
      // streams will still represent the same data.
      final DiskLruCache.Value value = getDiskCache().get(safeKey);
      if (value != null) {
        result = value.getFile(0);
      }
    } catch (IOException e) {
      if (Log.isLoggable(TAG, Log.WARN)) {
        Log.w(TAG, "Unable to get from disk cache", e);
      }
    }
    return result;
  }
  private final DiskCacheWriteLocker writeLocker = new DiskCacheWriteLocker();
  @Override
  public void put(Key key, Writer writer) {
    // We want to make sure that puts block so that data is available when put completes. We may
    // actually not write any data if we find that data is written by the time we acquire the
    // lock.
    String safeKey = safeKeyGenerator.getSafeKey(key);
    // Acquire the write lock for this particular safeKey.
    writeLocker.acquire(safeKey);
    try {
      if (Log.isLoggable(TAG, Log.VERBOSE)) {
        Log.v(TAG, "Put: Obtained: " + safeKey + " for Key: " + key);
      }
      try {
        // We assume we only need to put once, so if data was written while we were trying to get
        // the lock, we can simply abort.
        DiskLruCache diskCache = getDiskCache();
        Value current = diskCache.get(safeKey);
        if (current != null) {
          return;
        }

        DiskLruCache.Editor editor = diskCache.edit(safeKey);
        if (editor == null) {
          throw new IllegalStateException("Had two simultaneous puts for: " + safeKey);
        }
        try {
          File file = editor.getFile(0);
          if (writer.write(file)) {
            editor.commit();
          }
        } finally {
          // DiskLruCache keeps a journal, so writing a file is handled as a transaction:
          // anything that was not committed is rolled back here.
          editor.abortUnlessCommitted();
        }
      } catch (IOException e) {
        if (Log.isLoggable(TAG, Log.WARN)) {
          Log.w(TAG, "Unable to put to disk cache", e);
        }
      }
    } finally {
      writeLocker.release(safeKey);
    }
  }
  @Override
  public void delete(Key key) {
    String safeKey = safeKeyGenerator.getSafeKey(key);
    try {
      getDiskCache().remove(safeKey);
    } catch (IOException e) {
      if (Log.isLoggable(TAG, Log.WARN)) {
        Log.w(TAG, "Unable to delete from disk cache", e);
      }
    }
  }
  @Override
  public synchronized void clear() {
    try {
      getDiskCache().delete();
    } catch (IOException e) {
      if (Log.isLoggable(TAG, Log.WARN)) {
        Log.w(TAG, "Unable to clear disk cache or disk cache cleared externally", e);
      }
    } finally {
      // Delete can close the cache but still throw. If we don't null out the disk cache here,
      // every subsequent request will try to act on a closed disk cache and fail. By nulling out
      // the disk cache we at least allow for attempts to open the cache in the future. See #2465.
      resetDiskCache();
    }
  }
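Before diving into DiskLruCache itself, here is a rough sketch of driving the wrapper directly from a background thread; `cacheDir`, `key` (any com.bumptech.glide.load.Key), and `bytes` are assumed to exist in the caller, and ByteArrayWriter refers to the hypothetical writer sketched earlier.

DiskCache cache = DiskLruCacheWrapper.create(cacheDir, 50L * 1024 * 1024);
cache.put(key, new ByteArrayWriter(bytes)); // Blocks until the entry is committed or aborted.
File cached = cache.get(key);               // Null if nothing was ever written for this key.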
/*
 * This cache uses a journal file named "journal". A typical journal file
 * looks like this:
 *     libcore.io.DiskLruCache
 *     1
 *     100
 *     2
 *
 *     CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054
 *     DIRTY 335c4c6028171cfddfbaae1a9c313c52
 *     CLEAN 335c4c6028171cfddfbaae1a9c313c52 3934 2342
 *     REMOVE 335c4c6028171cfddfbaae1a9c313c52
 *     DIRTY 1ab96a171faeeee38496d8b330771a7a
 *     CLEAN 1ab96a171faeeee38496d8b330771a7a 1600 234
 *     READ 335c4c6028171cfddfbaae1a9c313c52
 *     READ 3400330d1dfc7f3f7f4b8d4d803dfcf6
 *
 * The first five lines of the journal form its header. They are the
 * constant string "libcore.io.DiskLruCache", the disk cache's version,
 * the application's version, the value count, and a blank line.
 *
 * Each of the subsequent lines in the file is a record of the state of a
 * cache entry. Each line contains space-separated values: a state, a key,
 * and optional state-specific values.
 *   o DIRTY lines track that an entry is actively being created or updated.
 *     Every successful DIRTY action should be followed by a CLEAN or REMOVE
 *     action. DIRTY lines without a matching CLEAN or REMOVE indicate that
 *     temporary files may need to be deleted.
 *   o CLEAN lines track a cache entry that has been successfully published
 *     and may be read. A publish line is followed by the lengths of each of
 *     its values.
 *   o READ lines track accesses for LRU.
 *   o REMOVE lines track entries that have been deleted.
 *
 * The journal file is appended to as cache operations occur. The journal may
 * occasionally be compacted by dropping redundant lines. A temporary file named
 * "journal.tmp" will be used during compaction; that file should be deleted if
 * it exists when the cache is opened.
 */
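The record format is easy to pick apart by hand. The helper below is not part of DiskLruCache; it is a small illustrative sketch that splits one body line of the journal into the state, key, and (for CLEAN records) per-value lengths described above.

// Illustrative only: split a journal record such as
// "CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054".
static void parseJournalLine(String line) {
  String[] parts = line.split(" ");
  String state = parts[0]; // CLEAN, DIRTY, READ, or REMOVE
  String key = parts[1];   // the entry's key
  if ("CLEAN".equals(state)) {
    // The remaining tokens are the byte lengths of each value in the entry.
    long[] lengths = new long[parts.length - 2];
    for (int i = 0; i < lengths.length; i++) {
      lengths[i] = Long.parseLong(parts[i + 2]);
    }
    System.out.println(key + " -> " + java.util.Arrays.toString(lengths));
  }
}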
/**
 * Opens the cache in {@code directory}, creating a cache if none exists
 * there.
 *
 * @param directory a writable directory
 * @param valueCount the number of values per cache entry. Must be positive.
 * @param maxSize the maximum number of bytes this cache should use to store
 * @throws IOException if reading or writing the cache directory fails
 */
public static DiskLruCache open(File directory, int appVersion, int valueCount, long maxSize)
    throws IOException {
  if (maxSize <= 0) {
    throw new IllegalArgumentException("maxSize <= 0");
  }
  if (valueCount <= 0) {
    throw new IllegalArgumentException("valueCount <= 0");
  }
  // If a bkp file exists, use it instead.
  File backupFile = new File(directory, JOURNAL_FILE_BACKUP);
  if (backupFile.exists()) {
    File journalFile = new File(directory, JOURNAL_FILE);
    // If journal file also exists just delete backup file.
    if (journalFile.exists()) {
      backupFile.delete();
    } else {
      renameTo(backupFile, journalFile, false);
    }
  }

  // Prefer to pick up where we left off.
  DiskLruCache cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
  if (cache.journalFile.exists()) {
    try {
      cache.readJournal();
      cache.processJournal();
      return cache;
    } catch (IOException journalIsCorrupt) {
      System.out.println(
          "DiskLruCache " + directory + " is corrupt: " + journalIsCorrupt.getMessage()
              + ", removing");
      cache.delete();
    }
  }

  // Create a new empty cache.
  directory.mkdirs();
  cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
  cache.rebuildJournal();
  return cache;
}
    // The app died partway through writing the journal.
    // If we ended on a truncated line, rebuild the journal before appending to it.
    if (reader.hasUnterminatedLine()) {
      rebuildJournal();
    } else {
      journalWriter = new BufferedWriter(
          new OutputStreamWriter(new FileOutputStream(journalFile, true), Util.US_ASCII));
    }
  } finally {
    Util.closeQuietly(reader);
  }
}
/**
 * Creates a new journal that omits redundant information. This replaces the
 * current journal if it exists.
 */
private synchronized void rebuildJournal() throws IOException {
  if (journalWriter != null) {
    journalWriter.close();
  }
  /** Lengths of this entry's files. */
  private final long[] lengths;

  /** Memoized File objects for this entry to avoid char[] allocations. */
  File[] cleanFiles;
  File[] dirtyFiles;

  /** True if this entry has ever been published. */
  private boolean readable;

  /** The ongoing edit or null if this entry is not being edited. */
  private Editor currentEditor;

  /** The sequence number of the most recently committed edit to this entry. */
  private long sequenceNumber;
  private Entry(String key) {
    this.key = key;
    this.lengths = new long[valueCount];
    cleanFiles = new File[valueCount];
    dirtyFiles = new File[valueCount];

    // The names are repetitive so re-use the same builder to avoid allocations.
    StringBuilder fileBuilder = new StringBuilder(key).append('.');
    int truncateTo = fileBuilder.length();
    for (int i = 0; i < valueCount; i++) {
      fileBuilder.append(i);
      cleanFiles[i] = new File(directory, fileBuilder.toString());
      fileBuilder.append(".tmp");
      dirtyFiles[i] = new File(directory, fileBuilder.toString());
      fileBuilder.setLength(truncateTo);
    }
  }
}
/**
 * Computes the initial size and collects garbage as a part of opening the
 * cache. Dirty entries are assumed to be inconsistent and will be deleted.
 */
private void processJournal() throws IOException {
  deleteIfExists(journalFileTmp);
  for (Iterator<Entry> i = lruEntries.values().iterator(); i.hasNext(); ) {
    Entry entry = i.next();
    if (entry.currentEditor == null) {
      // This entry is in the CLEAN state.
      for (int t = 0; t < valueCount; t++) {
        // size is the number of bytes currently used by the disk cache.
        size += entry.lengths[t];
      }
    } else {
      // DIRTY: a previous writer died partway through, so delete the corresponding files.
      entry.currentEditor = null;
      // valueCount was passed in when constructing the DiskLruCache and is the number of values
      // per entry. Glide caches one file per entry, so valueCount == 1.
      for (int t = 0; t < valueCount; t++) {
        deleteIfExists(entry.getCleanFile(t));
        deleteIfExists(entry.getDirtyFile(t));
      }
      i.remove();
    }
  }
}
/**
 * Returns an editor for the entry named {@code key}, or null if another
 * edit is in progress.
 */
public Editor edit(String key) throws IOException {
  return edit(key, ANY_SEQUENCE_NUMBER);
}
private synchronized Editor edit(String key, long expectedSequenceNumber) throws IOException {
  checkNotClosed();
  Entry entry = lruEntries.get(key);
  if (expectedSequenceNumber != ANY_SEQUENCE_NUMBER
      && (entry == null || entry.sequenceNumber != expectedSequenceNumber)) {
    return null; // Value is stale.
  }
  if (entry == null) {
    // Create a new cache entry.
    entry = new Entry(key);
    lruEntries.put(key, entry);
  } else if (entry.currentEditor != null) {
    return null; // Another edit is in progress.
  }
  Editor editor = new Editor(entry);
  entry.currentEditor = editor;

  // Flush the journal before creating files to prevent file leaks.
  journalWriter.append(DIRTY);
  journalWriter.append(' ');
  journalWriter.append(key);
  journalWriter.append('\n');
  journalWriter.flush();
  return editor;
}
/**
 * Commits this edit so it is visible to readers. This releases the
 * edit lock so another edit may be started on the same key.
 */
public void commit() throws IOException {
  // The object using this Editor must catch and handle any errors
  // during the write. If there is an error and they call commit
  // anyway, we will assume whatever they managed to write was valid.
  // Normally they should call abort.
  completeEdit(this, true);
  committed = true;
}
  // If this edit is creating the entry for the first time, every index must have a value.
  if (success && !entry.readable) {
    for (int i = 0; i < valueCount; i++) {
      if (!editor.written[i]) {
        editor.abort();
        throw new IllegalStateException(
            "Newly created entry didn't create value for index " + i);
      }
      if (!entry.getDirtyFile(i).exists()) {
        editor.abort();
        return;
      }
    }
  }

  for (int i = 0; i < valueCount; i++) {
    File dirty = entry.getDirtyFile(i);
    if (success) {
      if (dirty.exists()) {
        File clean = entry.getCleanFile(i);
        dirty.renameTo(clean);
        long oldLength = entry.lengths[i];
        long newLength = clean.length();
        entry.lengths[i] = newLength;
        size = size - oldLength + newLength;
      }
    } else {
      deleteIfExists(dirty);
    }
  }
/**
 * Aborts this edit. This releases the edit lock so another edit may be
 * started on the same key.
 */
public void abort() throws IOException {
  // Passing false causes the entry's in-progress (dirty) files to be deleted.
  completeEdit(this, false);
}
/**
 * Returns a snapshot of the entry named {@code key}, or null if it doesn't
 * exist or is not currently readable. If a value is returned, it is moved to
 * the head of the LRU queue.
 */
public synchronized Value get(String key) throws IOException {
  checkNotClosed();
  Entry entry = lruEntries.get(key);
  if (entry == null) {
    return null;
  }

  if (!entry.readable) {
    return null;
  }

  for (File file : entry.cleanFiles) {
    // A file must have been deleted manually!
    if (!file.exists()) {
      return null;
    }
  }
/**
 * Drops the entry for {@code key} if it exists and can be removed. Entries
 * actively being edited cannot be removed.
 *
 * @return true if an entry was removed.
 */
public synchronized boolean remove(String key) throws IOException {
  checkNotClosed();
  Entry entry = lruEntries.get(key);
  if (entry == null || entry.currentEditor != null) {
    return false;
  }

  for (int i = 0; i < valueCount; i++) {
    File file = entry.getCleanFile(i);
    if (file.exists() && !file.delete()) {
      throw new IOException("failed to delete " + file);
    }
    size -= entry.lengths[i];
    entry.lengths[i] = 0;
  }
/**
 * Closes the cache and deletes all of its stored values. This will delete
 * all files in the cache directory including files that weren't created by
 * the cache.
 */
public void delete() throws IOException {
  close();
  // All cache files live in `directory`, so deleting the whole directory "clears" the cache.
  Util.deleteContents(directory);
}
/** Closes this cache. Stored values will remain on the filesystem. */
public synchronized void close() throws IOException {
  if (journalWriter == null) {
    return; // Already closed.
  }
  for (Entry entry : new ArrayList<Entry>(lruEntries.values())) {
    if (entry.currentEditor != null) {
      entry.currentEditor.abort();
    }
  }
  trimToSize();
  journalWriter.close();
  journalWriter = null;
}