From e6af0affb6420daab1087dd5944c2625ceba1d31 Mon Sep 17 00:00:00 2001
From: Graham
Date: Thu, 25 Mar 2021 21:29:14 +0000
Subject: [PATCH] Ignore corrupt indexes in Js5MasterIndex::create

Signed-off-by: Graham
---
 .../openrs2/archive/cache/CacheImporter.kt    | 12 +++-
 .../org/openrs2/cache/Js5MasterIndex.kt       | 65 ++++++++++++-------
 2 files changed, 50 insertions(+), 27 deletions(-)

diff --git a/archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt b/archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
index 5f9e1a636c..aca396da21 100644
--- a/archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
+++ b/archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
@@ -14,6 +14,7 @@ import org.openrs2.cache.Js5Index
 import org.openrs2.cache.Js5MasterIndex
 import org.openrs2.cache.MasterIndexFormat
 import org.openrs2.cache.Store
+import org.openrs2.cache.StoreCorruptException
 import org.openrs2.cache.VersionTrailer
 import org.openrs2.crypto.Whirlpool
 import org.openrs2.db.Database
@@ -124,9 +125,14 @@ public class CacheImporter @Inject constructor(
         val indexGroups = mutableListOf()
         try {
             for (archive in store.list(Js5Archive.ARCHIVESET)) {
-                val indexGroup = readIndex(store, archive)
-                indexes[archive] = indexGroup.index
-                indexGroups += indexGroup
+                try {
+                    val indexGroup = readIndex(store, archive)
+                    indexes[archive] = indexGroup.index
+                    indexGroups += indexGroup
+                } catch (ex: StoreCorruptException) {
+                    // see the comment in Js5MasterIndex::create
+                    logger.warn(ex) { "Skipping corrupt index (archive $archive)" }
+                }
             }
 
             for (index in indexGroups) {
diff --git a/cache/src/main/kotlin/org/openrs2/cache/Js5MasterIndex.kt b/cache/src/main/kotlin/org/openrs2/cache/Js5MasterIndex.kt
index 23c4f03e30..6cf1b85403 100644
--- a/cache/src/main/kotlin/org/openrs2/cache/Js5MasterIndex.kt
+++ b/cache/src/main/kotlin/org/openrs2/cache/Js5MasterIndex.kt
@@ -122,6 +122,47 @@ public data class Js5MasterIndex(
             var nextArchive = 0
 
             for (archive in store.list(Js5Archive.ARCHIVESET)) {
+                val entry = try {
+                    store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
+                        val checksum = buf.crc32()
+                        val digest = buf.whirlpool()
+
+                        Js5Compression.uncompress(buf).use { uncompressed ->
+                            val index = Js5Index.read(uncompressed)
+
+                            if (index.hasLengths) {
+                                masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.LENGTHS)
+                            } else if (index.hasDigests) {
+                                masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.DIGESTS)
+                            } else if (index.protocol >= Js5Protocol.VERSIONED) {
+                                masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.VERSIONED)
+                            }
+
+                            val version = index.version
+                            val groups = index.size
+                            val totalUncompressedLength = index.sumBy(Js5Index.Group::uncompressedLength)
+
+                            // TODO(gpe): should we throw an exception if there are trailing bytes here or in the block above?
+                            Entry(version, checksum, groups, totalUncompressedLength, digest)
+                        }
+                    }
+                } catch (ex: StoreCorruptException) {
+                    /**
+                     * Unused indexes are never removed from the .idx255 file
+                     * by the client. If the .dat2 file reaches its maximum
+                     * size, it is truncated and all block numbers in the
+                     * .idx255 file will be invalid.
+                     *
+                     * Any in-use indexes will be overwritten, but unused
+                     * indexes will remain in the .idx255 file with invalid
+                     * block numbers.
+                     *
+                     * We therefore expect to see corrupt indexes sometimes. We
+                     * ignore these as if they didn't exist.
+                     */
+                    continue
+                }
+
                 /*
                  * Fill in gaps with zeroes. I think this is consistent with
                  * the official implementation: the TFU client warns that
@@ -131,30 +172,6 @@ public data class Js5MasterIndex(
                     masterIndex.entries += Entry(0, 0, 0, 0, null)
                 }
 
-                val entry = store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
-                    val checksum = buf.crc32()
-                    val digest = buf.whirlpool()
-
-                    Js5Compression.uncompress(buf).use { uncompressed ->
-                        val index = Js5Index.read(uncompressed)
-
-                        if (index.hasLengths) {
-                            masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.LENGTHS)
-                        } else if (index.hasDigests) {
-                            masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.DIGESTS)
-                        } else if (index.protocol >= Js5Protocol.VERSIONED) {
-                            masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.VERSIONED)
-                        }
-
-                        val version = index.version
-                        val groups = index.size
-                        val totalUncompressedLength = index.sumBy(Js5Index.Group::uncompressedLength)
-
-                        // TODO(gpe): should we throw an exception if there are trailing bytes here or in the block above?
-                        Entry(version, checksum, groups, totalUncompressedLength, digest)
-                    }
-                }
-
                 masterIndex.entries += entry
                 nextArchive = archive + 1
             }
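
Note: the sketch below is illustrative and not part of the patch. It walks the .idx255 archive with the same calls the diff uses (store.list, store.read, Js5Compression.uncompress, Js5Index.read) and collects the archives whose indexes can no longer be read, i.e. the ones Js5MasterIndex::create and CacheImporter now treat as if they did not exist. The listCorruptIndexes name and the org.openrs2.buffer.use import are assumptions rather than anything confirmed by this patch.

    import org.openrs2.buffer.use
    import org.openrs2.cache.Js5Archive
    import org.openrs2.cache.Js5Compression
    import org.openrs2.cache.Js5Index
    import org.openrs2.cache.Store
    import org.openrs2.cache.StoreCorruptException

    // Hypothetical helper (not in the patch): report which index archives in
    // .idx255 point at invalid .dat2 blocks and would therefore be skipped.
    fun listCorruptIndexes(store: Store): List<Int> {
        val corrupt = mutableListOf<Int>()

        for (archive in store.list(Js5Archive.ARCHIVESET)) {
            try {
                // Same read path as Js5MasterIndex::create: reading or
                // uncompressing a truncated entry throws StoreCorruptException.
                store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
                    Js5Compression.uncompress(buf).use { uncompressed ->
                        Js5Index.read(uncompressed)
                    }
                }
            } catch (ex: StoreCorruptException) {
                corrupt += archive
            }
        }

        return corrupt
    }

Output from a helper like this could be cross-checked against the importer's new "Skipping corrupt index (archive N)" warnings before running an import.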