Ignore corrupt indexes in Js5MasterIndex::create

Signed-off-by: Graham <gpe@openrs2.org>
bzip2
Graham 4 years ago
parent bf86fa48e9
commit e6af0affb6
  1. 6
      archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
  2. 37
      cache/src/main/kotlin/org/openrs2/cache/Js5MasterIndex.kt

@@ -14,6 +14,7 @@ import org.openrs2.cache.Js5Index
import org.openrs2.cache.Js5MasterIndex
import org.openrs2.cache.MasterIndexFormat
import org.openrs2.cache.Store
import org.openrs2.cache.StoreCorruptException
import org.openrs2.cache.VersionTrailer
import org.openrs2.crypto.Whirlpool
import org.openrs2.db.Database
@@ -124,9 +125,14 @@ public class CacheImporter @Inject constructor(
val indexGroups = mutableListOf<Index>()
try {
for (archive in store.list(Js5Archive.ARCHIVESET)) {
try {
val indexGroup = readIndex(store, archive)
indexes[archive] = indexGroup.index
indexGroups += indexGroup
} catch (ex: StoreCorruptException) {
// see the comment in Js5MasterIndex::create
logger.warn(ex) { "Skipping corrupt index (archive $archive)" }
}
}
for (index in indexGroups) {

@@ -122,16 +122,8 @@ public data class Js5MasterIndex(
var nextArchive = 0
for (archive in store.list(Js5Archive.ARCHIVESET)) {
-/*
- * Fill in gaps with zeroes. I think this is consistent with
- * the official implementation: the TFU client warns that
- * entries with a zero CRC are probably invalid.
- */
-for (i in nextArchive until archive) {
-    masterIndex.entries += Entry(0, 0, 0, 0, null)
-}
-val entry = store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
+val entry = try {
+    store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
val checksum = buf.crc32()
val digest = buf.whirlpool()
@@ -154,6 +146,31 @@ public data class Js5MasterIndex(
Entry(version, checksum, groups, totalUncompressedLength, digest)
}
}
} catch (ex: StoreCorruptException) {
/**
* Unused indexes are never removed from the .idx255 file
* by the client. If the .dat2 file reaches its maximum
* size, it is truncated and all block numbers in the
* .idx255 file will be invalid.
*
* Any in-use indexes will be overwritten, but unused
* indexes will remain in the .idx255 file with invalid
* block numbers.
*
* We therefore expect to see corrupt indexes sometimes. We
* ignore these as if they didn't exist.
*/
continue
}
/*
* Fill in gaps with zeroes. I think this is consistent with
* the official implementation: the TFU client warns that
* entries with a zero CRC are probably invalid.
*/
for (i in nextArchive until archive) {
masterIndex.entries += Entry(0, 0, 0, 0, null)
}
masterIndex.entries += entry
nextArchive = archive + 1

Loading…
Cancel
Save