Ignore corrupt indexes in Js5MasterIndex::create

Signed-off-by: Graham <gpe@openrs2.org>
Graham 4 years ago
parent bf86fa48e9
commit e6af0affb6
  1. 12
      archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
  2. 65
      cache/src/main/kotlin/org/openrs2/cache/Js5MasterIndex.kt

@@ -14,6 +14,7 @@ import org.openrs2.cache.Js5Index
import org.openrs2.cache.Js5MasterIndex import org.openrs2.cache.Js5MasterIndex
import org.openrs2.cache.MasterIndexFormat import org.openrs2.cache.MasterIndexFormat
import org.openrs2.cache.Store import org.openrs2.cache.Store
import org.openrs2.cache.StoreCorruptException
import org.openrs2.cache.VersionTrailer import org.openrs2.cache.VersionTrailer
import org.openrs2.crypto.Whirlpool import org.openrs2.crypto.Whirlpool
import org.openrs2.db.Database import org.openrs2.db.Database
@@ -124,9 +125,14 @@ public class CacheImporter @Inject constructor(
val indexGroups = mutableListOf<Index>() val indexGroups = mutableListOf<Index>()
try { try {
for (archive in store.list(Js5Archive.ARCHIVESET)) { for (archive in store.list(Js5Archive.ARCHIVESET)) {
val indexGroup = readIndex(store, archive) try {
indexes[archive] = indexGroup.index val indexGroup = readIndex(store, archive)
indexGroups += indexGroup indexes[archive] = indexGroup.index
indexGroups += indexGroup
} catch (ex: StoreCorruptException) {
// see the comment in Js5MasterIndex::create
logger.warn(ex) { "Skipping corrupt index (archive $archive)" }
}
} }
for (index in indexGroups) { for (index in indexGroups) {

@@ -122,6 +122,47 @@ public data class Js5MasterIndex(
var nextArchive = 0 var nextArchive = 0
for (archive in store.list(Js5Archive.ARCHIVESET)) { for (archive in store.list(Js5Archive.ARCHIVESET)) {
val entry = try {
store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
val checksum = buf.crc32()
val digest = buf.whirlpool()
Js5Compression.uncompress(buf).use { uncompressed ->
val index = Js5Index.read(uncompressed)
if (index.hasLengths) {
masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.LENGTHS)
} else if (index.hasDigests) {
masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.DIGESTS)
} else if (index.protocol >= Js5Protocol.VERSIONED) {
masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.VERSIONED)
}
val version = index.version
val groups = index.size
val totalUncompressedLength = index.sumBy(Js5Index.Group::uncompressedLength)
// TODO(gpe): should we throw an exception if there are trailing bytes here or in the block above?
Entry(version, checksum, groups, totalUncompressedLength, digest)
}
}
} catch (ex: StoreCorruptException) {
/**
* Unused indexes are never removed from the .idx255 file
* by the client. If the .dat2 file reaches its maximum
* size, it is truncated and all block numbers in the
* .idx255 file will be invalid.
*
* Any in-use indexes will be overwritten, but unused
* indexes will remain in the .idx255 file with invalid
* block numbers.
*
* We therefore expect to see corrupt indexes sometimes. We
* ignore these as if they didn't exist.
*/
continue
}
/* /*
* Fill in gaps with zeroes. I think this is consistent with * Fill in gaps with zeroes. I think this is consistent with
* the official implementation: the TFU client warns that * the official implementation: the TFU client warns that
@@ -131,30 +172,6 @@ public data class Js5MasterIndex(
masterIndex.entries += Entry(0, 0, 0, 0, null) masterIndex.entries += Entry(0, 0, 0, 0, null)
} }
val entry = store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
val checksum = buf.crc32()
val digest = buf.whirlpool()
Js5Compression.uncompress(buf).use { uncompressed ->
val index = Js5Index.read(uncompressed)
if (index.hasLengths) {
masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.LENGTHS)
} else if (index.hasDigests) {
masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.DIGESTS)
} else if (index.protocol >= Js5Protocol.VERSIONED) {
masterIndex.format = maxOf(masterIndex.format, MasterIndexFormat.VERSIONED)
}
val version = index.version
val groups = index.size
val totalUncompressedLength = index.sumBy(Js5Index.Group::uncompressedLength)
// TODO(gpe): should we throw an exception if there are trailing bytes here or in the block above?
Entry(version, checksum, groups, totalUncompressedLength, digest)
}
}
masterIndex.entries += entry masterIndex.entries += entry
nextArchive = archive + 1 nextArchive = archive + 1
} }

Loading…
Cancel
Save