package org.openrs2.archive.cache import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.annotation.JsonProperty import com.fasterxml.jackson.annotation.JsonUnwrapped import io.netty.buffer.ByteBufAllocator import io.netty.buffer.Unpooled import org.openrs2.buffer.use import org.openrs2.cache.ChecksumTable import org.openrs2.cache.DiskStore import org.openrs2.cache.Js5Archive import org.openrs2.cache.Js5Compression import org.openrs2.cache.Js5MasterIndex import org.openrs2.cache.MasterIndexFormat import org.openrs2.cache.Store import org.openrs2.crypto.XteaKey import org.openrs2.db.Database import org.postgresql.util.PGobject import java.sql.Connection import java.time.Instant import java.util.SortedSet import javax.inject.Inject import javax.inject.Singleton @Singleton public class CacheExporter @Inject constructor( private val database: Database, private val alloc: ByteBufAllocator ) { public data class Stats( val validIndexes: Long, val indexes: Long, val validGroups: Long, val groups: Long, val validKeys: Long, val keys: Long, val size: Long, val blocks: Long ) { @JsonIgnore public val allIndexesValid: Boolean = indexes == validIndexes && indexes != 0L @JsonIgnore public val validIndexesFraction: Double = if (indexes == 0L) { 1.0 } else { validIndexes.toDouble() / indexes } @JsonIgnore public val allGroupsValid: Boolean = groups == validGroups @JsonIgnore public val validGroupsFraction: Double = if (groups == 0L) { 1.0 } else { validGroups.toDouble() / groups } @JsonIgnore public val allKeysValid: Boolean = keys == validKeys @JsonIgnore public val validKeysFraction: Double = if (keys == 0L) { 1.0 } else { validKeys.toDouble() / keys } /* * The max block ID is conveniently also the max number of blocks, as * zero is reserved. */ public val diskStoreValid: Boolean = blocks <= DiskStore.MAX_BLOCK } public data class Build(val major: Int, val minor: Int?) : Comparable { override fun compareTo(other: Build): Int { return compareValuesBy(this, other, Build::major, Build::minor) } override fun toString(): String { return if (minor != null) { "$major.$minor" } else { major.toString() } } internal companion object { internal fun fromPgObject(o: PGobject): Build? { val value = o.value!! require(value.length >= 2) val parts = value.substring(1, value.length - 1).split(",") require(parts.size == 2) val major = parts[0] val minor = parts[1] if (major.isEmpty()) { return null } return Build(major.toInt(), if (minor.isEmpty()) null else minor.toInt()) } } } public data class CacheSummary( val id: Int, val game: String, val environment: String, val language: String, val builds: SortedSet, val timestamp: Instant?, val sources: SortedSet, @JsonUnwrapped val stats: Stats? ) public data class Cache( val id: Int, val sources: List, val updates: List, val stats: Stats?, val masterIndex: Js5MasterIndex?, val checksumTable: ChecksumTable? ) public data class Source( val game: String, val environment: String, val language: String, val build: Build?, val timestamp: Instant?, val name: String?, val description: String?, val url: String? ) public data class Key( val archive: Int, val group: Int, val nameHash: Int?, val name: String?, @JsonProperty("mapsquare") val mapSquare: Int?, val key: XteaKey ) public suspend fun list(): List { return database.execute { connection -> connection.prepareStatement( """ SELECT * FROM ( SELECT c.id, g.name AS game, e.name AS environment, l.iso_code AS language, array_remove(array_agg(DISTINCT ROW(s.build_major, s.build_minor)::build ORDER BY ROW(s.build_major, s.build_minor)::build ASC), NULL) builds, MIN(s.timestamp) AS timestamp, array_remove(array_agg(DISTINCT s.name ORDER BY s.name ASC), NULL) sources, cs.valid_indexes, cs.indexes, cs.valid_groups, cs.groups, cs.valid_keys, cs.keys, cs.size, cs.blocks FROM caches c JOIN sources s ON s.cache_id = c.id JOIN game_variants v ON v.id = s.game_id JOIN games g ON g.id = v.game_id JOIN environments e ON e.id = v.environment_id JOIN languages l ON l.id = v.language_id LEFT JOIN cache_stats cs ON cs.cache_id = c.id GROUP BY c.id, g.name, e.name, l.iso_code, cs.valid_indexes, cs.indexes, cs.valid_groups, cs.groups, cs.valid_keys, cs.keys, cs.size, cs.blocks ) t ORDER BY t.game ASC, t.environment ASC, t.language ASC, t.builds[1] ASC, t.timestamp ASC """.trimIndent() ).use { stmt -> stmt.executeQuery().use { rows -> val caches = mutableListOf() while (rows.next()) { val id = rows.getInt(1) val game = rows.getString(2) val environment = rows.getString(3) val language = rows.getString(4) val builds = rows.getArray(5).array as Array<*> val timestamp = rows.getTimestamp(6)?.toInstant() @Suppress("UNCHECKED_CAST") val sources = rows.getArray(7).array as Array val validIndexes = rows.getLong(8) val stats = if (!rows.wasNull()) { val indexes = rows.getLong(9) val validGroups = rows.getLong(10) val groups = rows.getLong(11) val validKeys = rows.getLong(12) val keys = rows.getLong(13) val size = rows.getLong(14) val blocks = rows.getLong(15) Stats(validIndexes, indexes, validGroups, groups, validKeys, keys, size, blocks) } else { null } caches += CacheSummary( id, game, environment, language, builds.mapNotNull { o -> Build.fromPgObject(o as PGobject) }.toSortedSet(), timestamp, sources.toSortedSet(), stats ) } caches } } } } public suspend fun get(id: Int): Cache? { return database.execute { connection -> val masterIndex: Js5MasterIndex? val checksumTable: ChecksumTable? val stats: Stats? connection.prepareStatement( """ SELECT m.format, mc.data, b.data, cs.valid_indexes, cs.indexes, cs.valid_groups, cs.groups, cs.valid_keys, cs.keys, cs.size, cs.blocks FROM caches c LEFT JOIN master_indexes m ON m.id = c.id LEFT JOIN containers mc ON mc.id = m.container_id LEFT JOIN crc_tables t ON t.id = c.id LEFT JOIN blobs b ON b.id = t.blob_id LEFT JOIN cache_stats cs ON cs.cache_id = c.id WHERE c.id = ? """.trimIndent() ).use { stmt -> stmt.setInt(1, id) stmt.executeQuery().use { rows -> if (!rows.next()) { return@execute null } val formatString = rows.getString(1) masterIndex = if (formatString != null) { Unpooled.wrappedBuffer(rows.getBytes(2)).use { compressed -> Js5Compression.uncompress(compressed).use { uncompressed -> val format = MasterIndexFormat.valueOf(formatString.uppercase()) Js5MasterIndex.readUnverified(uncompressed, format) } } } else { null } val blob = rows.getBytes(3) checksumTable = if (blob != null) { Unpooled.wrappedBuffer(blob).use { buf -> ChecksumTable.read(buf) } } else { null } val validIndexes = rows.getLong(4) stats = if (rows.wasNull()) { null } else { val indexes = rows.getLong(5) val validGroups = rows.getLong(6) val groups = rows.getLong(7) val validKeys = rows.getLong(8) val keys = rows.getLong(9) val size = rows.getLong(10) val blocks = rows.getLong(11) Stats(validIndexes, indexes, validGroups, groups, validKeys, keys, size, blocks) } } } val sources = mutableListOf() connection.prepareStatement( """ SELECT g.name, e.name, l.iso_code, s.build_major, s.build_minor, s.timestamp, s.name, s.description, s.url FROM sources s JOIN game_variants v ON v.id = s.game_id JOIN games g ON g.id = v.game_id JOIN environments e ON e.id = v.environment_id JOIN languages l ON l.id = v.language_id WHERE s.cache_id = ? ORDER BY s.name ASC """.trimIndent() ).use { stmt -> stmt.setInt(1, id) stmt.executeQuery().use { rows -> while (rows.next()) { val game = rows.getString(1) val environment = rows.getString(2) val language = rows.getString(3) var buildMajor: Int? = rows.getInt(4) if (rows.wasNull()) { buildMajor = null } var buildMinor: Int? = rows.getInt(5) if (rows.wasNull()) { buildMinor = null } val build = if (buildMajor != null) { Build(buildMajor, buildMinor) } else { null } val timestamp = rows.getTimestamp(6)?.toInstant() val name = rows.getString(7) val description = rows.getString(8) val url = rows.getString(9) sources += Source(game, environment, language, build, timestamp, name, description, url) } } } val updates = mutableListOf() connection.prepareStatement( """ SELECT url FROM updates WHERE cache_id = ? """.trimIndent() ).use { stmt -> stmt.setInt(1, id) stmt.executeQuery().use { rows -> while (rows.next()) { updates += rows.getString(1) } } } Cache(id, sources, updates, stats, masterIndex, checksumTable) } } public fun export(id: Int, storeFactory: (Boolean) -> Store) { database.executeOnce { connection -> val legacy = connection.prepareStatement( """ SELECT id FROM crc_tables WHERE id = ? """.trimIndent() ).use { stmt -> stmt.setInt(1, id) stmt.executeQuery().use { rows -> rows.next() } } storeFactory(legacy).use { store -> if (legacy) { exportLegacy(connection, id, store) } else { export(connection, id, store) } } } } private fun export(connection: Connection, id: Int, store: Store) { connection.prepareStatement( """ SELECT archive_id, group_id, data, version FROM resolved_groups WHERE master_index_id = ? """.trimIndent() ).use { stmt -> stmt.fetchSize = BATCH_SIZE stmt.setInt(1, id) stmt.executeQuery().use { rows -> alloc.buffer(2, 2).use { versionBuf -> store.create(Js5Archive.ARCHIVESET) while (rows.next()) { val archive = rows.getInt(1) val group = rows.getInt(2) val bytes = rows.getBytes(3) val version = rows.getInt(4) val versionNull = rows.wasNull() versionBuf.clear() if (!versionNull) { versionBuf.writeShort(version) } Unpooled.wrappedBuffer(Unpooled.wrappedBuffer(bytes), versionBuf.retain()).use { buf -> store.write(archive, group, buf) // ensure the .idx file exists even if it is empty if (archive == Js5Archive.ARCHIVESET) { store.create(group) } } } } } } } private fun exportLegacy(connection: Connection, id: Int, store: Store) { connection.prepareStatement( """ SELECT index_id, file_id, data, version FROM resolved_files WHERE crc_table_id = ? """.trimIndent() ).use { stmt -> stmt.fetchSize = BATCH_SIZE stmt.setInt(1, id) stmt.executeQuery().use { rows -> alloc.buffer(2, 2).use { versionBuf -> store.create(0) while (rows.next()) { val index = rows.getInt(1) val file = rows.getInt(2) val bytes = rows.getBytes(3) val version = rows.getInt(4) val versionNull = rows.wasNull() versionBuf.clear() if (!versionNull) { versionBuf.writeShort(version) } Unpooled.wrappedBuffer(Unpooled.wrappedBuffer(bytes), versionBuf.retain()).use { buf -> store.write(index, file, buf) } } } } } } public suspend fun exportKeys(id: Int): List { return database.execute { connection -> connection.prepareStatement( """ SELECT g.archive_id, g.group_id, g.name_hash, n.name, (k.key).k0, (k.key).k1, (k.key).k2, (k.key).k3 FROM resolved_groups g JOIN keys k ON k.id = g.key_id LEFT JOIN names n ON n.hash = g.name_hash AND n.name ~ '^l(?:[0-9]|[1-9][0-9])_(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$' WHERE g.master_index_id = ? """.trimIndent() ).use { stmt -> stmt.setInt(1, id) stmt.executeQuery().use { rows -> val keys = mutableListOf() while (rows.next()) { val archive = rows.getInt(1) val group = rows.getInt(2) var nameHash: Int? = rows.getInt(3) if (rows.wasNull()) { nameHash = null } val name = rows.getString(4) val k0 = rows.getInt(5) val k1 = rows.getInt(6) val k2 = rows.getInt(7) val k3 = rows.getInt(8) val mapSquare = getMapSquare(name) keys += Key(archive, group, nameHash, name, mapSquare, XteaKey(k0, k1, k2, k3)) } keys } } } } private companion object { private const val BATCH_SIZE = 256 private val LOC_NAME_REGEX = Regex("l(\\d+)_(\\d+)") private fun getMapSquare(name: String?): Int? { if (name == null) { return null } val match = LOC_NAME_REGEX.matchEntire(name) ?: return null val x = match.groupValues[1].toInt() val z = match.groupValues[2].toInt() return (x shl 8) or z } } }