Store master indexes in the database

I have a small collection of these from when I ran a service that polled
the JS5 server for the master index back in 2009. We'll probably be able
to find some in old Rune-Server threads too.

They'll be useful for verifying caches of unclear provenance.

Signed-off-by: Graham <gpe@openrs2.org>
pull/132/head
Graham 3 years ago
parent 19b6893681
commit 30b605d719
  1. 2
      archive/src/main/kotlin/org/openrs2/archive/ArchiveCommand.kt
  2. 105
      archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
  3. 16
      archive/src/main/kotlin/org/openrs2/archive/container/Container.kt
  4. 94
      archive/src/main/kotlin/org/openrs2/archive/container/ContainerImporter.kt
  5. 28
      archive/src/main/kotlin/org/openrs2/archive/masterindex/ImportCommand.kt
  6. 12
      archive/src/main/kotlin/org/openrs2/archive/masterindex/MasterIndexCommand.kt
  7. 63
      archive/src/main/kotlin/org/openrs2/archive/masterindex/MasterIndexImporter.kt
  8. 13
      archive/src/main/resources/org/openrs2/archive/V1__init.sql

@ -4,6 +4,7 @@ import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands
import org.openrs2.archive.cache.CacheCommand
import org.openrs2.archive.key.KeyCommand
import org.openrs2.archive.masterindex.MasterIndexCommand
import org.openrs2.archive.name.NameCommand
public fun main(args: Array<String>): Unit = ArchiveCommand().main(args)
@ -13,6 +14,7 @@ public class ArchiveCommand : NoOpCliktCommand(name = "archive") {
subcommands(
CacheCommand(),
KeyCommand(),
MasterIndexCommand(),
NameCommand()
)
}

@ -3,8 +3,8 @@ package org.openrs2.archive.cache
import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufAllocator
import io.netty.buffer.ByteBufUtil
import io.netty.buffer.DefaultByteBufHolder
import org.openrs2.buffer.crc32
import org.openrs2.archive.container.Container
import org.openrs2.archive.container.ContainerImporter
import org.openrs2.buffer.use
import org.openrs2.cache.Js5Archive
import org.openrs2.cache.Js5Compression
@ -24,15 +24,6 @@ public class CacheImporter @Inject constructor(
private val database: Database,
private val alloc: ByteBufAllocator
) {
private abstract class Container(
data: ByteBuf
) : DefaultByteBufHolder(data) {
val bytes: ByteArray = ByteBufUtil.getBytes(data, data.readerIndex(), data.readableBytes(), false)
val crc32 = data.crc32()
val whirlpool = Whirlpool.whirlpool(bytes)
abstract val encrypted: Boolean
}
private class Index(
val archive: Int,
val index: Js5Index,
@ -51,28 +42,7 @@ public class CacheImporter @Inject constructor(
public suspend fun import(store: Store) {
database.execute { connection ->
connection.prepareStatement(
"""
LOCK TABLE containers IN EXCLUSIVE MODE
""".trimIndent()
).use { stmt ->
stmt.execute()
}
// create temporary tables
connection.prepareStatement(
"""
CREATE TEMPORARY TABLE tmp_containers (
index INTEGER NOT NULL,
crc32 INTEGER NOT NULL,
whirlpool BYTEA NOT NULL,
data BYTEA NOT NULL,
encrypted BOOLEAN NOT NULL
) ON COMMIT DROP
""".trimIndent()
).use { stmt ->
stmt.execute()
}
ContainerImporter.prepare(connection)
// import indexes
val indexes = mutableListOf<Index>()
@ -132,73 +102,8 @@ public class CacheImporter @Inject constructor(
}
}
private fun addContainer(connection: Connection, container: Container): Long {
return addContainers(connection, listOf(container)).single()
}
private fun addContainers(connection: Connection, containers: List<Container>): List<Long> {
connection.prepareStatement(
"""
TRUNCATE TABLE tmp_containers
""".trimIndent()
).use { stmt ->
stmt.execute()
}
connection.prepareStatement(
"""
INSERT INTO tmp_containers (index, crc32, whirlpool, data, encrypted)
VALUES (?, ?, ?, ?, ?)
""".trimIndent()
).use { stmt ->
for ((i, container) in containers.withIndex()) {
stmt.setInt(1, i)
stmt.setInt(2, container.crc32)
stmt.setBytes(3, container.whirlpool)
stmt.setBytes(4, container.bytes)
stmt.setBoolean(5, container.encrypted)
stmt.addBatch()
}
stmt.executeBatch()
}
connection.prepareStatement(
"""
INSERT INTO containers (crc32, whirlpool, data, encrypted)
SELECT t.crc32, t.whirlpool, t.data, t.encrypted
FROM tmp_containers t
LEFT JOIN containers c ON c.whirlpool = t.whirlpool
WHERE c.whirlpool IS NULL
ON CONFLICT DO NOTHING
""".trimIndent()
).use { stmt ->
stmt.execute()
}
val ids = mutableListOf<Long>()
connection.prepareStatement(
"""
SELECT c.id
FROM tmp_containers t
JOIN containers c ON c.whirlpool = t.whirlpool
ORDER BY t.index ASC
""".trimIndent()
).use { stmt ->
stmt.executeQuery().use { rows ->
while (rows.next()) {
ids += rows.getLong(1)
}
}
}
check(ids.size == containers.size)
return ids
}
private fun addGroups(connection: Connection, groups: List<Group>) {
val containerIds = addContainers(connection, groups)
val containerIds = ContainerImporter.addContainers(connection, groups)
connection.prepareStatement(
"""
@ -289,7 +194,7 @@ public class CacheImporter @Inject constructor(
// TODO(gpe): skip most of this function if we encounter a conflict?
private fun addIndex(connection: Connection, cacheId: Long, index: Index) {
val containerId = addContainer(connection, index)
val containerId = ContainerImporter.addContainer(connection, index)
connection.prepareStatement(
"""

@ -0,0 +1,16 @@
package org.openrs2.archive.container
import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufUtil
import io.netty.buffer.DefaultByteBufHolder
import org.openrs2.buffer.crc32
import org.openrs2.crypto.Whirlpool
/**
 * Base class for a JS5 container staged in memory for import into the
 * database.
 *
 * Wraps the container's backing [ByteBuf] (reference counting is delegated
 * to [DefaultByteBufHolder]) and eagerly captures the byte array, CRC-32
 * checksum and Whirlpool digest; the digest is used elsewhere to
 * de-duplicate containers in the database.
 */
public abstract class Container(
data: ByteBuf
) : DefaultByteBufHolder(data) {
// Captured first: the Whirlpool digest below is computed over this array.
// NOTE(review): the final `false` argument asks getBytes not to copy when it
// can return a backing array directly — confirm `data` is never mutated
// while this Container is alive.
public val bytes: ByteArray = ByteBufUtil.getBytes(data, data.readerIndex(), data.readableBytes(), false)
public val crc32: Int = data.crc32()
public val whirlpool: ByteArray = Whirlpool.whirlpool(bytes)
// Set by subclasses; recorded verbatim in the containers table.
public abstract val encrypted: Boolean
}

@ -0,0 +1,94 @@
package org.openrs2.archive.container
import java.sql.Connection
/**
 * Shared helper for bulk-inserting [Container]s into the `containers` table,
 * de-duplicating rows on the Whirlpool digest of the container data.
 *
 * Callers must invoke [prepare] once on the connection's current transaction
 * before calling [addContainer] or [addContainers].
 */
public object ContainerImporter {
/**
 * Prepares the current transaction for container import: locks the
 * `containers` table and creates the transaction-scoped staging table.
 */
public fun prepare(connection: Connection) {
// NOTE(review): presumably the exclusive lock makes the
// insert-if-absent + id-lookup sequence in addContainers race-free
// against concurrent importers — confirm.
connection.prepareStatement(
"""
LOCK TABLE containers IN EXCLUSIVE MODE
""".trimIndent()
).use { stmt ->
stmt.execute()
}
// Staging table for one batch; dropped automatically at commit.
connection.prepareStatement(
"""
CREATE TEMPORARY TABLE tmp_containers (
index INTEGER NOT NULL,
crc32 INTEGER NOT NULL,
whirlpool BYTEA NOT NULL,
data BYTEA NOT NULL,
encrypted BOOLEAN NOT NULL
) ON COMMIT DROP
""".trimIndent()
).use { stmt ->
stmt.execute()
}
}
/**
 * Convenience wrapper around [addContainers] for a single container.
 *
 * @return the `containers.id` of the (possibly pre-existing) row.
 */
public fun addContainer(connection: Connection, container: Container): Long {
return addContainers(connection, listOf(container)).single()
}
/**
 * Inserts any containers not already present (matched by Whirlpool digest)
 * and returns the `containers.id` for each input, in input order.
 *
 * [prepare] must have been called in the same transaction.
 */
public fun addContainers(connection: Connection, containers: List<Container>): List<Long> {
// Clear leftovers from any earlier batch in this transaction.
connection.prepareStatement(
"""
TRUNCATE TABLE tmp_containers
""".trimIndent()
).use { stmt ->
stmt.execute()
}
// Stage the batch; `index` preserves the caller's ordering.
connection.prepareStatement(
"""
INSERT INTO tmp_containers (index, crc32, whirlpool, data, encrypted)
VALUES (?, ?, ?, ?, ?)
""".trimIndent()
).use { stmt ->
for ((i, container) in containers.withIndex()) {
stmt.setInt(1, i)
stmt.setInt(2, container.crc32)
stmt.setBytes(3, container.whirlpool)
stmt.setBytes(4, container.bytes)
stmt.setBoolean(5, container.encrypted)
stmt.addBatch()
}
stmt.executeBatch()
}
// Copy only rows whose whirlpool is not yet in containers (anti-join);
// ON CONFLICT DO NOTHING as a belt-and-braces guard.
connection.prepareStatement(
"""
INSERT INTO containers (crc32, whirlpool, data, encrypted)
SELECT t.crc32, t.whirlpool, t.data, t.encrypted
FROM tmp_containers t
LEFT JOIN containers c ON c.whirlpool = t.whirlpool
WHERE c.whirlpool IS NULL
ON CONFLICT DO NOTHING
""".trimIndent()
).use { stmt ->
stmt.execute()
}
// Resolve ids in the caller's original order via the staged `index`.
val ids = mutableListOf<Long>()
connection.prepareStatement(
"""
SELECT c.id
FROM tmp_containers t
JOIN containers c ON c.whirlpool = t.whirlpool
ORDER BY t.index ASC
""".trimIndent()
).use { stmt ->
stmt.executeQuery().use { rows ->
while (rows.next()) {
ids += rows.getLong(1)
}
}
}
// Every staged row must have resolved to exactly one container id.
check(ids.size == containers.size)
return ids
}
}

@ -0,0 +1,28 @@
package org.openrs2.archive.masterindex
import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.types.path
import com.google.inject.Guice
import io.netty.buffer.Unpooled
import kotlinx.coroutines.runBlocking
import org.openrs2.archive.ArchiveModule
import org.openrs2.buffer.use
import java.nio.file.Files
/**
 * CLI command that reads a raw master index container from a file and
 * imports it into the database via [MasterIndexImporter].
 */
public class ImportCommand : CliktCommand(name = "import") {
    private val input by argument().path(
        mustExist = true,
        canBeDir = false,
        mustBeReadable = true
    )

    override fun run(): Unit = runBlocking {
        val masterIndexImporter = Guice.createInjector(ArchiveModule)
            .getInstance(MasterIndexImporter::class.java)

        // Wrap the file contents in an unpooled buffer; use {} releases it
        // once the import completes.
        val bytes = Files.readAllBytes(input)
        Unpooled.wrappedBuffer(bytes).use { buf ->
            masterIndexImporter.import(buf)
        }
    }
}

@ -0,0 +1,12 @@
package org.openrs2.archive.masterindex
import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands
/**
 * Parent CLI command grouping master index subcommands under
 * `master-index`; performs no work itself (NoOpCliktCommand).
 */
public class MasterIndexCommand : NoOpCliktCommand(name = "master-index") {
init {
subcommands(
ImportCommand()
)
}
}

@ -0,0 +1,63 @@
package org.openrs2.archive.masterindex
import io.netty.buffer.ByteBuf
import org.openrs2.archive.container.Container
import org.openrs2.archive.container.ContainerImporter
import org.openrs2.buffer.use
import org.openrs2.cache.Js5Compression
import org.openrs2.cache.Js5MasterIndex
import org.openrs2.db.Database
import javax.inject.Inject
import javax.inject.Singleton
@Singleton
public class MasterIndexImporter @Inject constructor(
private val database: Database
) {
private class MasterIndex(
data: ByteBuf,
val index: Js5MasterIndex
) : Container(data) {
override val encrypted: Boolean = false
}
public suspend fun import(buf: ByteBuf) {
database.execute { connection ->
ContainerImporter.prepare(connection)
val masterIndex = Js5Compression.uncompress(buf).use { uncompressed ->
MasterIndex(buf, Js5MasterIndex.read(uncompressed))
}
val containerId = ContainerImporter.addContainer(connection, masterIndex)
connection.prepareStatement(
"""
INSERT INTO master_indexes (container_id)
VALUES (?)
""".trimIndent()
).use { stmt ->
stmt.setLong(1, containerId)
stmt.execute()
}
connection.prepareStatement(
"""
INSERT INTO master_index_entries (container_id, archive_id, crc32, version)
VALUES (?, ?, ?, ?)
""".trimIndent()
).use { stmt ->
for ((i, entry) in masterIndex.index.entries.withIndex()) {
stmt.setLong(1, containerId)
stmt.setInt(2, i)
stmt.setInt(3, entry.checksum)
stmt.setInt(4, entry.version)
stmt.addBatch()
}
stmt.executeBatch()
}
}
}
}

@ -66,6 +66,19 @@ CREATE TABLE index_files (
FOREIGN KEY (container_id, group_id) REFERENCES index_groups (container_id, group_id)
);
-- One row per imported master index; the referenced container holds the raw
-- master index data.
CREATE TABLE master_indexes (
container_id BIGINT PRIMARY KEY NOT NULL REFERENCES containers (id)
);
-- One row per archive entry in a master index; archive_id is the entry's
-- position within the index (uint1 is a domain presumably declared earlier
-- in this script -- confirm). whirlpool is nullable as not every master
-- index format carries per-archive digests.
CREATE TABLE master_index_entries (
container_id BIGINT NOT NULL REFERENCES master_indexes (container_id),
archive_id uint1 NOT NULL,
crc32 INTEGER NOT NULL,
whirlpool BYTEA NULL,
version INTEGER NOT NULL,
PRIMARY KEY (container_id, archive_id)
);
CREATE TABLE caches (
id BIGSERIAL PRIMARY KEY NOT NULL,
-- This doesn't correspond to a hash used by the client - it was just

Loading…
Cancel
Save