forked from openrs2/openrs2
parent abac785456
commit d755d486d6
@@ -0,0 +1,40 @@
plugins {
    `maven-publish`
    application
    kotlin("jvm")
}

application {
    mainClassName = "dev.openrs2.archive.ArchiveCommandKt"
}

dependencies {
    api("com.github.ajalt.clikt:clikt:${Versions.clikt}")

    implementation(project(":buffer"))
    implementation(project(":cache"))
    implementation(project(":db"))
    implementation(project(":json"))
    implementation(project(":util"))
    implementation("com.google.guava:guava:${Versions.guava}")
    implementation("org.flywaydb:flyway-core:${Versions.flyway}")
    implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:${Versions.kotlinCoroutines}")
    implementation("org.postgresql:postgresql:${Versions.postgres}")
}

publishing {
    publications.create<MavenPublication>("maven") {
        from(components["java"])

        pom {
            packaging = "jar"
            name.set("OpenRS2 Archive")
            description.set(
                """
                Service for archiving clients, caches and XTEA keys in an
                efficient deduplicated format.
                """.trimIndent()
            )
        }
    }
}
@@ -0,0 +1,19 @@
package dev.openrs2.archive

import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands
import dev.openrs2.archive.cache.CacheCommand
import dev.openrs2.archive.key.KeyCommand
import dev.openrs2.archive.name.NameCommand

public fun main(args: Array<String>): Unit = ArchiveCommand().main(args)

public class ArchiveCommand : NoOpCliktCommand(name = "archive") {
    init {
        subcommands(
            CacheCommand(),
            KeyCommand(),
            NameCommand()
        )
    }
}
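With the subcommands registered above, the tool is invoked as `archive cache import`, `archive cache export`, `archive key import`, `archive key brute-force`, `archive name generate` or `archive name import`, matching the command names defined in the files below.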
@@ -0,0 +1,18 @@
package dev.openrs2.archive

import com.google.inject.AbstractModule
import com.google.inject.Scopes
import dev.openrs2.buffer.BufferModule
import dev.openrs2.db.Database
import dev.openrs2.json.JsonModule

public object ArchiveModule : AbstractModule() {
    override fun configure() {
        install(BufferModule)
        install(JsonModule)

        bind(Database::class.java)
            .toProvider(DatabaseProvider::class.java)
            .`in`(Scopes.SINGLETON)
    }
}
@@ -0,0 +1,25 @@
package dev.openrs2.archive

import dev.openrs2.db.Database
import dev.openrs2.db.PostgresDeadlockDetector
import javax.inject.Provider
import org.flywaydb.core.Flyway
import org.postgresql.ds.PGSimpleDataSource

public class DatabaseProvider : Provider<Database> {
    override fun get(): Database {
        val dataSource = PGSimpleDataSource()
        // TODO(gpe): make the URL configurable
        dataSource.setUrl("jdbc:postgresql://localhost/runearchive?user=gpe&password=gpe")

        Flyway.configure()
            .dataSource(dataSource)
            .locations("classpath:/dev/openrs2/archive")
            .load()
            .migrate()

        // TODO(gpe): wrap dataSource with HikariCP? how do we close the pool?

        return Database(dataSource, deadlockDetector = PostgresDeadlockDetector)
    }
}
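On the pooling TODO above: one possible shape, sketched here rather than part of this change, is to build the pool inside the provider and close it from whatever owns the provider's lifecycle. `HikariDataSource` implements `Closeable`, so the open question is only where `close()` gets called. This assumes a `com.zaxxer:HikariCP` dependency that this diff does not add:

import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.HikariDataSource
import org.postgresql.ds.PGSimpleDataSource

// Hypothetical pooling wrapper; HikariCP is not a dependency of this module.
fun createPooledDataSource(url: String): HikariDataSource {
    val pgDataSource = PGSimpleDataSource()
    pgDataSource.setUrl(url)

    val config = HikariConfig()
    config.dataSource = pgDataSource // pool on top of the driver's data source

    // HikariDataSource implements Closeable; closing it shuts down the pool.
    return HikariDataSource(config)
}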
@@ -0,0 +1,13 @@
package dev.openrs2.archive.cache

import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands

public class CacheCommand : NoOpCliktCommand(name = "cache") {
    init {
        subcommands(
            ImportCommand(),
            ExportCommand()
        )
    }
}
@@ -0,0 +1,65 @@
package dev.openrs2.archive.cache

import dev.openrs2.buffer.use
import dev.openrs2.cache.Store
import dev.openrs2.db.Database
import io.netty.buffer.ByteBufAllocator
import io.netty.buffer.Unpooled
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class CacheExporter @Inject constructor(
    private val database: Database,
    private val alloc: ByteBufAllocator
) {
    public suspend fun export(id: Long, store: Store) {
        // TODO(gpe): think about what to do if there is a collision
        database.execute { connection ->
            connection.prepareStatement(
                """
                SELECT 255::uint1, ci.archive_id::INTEGER, c.data, NULL
                FROM cache_indexes ci
                JOIN containers c ON c.id = ci.container_id
                WHERE ci.cache_id = ?
                UNION ALL
                SELECT ci.archive_id, ig.group_id, c.data, g.truncated_version
                FROM cache_indexes ci
                JOIN index_groups ig ON ig.container_id = ci.container_id
                JOIN groups g ON g.archive_id = ci.archive_id AND g.group_id = ig.group_id AND g.truncated_version = ig.version & 65535
                JOIN containers c ON c.id = g.container_id AND c.crc32 = ig.crc32
                WHERE ci.cache_id = ?
                """.trimIndent()
            ).use { stmt ->
                stmt.fetchSize = BATCH_SIZE
                stmt.setLong(1, id)
                stmt.setLong(2, id)

                stmt.executeQuery().use { rows ->
                    alloc.buffer(2, 2).use { versionBuf ->
                        while (rows.next()) {
                            val archive = rows.getInt(1)
                            val group = rows.getInt(2)
                            val bytes = rows.getBytes(3)
                            val version = rows.getInt(4)
                            val versionNull = rows.wasNull()

                            versionBuf.clear()
                            if (!versionNull) {
                                versionBuf.writeShort(version)
                            }

                            Unpooled.wrappedBuffer(Unpooled.wrappedBuffer(bytes), versionBuf.retain()).use { buf ->
                                store.write(archive, group, buf)
                            }
                        }
                    }
                }
            }
        }
    }

    private companion object {
        private const val BATCH_SIZE = 1024
    }
}
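For context on the two-byte `versionBuf` above: a group's container in a disk store ends with a version trailer, which the importer below strips before hashing (`VersionTrailer.strip`) and which the exporter re-appends here from the stored `truncated_version`. A minimal standalone sketch of the re-append step, with illustrative byte values:

import io.netty.buffer.Unpooled

fun main() {
    val data = byteArrayOf(1, 2, 3) // container bytes as stored in the database
    val version = 0x1234            // truncated 16-bit group version

    val versionBuf = Unpooled.buffer(2, 2).writeShort(version)
    val buf = Unpooled.wrappedBuffer(Unpooled.wrappedBuffer(data), versionBuf)

    println(buf.readableBytes()) // 5: the container data plus the 2-byte trailer
}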
@@ -0,0 +1,375 @@
package dev.openrs2.archive.cache

import dev.openrs2.buffer.crc32
import dev.openrs2.buffer.use
import dev.openrs2.cache.Js5Archive
import dev.openrs2.cache.Js5Compression
import dev.openrs2.cache.Js5Index
import dev.openrs2.cache.Store
import dev.openrs2.cache.VersionTrailer
import dev.openrs2.crypto.Whirlpool
import dev.openrs2.db.Database
import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufAllocator
import io.netty.buffer.ByteBufUtil
import io.netty.util.ReferenceCounted
import java.io.IOException
import java.sql.Connection
import java.sql.Types
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class CacheImporter @Inject constructor(
    private val database: Database,
    private val alloc: ByteBufAllocator
) {
    private abstract class Container(
        private val data: ByteBuf
    ) : ReferenceCounted by data {
        val bytes: ByteArray = ByteBufUtil.getBytes(data, data.readerIndex(), data.readableBytes(), false)
        val crc32 = data.crc32()
        val whirlpool = Whirlpool.whirlpool(bytes)
        abstract val encrypted: Boolean
    }

    private class Index(
        val archive: Int,
        val index: Js5Index,
        data: ByteBuf
    ) : Container(data) {
        override val encrypted: Boolean = false
    }

    private class Group(
        val archive: Int,
        val group: Int,
        data: ByteBuf,
        val version: Int,
        override val encrypted: Boolean
    ) : Container(data)

    public suspend fun import(store: Store) {
        database.execute { connection ->
            connection.prepareStatement(
                """
                LOCK TABLE containers IN EXCLUSIVE MODE
                """.trimIndent()
            ).use { stmt ->
                stmt.execute()
            }

            // create temporary tables
            connection.prepareStatement(
                """
                CREATE TEMPORARY TABLE tmp_containers (
                    index INTEGER NOT NULL,
                    crc32 INTEGER NOT NULL,
                    whirlpool BYTEA NOT NULL,
                    data BYTEA NOT NULL,
                    encrypted BOOLEAN NOT NULL
                ) ON COMMIT DROP
                """.trimIndent()
            ).use { stmt ->
                stmt.execute()
            }

            // import indexes
            val indexes = mutableListOf<Index>()
            try {
                for (archive in store.list(Js5Archive.ARCHIVESET)) {
                    indexes += readIndex(store, archive)
                }

                val cacheId = addCache(connection, indexes)

                for (index in indexes) {
                    addIndex(connection, cacheId, index)
                }
            } finally {
                indexes.forEach(Index::release)
            }

            // import groups
            val groups = mutableListOf<Group>()
            try {
                for (archive in store.list()) {
                    if (archive == Js5Archive.ARCHIVESET) {
                        continue
                    }

                    for (id in store.list(archive)) {
                        val group = readGroup(store, archive, id) ?: continue
                        groups += group

                        if (groups.size >= BATCH_SIZE) {
                            addGroups(connection, groups)

                            groups.forEach(Group::release)
                            groups.clear()
                        }
                    }
                }

                if (groups.isNotEmpty()) {
                    addGroups(connection, groups)
                }
            } finally {
                groups.forEach(Group::release)
            }
        }
    }

    private fun readGroup(store: Store, archive: Int, group: Int): Group? {
        try {
            store.read(archive, group).use { buf ->
                val version = VersionTrailer.strip(buf) ?: return null
                val encrypted = Js5Compression.isEncrypted(buf.slice())
                return Group(archive, group, buf.retain(), version, encrypted)
            }
        } catch (ex: IOException) {
            return null
        }
    }

    private fun addContainer(connection: Connection, container: Container): Long {
        return addContainers(connection, listOf(container)).single()
    }

    private fun addContainers(connection: Connection, containers: List<Container>): List<Long> {
        connection.prepareStatement(
            """
            TRUNCATE TABLE tmp_containers
            """.trimIndent()
        ).use { stmt ->
            stmt.execute()
        }

        connection.prepareStatement(
            """
            INSERT INTO tmp_containers (index, crc32, whirlpool, data, encrypted)
            VALUES (?, ?, ?, ?, ?)
            """.trimIndent()
        ).use { stmt ->
            for ((i, container) in containers.withIndex()) {
                stmt.setInt(1, i)
                stmt.setInt(2, container.crc32)
                stmt.setBytes(3, container.whirlpool)
                stmt.setBytes(4, container.bytes)
                stmt.setBoolean(5, container.encrypted)
                stmt.addBatch()
            }

            stmt.executeBatch()
        }

        connection.prepareStatement(
            """
            INSERT INTO containers (crc32, whirlpool, data, encrypted)
            SELECT t.crc32, t.whirlpool, t.data, t.encrypted
            FROM tmp_containers t
            LEFT JOIN containers c ON c.whirlpool = t.whirlpool
            WHERE c.whirlpool IS NULL
            ON CONFLICT DO NOTHING
            """.trimIndent()
        ).use { stmt ->
            stmt.execute()
        }

        val ids = mutableListOf<Long>()

        connection.prepareStatement(
            """
            SELECT c.id
            FROM tmp_containers t
            JOIN containers c ON c.whirlpool = t.whirlpool
            ORDER BY t.index ASC
            """.trimIndent()
        ).use { stmt ->
            stmt.executeQuery().use { rows ->
                while (rows.next()) {
                    ids += rows.getLong(1)
                }
            }
        }

        check(ids.size == containers.size)
        return ids
    }

    private fun addGroups(connection: Connection, groups: List<Group>) {
        val containerIds = addContainers(connection, groups)

        connection.prepareStatement(
            """
            INSERT INTO groups (archive_id, group_id, container_id, truncated_version)
            VALUES (?, ?, ?, ?)
            ON CONFLICT DO NOTHING
            """.trimIndent()
        ).use { stmt ->
            for ((i, group) in groups.withIndex()) {
                stmt.setInt(1, group.archive)
                stmt.setInt(2, group.group)
                stmt.setLong(3, containerIds[i])
                stmt.setInt(4, group.version)
                stmt.addBatch()
            }

            stmt.executeBatch()
        }
    }

    private fun readIndex(store: Store, archive: Int): Index {
        return store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
            Js5Compression.uncompress(buf.slice()).use { uncompressed ->
                Index(archive, Js5Index.read(uncompressed), buf.retain())
            }
        }
    }

    private fun addCache(connection: Connection, indexes: List<Index>): Long {
        val len = indexes.size * (1 + Whirlpool.DIGESTBYTES)
        val whirlpool = alloc.buffer(len, len).use { buf ->
            for (index in indexes) {
                buf.writeByte(index.archive)
                buf.writeBytes(index.whirlpool)
            }

            Whirlpool.whirlpool(ByteBufUtil.getBytes(buf, 0, buf.readableBytes(), false))
        }

        connection.prepareStatement(
            """
            SELECT id
            FROM caches
            WHERE whirlpool = ?
            """.trimIndent()
        ).use { stmt ->
            stmt.setBytes(1, whirlpool)

            stmt.executeQuery().use { rows ->
                if (rows.next()) {
                    return rows.getLong(1)
                }
            }
        }

        connection.prepareStatement(
            """
            INSERT INTO caches (whirlpool)
            VALUES (?)
            ON CONFLICT DO NOTHING
            RETURNING id
            """.trimIndent()
        ).use { stmt ->
            stmt.setBytes(1, whirlpool)

            stmt.executeQuery().use { rows ->
                if (rows.next()) {
                    // return the freshly inserted ID; on a conflict no row is
                    // returned and we fall through to the SELECT below
                    return rows.getLong(1)
                }
            }
        }

        connection.prepareStatement(
            """
            SELECT id
            FROM caches
            WHERE whirlpool = ?
            """.trimIndent()
        ).use { stmt ->
            stmt.setBytes(1, whirlpool)

            stmt.executeQuery().use { rows ->
                check(rows.next())
                return rows.getLong(1)
            }
        }
    }

    // TODO(gpe): skip most of this function if we encounter a conflict?
    private fun addIndex(connection: Connection, cacheId: Long, index: Index) {
        val containerId = addContainer(connection, index)

        connection.prepareStatement(
            """
            INSERT INTO indexes (container_id, version)
            VALUES (?, ?)
            ON CONFLICT DO NOTHING
            """.trimIndent()
        ).use { stmt ->
            stmt.setLong(1, containerId)
            stmt.setInt(2, index.index.version)
            stmt.execute()
        }

        connection.prepareStatement(
            """
            INSERT INTO cache_indexes (cache_id, archive_id, container_id)
            VALUES (?, ?, ?)
            ON CONFLICT DO NOTHING
            """.trimIndent()
        ).use { stmt ->
            stmt.setLong(1, cacheId)
            stmt.setInt(2, index.archive)
            stmt.setLong(3, containerId)
            stmt.execute()
        }

        connection.prepareStatement(
            """
            INSERT INTO index_groups (container_id, group_id, crc32, whirlpool, version, name_hash)
            VALUES (?, ?, ?, ?, ?, ?)
            ON CONFLICT DO NOTHING
            """.trimIndent()
        ).use { stmt ->
            for (group in index.index) {
                stmt.setLong(1, containerId)
                stmt.setInt(2, group.id)
                stmt.setInt(3, group.checksum)
                stmt.setBytes(4, group.digest)
                stmt.setInt(5, group.version)

                if (index.index.hasNames) {
                    stmt.setInt(6, group.nameHash)
                } else {
                    stmt.setNull(6, Types.INTEGER)
                }

                stmt.addBatch()
            }

            stmt.executeBatch()
        }

        connection.prepareStatement(
            """
            INSERT INTO index_files (container_id, group_id, file_id, name_hash)
            VALUES (?, ?, ?, ?)
            ON CONFLICT DO NOTHING
            """.trimIndent()
        ).use { stmt ->
            for (group in index.index) {
                for (file in group) {
                    stmt.setLong(1, containerId)
                    stmt.setInt(2, group.id)
                    stmt.setInt(3, file.id)

                    if (index.index.hasNames) {
                        stmt.setInt(4, file.nameHash)
                    } else {
                        stmt.setNull(4, Types.INTEGER)
                    }

                    stmt.addBatch()
                }
            }

            stmt.executeBatch()
        }
    }

    private companion object {
        private const val BATCH_SIZE = 1024
    }
}
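The staging pattern in `addContainers` is what provides the deduplication promised in the POM description: each batch is written to `tmp_containers`, only rows whose Whirlpool digest is not already present are copied into `containers`, and the final `JOIN` maps every staged row back to its canonical `containers.id`, whether it was just inserted or already existed.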
@@ -0,0 +1,29 @@
package dev.openrs2.archive.cache

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.types.long
import com.github.ajalt.clikt.parameters.types.path
import com.google.inject.Guice
import dev.openrs2.archive.ArchiveModule
import dev.openrs2.cache.DiskStore
import kotlinx.coroutines.runBlocking

public class ExportCommand : CliktCommand(name = "export") {
    private val id by argument().long()
    private val output by argument().path(
        mustExist = true,
        canBeFile = false,
        mustBeReadable = true,
        mustBeWritable = true
    )

    override fun run(): Unit = runBlocking {
        val injector = Guice.createInjector(ArchiveModule)
        val exporter = injector.getInstance(CacheExporter::class.java)

        DiskStore.create(output).use { store ->
            exporter.export(id, store)
        }
    }
}
@@ -0,0 +1,26 @@
package dev.openrs2.archive.cache

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.types.path
import com.google.inject.Guice
import dev.openrs2.archive.ArchiveModule
import dev.openrs2.cache.Store
import kotlinx.coroutines.runBlocking

public class ImportCommand : CliktCommand(name = "import") {
    private val input by argument().path(
        mustExist = true,
        canBeFile = false,
        mustBeReadable = true
    )

    override fun run(): Unit = runBlocking {
        val injector = Guice.createInjector(ArchiveModule)
        val importer = injector.getInstance(CacheImporter::class.java)

        Store.open(input).use { store ->
            importer.import(store)
        }
    }
}
@@ -0,0 +1,15 @@
package dev.openrs2.archive.key

import com.github.ajalt.clikt.core.CliktCommand
import com.google.inject.Guice
import dev.openrs2.archive.ArchiveModule
import kotlinx.coroutines.runBlocking

public class BruteForceCommand : CliktCommand(name = "brute-force") {
    override fun run(): Unit = runBlocking {
        val injector = Guice.createInjector(ArchiveModule)
        val bruteForcer = injector.getInstance(KeyBruteForcer::class.java)

        bruteForcer.bruteForce()
    }
}
@@ -0,0 +1,13 @@
package dev.openrs2.archive.key

import dev.openrs2.crypto.XteaKey
import java.io.InputStream

public object HexKeyReader : KeyReader {
    override fun read(input: InputStream): Sequence<XteaKey> {
        return input.bufferedReader()
            .lineSequence()
            .map(XteaKey::fromHexOrNull)
            .filterNotNull()
    }
}
@@ -0,0 +1,22 @@
package dev.openrs2.archive.key

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.types.path
import com.google.inject.Guice
import dev.openrs2.archive.ArchiveModule
import kotlinx.coroutines.runBlocking

public class ImportCommand : CliktCommand(name = "import") {
    private val input by argument().path(
        mustExist = true,
        mustBeReadable = true
    )

    override fun run(): Unit = runBlocking {
        val injector = Guice.createInjector(ArchiveModule)
        val importer = injector.getInstance(KeyImporter::class.java)

        importer.import(input)
    }
}
@@ -0,0 +1,32 @@
package dev.openrs2.archive.key

import com.fasterxml.jackson.databind.ObjectMapper
import dev.openrs2.crypto.XteaKey
import dev.openrs2.json.Json
import java.io.InputStream
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class JsonKeyReader @Inject constructor(
    @Json private val mapper: ObjectMapper
) : KeyReader {
    override fun read(input: InputStream): Sequence<XteaKey> {
        return sequence {
            for (mapSquare in mapper.readTree(input)) {
                val key = mapSquare["key"] ?: mapSquare["keys"] ?: continue

                if (key.size() != 4) {
                    continue
                }

                val k0 = key[0].asText().toIntOrNull() ?: continue
                val k1 = key[1].asText().toIntOrNull() ?: continue
                val k2 = key[2].asText().toIntOrNull() ?: continue
                val k3 = key[3].asText().toIntOrNull() ?: continue

                yield(XteaKey(k0, k1, k2, k3))
            }
        }
    }
}
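As the reader above implies, the expected input is a JSON array whose elements each carry a `key` (or `keys`) field holding four integer components; the components may be numbers or numeric strings, since both pass through `asText().toIntOrNull()`. A hypothetical document it would accept (the `mapsquare` field is illustrative and ignored by the reader):

val json = """
    [
        {"mapsquare": 12850, "key": [1, 2, 3, 4]},
        {"mapsquare": 12851, "keys": ["5", "6", "7", "8"]}
    ]
""".trimIndent()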
@@ -0,0 +1,288 @@
package dev.openrs2.archive.key

import dev.openrs2.buffer.use
import dev.openrs2.cache.Js5Compression
import dev.openrs2.crypto.XteaKey
import dev.openrs2.db.Database
import io.netty.buffer.Unpooled
import java.sql.Connection
import java.sql.Types
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class KeyBruteForcer @Inject constructor(
    private val database: Database
) {
    /*
     * The code for writing to the containers and keys tables ensures that the
     * row IDs are allocated monotonically (by forbidding any other
     * transactions from writing simultaneously with an EXCLUSIVE table lock).
     *
     * Rather than storing a list of (container, key) pairs which have yet to
     * be tested (or have already been tested), which would take O(n*m) space,
     * the containers/keys are tested in order. This means we only need to
     * store the IDs of the last container/key tested.
     *
     * If the container/key combinations are represented as a matrix, it looks
     * like the diagram below:
     *
     *     containers ->
     *   +----------+----------+
     * k |##########|          |
     * e |##########|    A     |
     * y |##########|          |
     * s +----------+----------+
     *   |          |          |
     * | |    C     |    B     |
     * v |          |          |
     *   +----------+----------+
     *
     * The shaded area represents combinations that have already been tried.
     *
     * When a new container is inserted, we test it against every key in the
     * shaded area (quadrant A).
     *
     * When a new key is inserted, we test it against every container in the
     * shaded area (quadrant C).
     *
     * If keys and containers are inserted simultaneously, we take care to
     * avoid testing them twice (quadrant B) by testing new containers against
     * all keys but not vice-versa.
     *
     * This code can't tolerate new IDs being inserted while it runs, so it
     * locks the tables in SHARE mode. This prevents the import process from
     * running (which takes EXCLUSIVE locks) but allows other processes to read
     * from the tables.
     */
    public suspend fun bruteForce() {
        database.execute { connection ->
            connection.prepareStatement(
                """
                LOCK TABLE containers, keys IN SHARE MODE
                """.trimIndent()
            ).use { stmt ->
                stmt.execute()
            }

            bruteForceNewContainers(connection) // A, B
            bruteForceNewKeys(connection) // C
        }
    }

    private fun bruteForceNewContainers(connection: Connection) {
        var lastContainerId: Long?

        connection.prepareStatement(
            """
            SELECT last_container_id
            FROM brute_force_iterator
            FOR UPDATE
            """.trimIndent()
        ).use { stmt ->
            stmt.executeQuery().use { rows ->
                check(rows.next())

                lastContainerId = rows.getLong(1)
                if (rows.wasNull()) {
                    lastContainerId = null
                }
            }
        }

        while (true) {
            val pair = nextContainer(connection, lastContainerId) ?: break
            val (containerId, data) = pair

            connection.prepareStatement(
                """
                SELECT id, (key).k0, (key).k1, (key).k2, (key).k3
                FROM keys
                """.trimIndent()
            ).use { stmt ->
                stmt.fetchSize = BATCH_SIZE

                stmt.executeQuery().use { rows ->
                    while (rows.next()) {
                        val keyId = rows.getLong(1)

                        val k0 = rows.getInt(2)
                        val k1 = rows.getInt(3)
                        val k2 = rows.getInt(4)
                        val k3 = rows.getInt(5)
                        val key = XteaKey(k0, k1, k2, k3)

                        val valid = Unpooled.wrappedBuffer(data).use { buf ->
                            Js5Compression.isKeyValid(buf, key)
                        }

                        if (valid) {
                            connection.prepareStatement(
                                """
                                UPDATE containers
                                SET key_id = ?
                                WHERE id = ?
                                """.trimIndent()
                            ).use { updateStmt ->
                                updateStmt.setLong(1, keyId)
                                updateStmt.setLong(2, containerId)
                                updateStmt.execute()
                            }
                            break
                        }
                    }
                }

                lastContainerId = containerId
            }
        }

        connection.prepareStatement(
            """
            UPDATE brute_force_iterator
            SET last_container_id = ?
            """.trimIndent()
        ).use { stmt ->
            stmt.setObject(1, lastContainerId, Types.BIGINT)
            stmt.execute()
        }
    }

    private fun nextContainer(connection: Connection, lastContainerId: Long?): Pair<Long, ByteArray>? {
        connection.prepareStatement(
            """
            SELECT id, data
            FROM containers
            WHERE (? IS NULL OR id > ?) AND encrypted AND key_id IS NULL
            LIMIT 1
            """.trimIndent()
        ).use { stmt ->
            stmt.setObject(1, lastContainerId, Types.BIGINT)
            stmt.setObject(2, lastContainerId, Types.BIGINT)

            stmt.executeQuery().use { rows ->
                if (!rows.next()) {
                    return null
                }

                val containerId = rows.getLong(1)
                val data = rows.getBytes(2)
                return Pair(containerId, data)
            }
        }
    }

    private fun bruteForceNewKeys(connection: Connection) {
        var lastKeyId: Long?
        var lastContainerId: Long

        connection.prepareStatement(
            """
            SELECT last_key_id, last_container_id
            FROM brute_force_iterator
            FOR UPDATE
            """.trimIndent()
        ).use { stmt ->
            stmt.executeQuery().use { rows ->
                check(rows.next())

                lastKeyId = rows.getLong(1)
                if (rows.wasNull()) {
                    lastKeyId = null
                }

                lastContainerId = rows.getLong(2)
                if (rows.wasNull()) {
                    // no containers tested yet, so quadrant C is empty
                    return
                }
            }
        }

        while (true) {
            val pair = nextKey(connection, lastKeyId) ?: break
            val (keyId, key) = pair

            connection.prepareStatement(
                """
                SELECT id, data
                FROM containers
                WHERE encrypted AND key_id IS NULL AND id < ?
                """.trimIndent()
            ).use { stmt ->
                stmt.fetchSize = BATCH_SIZE
                stmt.setLong(1, lastContainerId)

                stmt.executeQuery().use { rows ->
                    while (rows.next()) {
                        val containerId = rows.getLong(1)
                        val data = rows.getBytes(2)

                        val valid = Unpooled.wrappedBuffer(data).use { buf ->
                            Js5Compression.isKeyValid(buf, key)
                        }

                        if (valid) {
                            connection.prepareStatement(
                                """
                                UPDATE containers
                                SET key_id = ?
                                WHERE id = ?
                                """.trimIndent()
                            ).use { updateStmt ->
                                updateStmt.setLong(1, keyId)
                                updateStmt.setLong(2, containerId)
                                updateStmt.execute()
                            }
                        }
                    }
                }
            }

            lastKeyId = keyId
        }

        connection.prepareStatement(
            """
            UPDATE brute_force_iterator
            SET last_key_id = ?
            """.trimIndent()
        ).use { stmt ->
            stmt.setObject(1, lastKeyId, Types.BIGINT)
            stmt.execute()
        }
    }

    private fun nextKey(connection: Connection, lastKeyId: Long?): Pair<Long, XteaKey>? {
        connection.prepareStatement(
            """
            SELECT id, (key).k0, (key).k1, (key).k2, (key).k3
            FROM keys
            WHERE ? IS NULL OR id > ?
            LIMIT 1
            """.trimIndent()
        ).use { stmt ->
            stmt.setObject(1, lastKeyId, Types.BIGINT)
            stmt.setObject(2, lastKeyId, Types.BIGINT)

            stmt.executeQuery().use { rows ->
                if (!rows.next()) {
                    return null
                }

                val keyId = rows.getLong(1)

                val k0 = rows.getInt(2)
                val k1 = rows.getInt(3)
                val k2 = rows.getInt(4)
                val k3 = rows.getInt(5)
                val key = XteaKey(k0, k1, k2, k3)

                return Pair(keyId, key)
            }
        }
    }

    private companion object {
        private const val BATCH_SIZE = 1024
    }
}
@@ -0,0 +1,13 @@
package dev.openrs2.archive.key

import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands

public class KeyCommand : NoOpCliktCommand(name = "key") {
    init {
        subcommands(
            BruteForceCommand(),
            ImportCommand()
        )
    }
}
@@ -0,0 +1,96 @@
package dev.openrs2.archive.key

import dev.openrs2.crypto.XteaKey
import dev.openrs2.db.Database
import java.nio.file.Files
import java.nio.file.Path
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class KeyImporter @Inject constructor(
    private val database: Database,
    private val jsonKeyReader: JsonKeyReader
) {
    public suspend fun import(path: Path) {
        val keys = mutableSetOf<XteaKey>()

        for (file in Files.walk(path)) {
            if (!Files.isRegularFile(file)) {
                continue
            }

            val name = file.fileName.toString()
            val reader = when {
                name.endsWith(".hex") -> HexKeyReader
                name.endsWith(".txt") -> TextKeyReader
                name.endsWith(".json") -> jsonKeyReader
                else -> continue
            }

            Files.newInputStream(file).use { input ->
                keys += reader.read(input)
            }
        }

        keys -= XteaKey.ZERO

        return import(keys)
    }

    public suspend fun import(keys: Iterable<XteaKey>) {
        database.execute { connection ->
            connection.prepareStatement(
                """
                LOCK TABLE keys IN EXCLUSIVE MODE
                """.trimIndent()
            ).use { stmt ->
                stmt.execute()
            }

            connection.prepareStatement(
                """
                CREATE TEMPORARY TABLE tmp_keys (
                    key xtea_key NOT NULL
                ) ON COMMIT DROP
                """.trimIndent()
            ).use { stmt ->
                stmt.execute()
            }

            connection.prepareStatement(
                """
                INSERT INTO tmp_keys (key)
                VALUES (ROW(?, ?, ?, ?))
                """.trimIndent()
            ).use { stmt ->
                for (key in keys) {
                    if (key.isZero) {
                        continue
                    }

                    stmt.setInt(1, key.k0)
                    stmt.setInt(2, key.k1)
                    stmt.setInt(3, key.k2)
                    stmt.setInt(4, key.k3)
                    stmt.addBatch()
                }

                stmt.executeBatch()
            }

            connection.prepareStatement(
                """
                INSERT INTO keys (key)
                SELECT t.key
                FROM tmp_keys t
                LEFT JOIN keys k ON k.key = t.key
                WHERE k.key IS NULL
                ON CONFLICT DO NOTHING
                """.trimIndent()
            ).use { stmt ->
                stmt.execute()
            }
        }
    }
}
@@ -0,0 +1,8 @@
package dev.openrs2.archive.key

import dev.openrs2.crypto.XteaKey
import java.io.InputStream

public interface KeyReader {
    public fun read(input: InputStream): Sequence<XteaKey>
}
@@ -0,0 +1,17 @@
package dev.openrs2.archive.key

import dev.openrs2.crypto.XteaKey
import java.io.InputStream

public object TextKeyReader : KeyReader {
    override fun read(input: InputStream): Sequence<XteaKey> {
        val reader = input.bufferedReader()

        val k0 = reader.readLine().toIntOrNull() ?: return emptySequence()
        val k1 = reader.readLine().toIntOrNull() ?: return emptySequence()
        val k2 = reader.readLine().toIntOrNull() ?: return emptySequence()
        val k3 = reader.readLine().toIntOrNull() ?: return emptySequence()

        return sequenceOf(XteaKey(k0, k1, k2, k3))
    }
}
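The text format is therefore four lines, one signed 32-bit decimal component per line; anything that fails to parse yields an empty sequence rather than an error. A small hypothetical usage sketch:

val text = "1\n2\n3\n4\n"
val key = TextKeyReader.read(text.byteInputStream()).single()
// key == XteaKey(1, 2, 3, 4)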
@@ -0,0 +1,29 @@
package dev.openrs2.archive.name

import com.github.ajalt.clikt.core.CliktCommand
import com.google.inject.Guice
import dev.openrs2.archive.ArchiveModule
import kotlinx.coroutines.runBlocking

public class GenerateCommand : CliktCommand(name = "generate") {
    override fun run(): Unit = runBlocking {
        val injector = Guice.createInjector(ArchiveModule)
        val importer = injector.getInstance(NameImporter::class.java)

        val names = mutableSetOf<String>()

        for (x in 0..255) {
            for (z in 0..255) {
                for (prefix in PREFIXES) {
                    names += "$prefix${x}_$z"
                }
            }
        }

        importer.import(names)
    }

    private companion object {
        private val PREFIXES = setOf("m", "um", "l", "ul", "n")
    }
}
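This pre-populates the `names` table with every `{prefix}{x}_{z}` map square name (e.g. `m50_50`), so the hashes recorded in the `name_hash` columns of `index_groups` and `index_files` can later be resolved back to names.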
@@ -0,0 +1,22 @@
package dev.openrs2.archive.name

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.options.option
import com.github.ajalt.clikt.parameters.types.defaultStdin
import com.github.ajalt.clikt.parameters.types.inputStream
import com.google.inject.Guice
import dev.openrs2.archive.ArchiveModule
import kotlinx.coroutines.runBlocking

public class ImportCommand : CliktCommand(name = "import") {
    private val input by option().inputStream().defaultStdin()

    override fun run(): Unit = runBlocking {
        val injector = Guice.createInjector(ArchiveModule)
        val importer = injector.getInstance(NameImporter::class.java)

        input.bufferedReader().useLines { lines ->
            importer.import(lines.toSet())
        }
    }
}
@@ -0,0 +1,13 @@
package dev.openrs2.archive.name

import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands

public class NameCommand : NoOpCliktCommand(name = "name") {
    init {
        subcommands(
            GenerateCommand(),
            ImportCommand()
        )
    }
}
@@ -0,0 +1,31 @@
package dev.openrs2.archive.name

import dev.openrs2.db.Database
import dev.openrs2.util.krHashCode
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class NameImporter @Inject constructor(
    private val database: Database
) {
    public suspend fun import(names: Iterable<String>) {
        database.execute { connection ->
            connection.prepareStatement(
                """
                INSERT INTO names (hash, name)
                VALUES (?, ?)
                ON CONFLICT DO NOTHING
                """.trimIndent()
            ).use { stmt ->
                for (name in names) {
                    stmt.setInt(1, name.krHashCode())
                    stmt.setString(2, name)
                    stmt.addBatch()
                }

                stmt.executeBatch()
            }
        }
    }
}
@@ -0,0 +1,94 @@
-- @formatter:off
CREATE EXTENSION uint;

CREATE TYPE xtea_key AS (
    k0 INTEGER,
    k1 INTEGER,
    k2 INTEGER,
    k3 INTEGER
);

CREATE TABLE keys (
    id BIGSERIAL PRIMARY KEY NOT NULL,
    key xtea_key UNIQUE NOT NULL CHECK ((key).k0 <> 0 OR (key).k1 <> 0 OR (key).k2 <> 0 OR (key).k3 <> 0)
);

CREATE TABLE containers (
    id BIGSERIAL PRIMARY KEY NOT NULL,
    crc32 INT NOT NULL,
    whirlpool BYTEA UNIQUE NOT NULL,
    data BYTEA NOT NULL,
    encrypted BOOLEAN NOT NULL,
    key_id BIGINT NULL REFERENCES keys (id)
);

CREATE INDEX ON containers USING HASH (crc32);

CREATE TABLE brute_force_iterator (
    last_container_id BIGINT NULL,
    last_key_id BIGINT NULL
);

CREATE UNIQUE INDEX ON brute_force_iterator ((TRUE));

INSERT INTO brute_force_iterator (last_container_id, last_key_id)
VALUES (NULL, NULL);

CREATE TABLE groups (
    archive_id uint1 NOT NULL,
    group_id INTEGER NOT NULL,
    container_id BIGINT NOT NULL REFERENCES containers (id),
    truncated_version uint2 NOT NULL,
    PRIMARY KEY (archive_id, group_id, container_id, truncated_version)
);

CREATE TABLE indexes (
    container_id BIGINT PRIMARY KEY NOT NULL REFERENCES containers (id),
    version INTEGER NOT NULL
);

CREATE TABLE index_groups (
    container_id BIGINT NOT NULL REFERENCES indexes (container_id),
    group_id INTEGER NOT NULL,
    crc32 INTEGER NOT NULL,
    whirlpool BYTEA NULL,
    version INTEGER NOT NULL,
    name_hash INTEGER NULL,
    PRIMARY KEY (container_id, group_id)
);

CREATE TABLE index_files (
    container_id BIGINT NOT NULL,
    group_id INTEGER NOT NULL,
    file_id INTEGER NOT NULL,
    name_hash INTEGER NULL,
    PRIMARY KEY (container_id, group_id, file_id),
    FOREIGN KEY (container_id, group_id) REFERENCES index_groups (container_id, group_id)
);

CREATE TABLE caches (
    id BIGSERIAL PRIMARY KEY NOT NULL,
    -- This doesn't correspond to a hash used by the client - it was just
    -- convenient to re-use Whirlpool given we already use it elsewhere in the
    -- codebase.
    --
    -- It is a hash over the whirlpool hashes of a cache's Js5Indexes, used to
    -- make it easier to identify an individual cache row in a relational
    -- database.
    whirlpool BYTEA UNIQUE NOT NULL
);

CREATE TABLE cache_indexes (
    cache_id BIGINT NOT NULL REFERENCES caches (id),
    archive_id uint1 NOT NULL,
    container_id BIGINT NOT NULL REFERENCES indexes (container_id),
    PRIMARY KEY (cache_id, archive_id)
);

CREATE TABLE names (
    hash INTEGER NOT NULL,
    name TEXT PRIMARY KEY NOT NULL
);

CREATE UNIQUE INDEX ON names (hash, name);
-- @formatter:on