forked from openrs2/openrs2: comparing 168 commits
@@ -0,0 +1,16 @@
package org.openrs2.archive.cache

import com.github.ajalt.clikt.core.CliktCommand
import com.google.inject.Guice
import kotlinx.coroutines.runBlocking
import org.openrs2.archive.ArchiveModule
import org.openrs2.inject.CloseableInjector

public class CrossPollinateCommand : CliktCommand(name = "cross-pollinate") {
    override fun run(): Unit = runBlocking {
        CloseableInjector(Guice.createInjector(ArchiveModule)).use { injector ->
            val crossPollinator = injector.getInstance(CrossPollinator::class.java)
            crossPollinator.crossPollinate()
        }
    }
}
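Clikt commands only become reachable once they are registered on a parent command. A minimal sketch of that wiring (the parent command here is hypothetical and not part of this diff; the cache CLI module later in this diff uses the same subcommands(...) pattern):

import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands

// Hypothetical parent command, shown only to illustrate how
// cross-pollinate would typically be exposed on the command line.
public class ArchiveCacheCommand : NoOpCliktCommand(name = "cache") {
    init {
        subcommands(CrossPollinateCommand())
    }
}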
@@ -0,0 +1,223 @@
package org.openrs2.archive.cache

import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufAllocator
import io.netty.buffer.ByteBufInputStream
import io.netty.buffer.Unpooled
import org.openrs2.buffer.crc32
import org.openrs2.buffer.use
import org.openrs2.cache.Js5Compression
import org.openrs2.cache.Js5CompressionType
import org.openrs2.db.Database
import java.sql.Connection
import java.util.zip.GZIPInputStream
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class CrossPollinator @Inject constructor(
    private val database: Database,
    private val alloc: ByteBufAllocator,
    private val importer: CacheImporter
) {
    public suspend fun crossPollinate() {
        database.execute { connection ->
            for ((index, archive) in OLD_TO_NEW_ENGINE) {
                crossPollinate(connection, index, archive)
            }
        }
    }

    private fun crossPollinate(connection: Connection, index: Int, archive: Int) {
        val scopeId: Int

        connection.prepareStatement(
            """
            SELECT id
            FROM scopes
            WHERE name = 'runescape'
            """.trimIndent()
        ).use { stmt ->
            stmt.executeQuery().use { rows ->
                check(rows.next())

                scopeId = rows.getInt(1)
            }
        }

        val groups = mutableListOf<CacheImporter.Group>()
        val files = mutableListOf<CacheImporter.File>()

        try {
            connection.prepareStatement(
                """
                SELECT
                    new.group_id AS id,
                    old.version AS old_version,
                    old.crc32 AS old_crc32,
                    b.data AS old_data,
                    new.version AS new_version,
                    new.crc32 AS new_crc32,
                    c.data AS new_data
                FROM (
                    SELECT DISTINCT vf.index_id, vf.file_id, vf.version, vf.crc32
                    FROM version_list_files vf
                    WHERE vf.blob_id IN (
                        SELECT v.blob_id
                        FROM version_lists v
                        JOIN resolved_archives a ON a.blob_id = v.blob_id AND a.archive_id = 5
                    ) AND vf.index_id = ?
                ) old
                JOIN (
                    SELECT DISTINCT ig.group_id, ig.version, ig.crc32
                    FROM index_groups ig
                    WHERE ig.container_id IN (
                        SELECT i.container_id
                        FROM resolved_indexes i
                        WHERE i.scope_id = ? AND i.archive_id = ?
                    )
                ) new ON old.file_id = new.group_id AND old.version = new.version + 1
                LEFT JOIN resolve_file(old.index_id, old.file_id, old.version, old.crc32) b ON TRUE
                LEFT JOIN resolve_group(?, ?::uint1, new.group_id, new.crc32, new.version) c ON TRUE
                WHERE (b.data IS NULL AND c.data IS NOT NULL) OR (b.data IS NOT NULL AND c.data IS NULL)
                """.trimIndent()
            ).use { stmt ->
                stmt.setInt(1, index)
                stmt.setInt(2, scopeId)
                stmt.setInt(3, archive)
                stmt.setInt(4, scopeId)
                stmt.setInt(5, archive)

                stmt.executeQuery().use { rows ->
                    while (rows.next()) {
                        val id = rows.getInt(1)
                        val oldVersion = rows.getInt(2)
                        val oldChecksum = rows.getInt(3)
                        val newVersion = rows.getInt(5)
                        val newChecksum = rows.getInt(6)

                        val oldData = rows.getBytes(4)
                        if (oldData != null) {
                            Unpooled.wrappedBuffer(oldData).use { oldBuf ->
                                fileToGroup(oldBuf, newChecksum).use { newBuf ->
                                    if (newBuf != null) {
                                        val uncompressed = Js5Compression.uncompressUnlessEncrypted(newBuf.slice())
                                        groups += CacheImporter.Group(
                                            archive,
                                            id,
                                            newBuf.retain(),
                                            uncompressed,
                                            newVersion,
                                            false
                                        )
                                    }
                                }
                            }
                        }

                        val newData = rows.getBytes(7)
                        if (newData != null) {
                            Unpooled.wrappedBuffer(newData).use { newBuf ->
                                val oldBuf = groupToFile(newBuf, oldChecksum)
                                if (oldBuf != null) {
                                    files += CacheImporter.File(index, id, oldBuf, oldVersion)
                                }
                            }
                        }
                    }
                }
            }

            if (groups.isEmpty() && files.isEmpty()) {
                return
            }

            importer.prepare(connection)

            val sourceId = importer.addSource(
                connection,
                type = CacheImporter.SourceType.CROSS_POLLINATION,
                cacheId = null,
                gameId = null,
                buildMajor = null,
                buildMinor = null,
                timestamp = null,
                name = null,
                description = null,
                url = null,
            )

            if (groups.isNotEmpty()) {
                importer.addGroups(connection, scopeId, sourceId, groups)
            }

            if (files.isNotEmpty()) {
                importer.addFiles(connection, sourceId, files)
            }
        } finally {
            groups.forEach(CacheImporter.Group::release)
            files.forEach(CacheImporter.File::release)
        }
    }

    private fun getUncompressedLength(buf: ByteBuf): Int {
        GZIPInputStream(ByteBufInputStream(buf)).use { input ->
            var len = 0
            val temp = ByteArray(4096)

            while (true) {
                val n = input.read(temp)
                if (n == -1) {
                    break
                }
                len += n
            }

            return len
        }
    }

    private fun fileToGroup(input: ByteBuf, expectedChecksum: Int): ByteBuf? {
        val len = input.readableBytes()
        val lenWithHeader = len + JS5_COMPRESSION_HEADER_LEN
        val uncompressedLen = getUncompressedLength(input.slice())

        alloc.buffer(lenWithHeader, lenWithHeader).use { output ->
            output.writeByte(Js5CompressionType.GZIP.ordinal)
            output.writeInt(len)
            output.writeInt(uncompressedLen)
            output.writeBytes(input)

            return if (output.crc32() == expectedChecksum) {
                output.retain()
            } else {
                null
            }
        }
    }

    private fun groupToFile(input: ByteBuf, expectedChecksum: Int): ByteBuf? {
        val type = Js5CompressionType.fromOrdinal(input.readUnsignedByte().toInt())
        if (type != Js5CompressionType.GZIP) {
            return null
        }

        input.skipBytes(JS5_COMPRESSION_HEADER_LEN - 1)

        return if (input.crc32() == expectedChecksum) {
            input.retainedSlice()
        } else {
            null
        }
    }

    private companion object {
        private val OLD_TO_NEW_ENGINE = mapOf(
            1 to 7, // MODELS
            3 to 6, // MIDI_SONGS
            4 to 5, // MAPS
        )

        private const val JS5_COMPRESSION_HEADER_LEN = 9
    }
}
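fileToGroup() and groupToFile() rely on old-engine files and new-engine GZIP groups differing only by the 9-byte JS5 container header (a 1-byte compression type, a 4-byte compressed length and a 4-byte uncompressed length), plus the CRC-32 recorded in the index. A minimal sketch of that header, mirroring what the code above writes and skips (illustrative only, not part of the diff):

import io.netty.buffer.ByteBuf
import org.openrs2.cache.Js5CompressionType

// Sketch: decode the 9-byte header of a compressed JS5 container.
fun describeJs5Header(buf: ByteBuf): String {
    val type = Js5CompressionType.fromOrdinal(buf.readUnsignedByte().toInt()) // byte 0
    val compressedLen = buf.readInt() // bytes 1-4: length of the compressed payload
    val uncompressedLen = buf.readInt() // bytes 5-8: length after decompression
    return "type=$type, compressed=$compressedLen, uncompressed=$uncompressedLen"
}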
@@ -0,0 +1,149 @@
package org.openrs2.archive.cache.finder

import com.github.michaelbull.logging.InlineLogger
import com.google.common.io.ByteStreams
import com.google.common.io.LittleEndianDataInputStream
import org.openrs2.util.charset.Cp1252Charset
import java.io.Closeable
import java.io.EOFException
import java.io.IOException
import java.io.InputStream
import java.io.PushbackInputStream
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.attribute.BasicFileAttributeView
import java.nio.file.attribute.FileTime
import java.time.Instant

public class CacheFinderExtractor(
    input: InputStream
) : Closeable {
    private val pushbackInput = PushbackInputStream(input)
    private val input = LittleEndianDataInputStream(pushbackInput)
    private fun readTimestamp(): FileTime {
        val lo = input.readInt().toLong() and 0xFFFFFFFF
        val hi = input.readInt().toLong() and 0xFFFFFFFF

        // A Windows FILETIME counts 100ns ticks since 1601-01-01. Convert to
        // the Unix epoch, keeping the sub-second remainder as nanoseconds
        // (passing `lo` as the nanosecond argument, as the original did,
        // yields incorrect sub-second values).
        val ticks = (hi shl 32) or lo
        val seconds = (ticks / 10_000_000) - FILETIME_TO_UNIX_EPOCH
        val nanos = (ticks % 10_000_000) * 100

        return FileTime.from(Instant.ofEpochSecond(seconds, nanos))
    }

    private fun readName(): String {
        val bytes = ByteArray(MAX_PATH)
        input.readFully(bytes)

        var len = bytes.size
        for ((i, b) in bytes.withIndex()) {
            if (b.toInt() == 0) {
                len = i
                break
            }
        }

        return String(bytes, 0, len, Cp1252Charset)
    }
    private fun peekUnsignedByte(): Int {
        val n = pushbackInput.read()
        if (n != -1) {
            // Only push the byte back if one was actually read: unread(-1)
            // would push a spurious 0xFF byte back at end of stream.
            pushbackInput.unread(n)
        }
        return n
    }

    public fun extract(destination: Path) {
        val newVersion = peekUnsignedByte() == 0xFE
        if (newVersion) {
            val signature = input.readInt()
            if (signature != 0x435352FE) {
                throw IOException("Invalid signature")
            }
        }

        var readDirectoryPath = true
        var number = 0
        var directorySuffix: String? = null

        while (true) {
            if (newVersion && readDirectoryPath) {
                val len = try {
                    input.readInt()
                } catch (ex: EOFException) {
                    break
                }

                val bytes = ByteArray(len)
                input.readFully(bytes)

                val path = String(bytes, Cp1252Charset)
                logger.info { "Extracting $path" }

                readDirectoryPath = false
                directorySuffix = path.substring(path.lastIndexOf('\\') + 1)
                    .replace(INVALID_CHARS, "_")

                continue
            }

            if (peekUnsignedByte() == 0xFF) {
                input.skipBytes(1)
                readDirectoryPath = true
                number++
                continue
            }

            val attributes = try {
                input.readInt()
            } catch (ex: EOFException) {
                break
            }

            val btime = readTimestamp()
            val atime = readTimestamp()
            val mtime = readTimestamp()

            val sizeHi = input.readInt().toLong() and 0xFFFFFFFF
            val sizeLo = input.readInt().toLong() and 0xFFFFFFFF
            val size = (sizeHi shl 32) or sizeLo

            input.skipBytes(8) // reserved

            val name = readName()

            input.skipBytes(14) // alternate name
            input.skipBytes(2) // padding

            val dir = if (directorySuffix != null) {
                destination.resolve("cache${number}_$directorySuffix")
            } else {
                destination.resolve("cache$number")
            }

            Files.createDirectories(dir)

            if ((attributes and FILE_ATTRIBUTE_DIRECTORY) == 0) {
                val file = dir.resolve(name)

                Files.newOutputStream(file).use { output ->
                    ByteStreams.copy(ByteStreams.limit(input, size), output)
                }

                val view = Files.getFileAttributeView(file, BasicFileAttributeView::class.java)
                view.setTimes(mtime, atime, btime)
            }
        }
    }

    override fun close() {
        input.close()
    }

    private companion object {
        private const val FILETIME_TO_UNIX_EPOCH: Long = 11644473600
        private const val MAX_PATH = 260
        private const val FILE_ATTRIBUTE_DIRECTORY = 0x10
        private val INVALID_CHARS = Regex("[^A-Za-z0-9-]")

        private val logger = InlineLogger()
    }
}
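To make the timestamp handling concrete, a worked example of the FILETIME conversion (the tick value is chosen purely for illustration):

// 100ns ticks since 1601-01-01:
val ticks = 128_920_783_580_000_000L
val seconds = ticks / 10_000_000 - 11_644_473_600L // = 1_247_604_758
// Instant.ofEpochSecond(1_247_604_758) == 2009-07-14T20:52:38Z,
// and the remainder (ticks % 10_000_000) * 100 contributes 0ns here.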
@@ -0,0 +1,25 @@
package org.openrs2.archive.cache.finder

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.arguments.default
import com.github.ajalt.clikt.parameters.types.inputStream
import com.github.ajalt.clikt.parameters.types.path
import java.nio.file.Path

public class ExtractCommand : CliktCommand(name = "extract") {
    private val input by argument().inputStream()
    private val output by argument().path(
        mustExist = false,
        canBeFile = false,
        canBeDir = true,
        mustBeReadable = true,
        mustBeWritable = true
    ).default(Path.of("."))

    override fun run() {
        CacheFinderExtractor(input).use { extractor ->
            extractor.extract(output)
        }
    }
}
@@ -1,25 +1,8 @@
 package org.openrs2.archive.cache.nxt

-import io.netty.buffer.ByteBuf
-import org.openrs2.crypto.StreamCipher
-import org.openrs2.protocol.PacketCodec
+import org.openrs2.protocol.EmptyPacketCodec

-public object Js5OkCodec : PacketCodec<LoginResponse.Js5Ok>(
+public object Js5OkCodec : EmptyPacketCodec<LoginResponse.Js5Ok>(
     opcode = 0,
-    length = LoginResponse.Js5Ok.LOADING_REQUIREMENTS * 4,
-    type = LoginResponse.Js5Ok::class.java
-) {
-    override fun decode(input: ByteBuf, cipher: StreamCipher): LoginResponse.Js5Ok {
-        val loadingRequirements = mutableListOf<Int>()
-
-        for (i in 0 until LoginResponse.Js5Ok.LOADING_REQUIREMENTS) {
-            loadingRequirements += input.readInt()
-        }
-
-        return LoginResponse.Js5Ok(loadingRequirements)
-    }
-
-    override fun encode(input: LoginResponse.Js5Ok, output: ByteBuf, cipher: StreamCipher) {
-        for (requirement in input.loadingRequirements) {
-            output.writeInt(requirement)
-        }
-    }
-}
+    packet = LoginResponse.Js5Ok
+)
@@ -0,0 +1,57 @@
package org.openrs2.archive.key

import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.future.await
import kotlinx.coroutines.withContext
import org.openrs2.crypto.XteaKey
import org.openrs2.http.checkStatusCode
import java.net.URI
import java.net.http.HttpClient
import java.net.http.HttpRequest
import java.net.http.HttpResponse
import java.time.Duration
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class HdosKeyDownloader @Inject constructor(
    private val client: HttpClient
) : KeyDownloader(KeySource.HDOS) {
    override suspend fun getMissingUrls(seenUrls: Set<String>): Set<String> {
        return setOf(ENDPOINT)
    }

    override suspend fun download(url: String): Sequence<XteaKey> {
        val request = HttpRequest.newBuilder(URI(url))
            .GET()
            .timeout(Duration.ofSeconds(30))
            .build()

        val response = client.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream()).await()
        response.checkStatusCode()

        return withContext(Dispatchers.IO) {
            response.body().use { input ->
                input.bufferedReader().use { reader ->
                    val keys = mutableSetOf<XteaKey>()

                    for (line in reader.lineSequence()) {
                        val parts = line.split(',')
                        if (parts.size < 3) {
                            continue
                        }

                        val key = XteaKey.fromHexOrNull(parts[2]) ?: continue
                        keys += key
                    }

                    keys.asSequence()
                }
            }
        }
    }

    private companion object {
        private const val ENDPOINT = "https://api.hdos.dev/keys/get"
    }
}
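The parser above assumes each response line is comma-separated with the hex-encoded 128-bit XTEA key in the third column; rows that are too short or fail hex decoding are skipped. A self-contained sketch of that per-line logic (the sample row is invented for illustration and is not real HDOS output):

import org.openrs2.crypto.XteaKey

// Hypothetical row shape: <archive>,<group>,<key-hex>. Only the key column matters here.
val line = "5,10295,00112233445566778899aabbccddeeff"
val parts = line.split(',')
val key: XteaKey? = if (parts.size >= 3) XteaKey.fromHexOrNull(parts[2]) else null
// key == null for malformed rows, mirroring the `?: continue` above.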
@@ -1,19 +0,0 @@
package org.openrs2.archive.key

import java.net.http.HttpClient
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class OpenOsrsKeyDownloader @Inject constructor(
    client: HttpClient,
    jsonKeyReader: JsonKeyReader
) : JsonKeyDownloader(KeySource.OPENOSRS, client, jsonKeyReader) {
    override suspend fun getMissingUrls(seenUrls: Set<String>): Set<String> {
        return setOf(ENDPOINT)
    }

    private companion object {
        private const val ENDPOINT = "https://xtea.openosrs.dev/get"
    }
}
@@ -1,50 +0,0 @@
package org.openrs2.archive.key

import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.future.await
import kotlinx.coroutines.withContext
import org.jsoup.Jsoup
import org.openrs2.http.charset
import org.openrs2.http.checkStatusCode
import java.net.URI
import java.net.http.HttpClient
import java.net.http.HttpRequest
import java.net.http.HttpResponse
import java.time.Duration
import javax.inject.Inject
import javax.inject.Singleton

@Singleton
public class PolarKeyDownloader @Inject constructor(
    private val client: HttpClient,
    jsonKeyReader: JsonKeyReader
) : JsonKeyDownloader(KeySource.POLAR, client, jsonKeyReader) {
    override suspend fun getMissingUrls(seenUrls: Set<String>): Set<String> {
        val request = HttpRequest.newBuilder(ENDPOINT)
            .GET()
            .timeout(Duration.ofSeconds(30))
            .build()

        val response = client.sendAsync(request, HttpResponse.BodyHandlers.ofInputStream()).await()
        response.checkStatusCode()

        val document = withContext(Dispatchers.IO) {
            Jsoup.parse(response.body(), response.charset?.name(), ENDPOINT.toString())
        }

        val urls = mutableSetOf<String>()

        for (element in document.select("a")) {
            val url = element.absUrl("href")
            if (url.endsWith(".json") && url !in seenUrls) {
                urls += url
            }
        }

        return urls
    }

    private companion object {
        private val ENDPOINT = URI("https://archive.runestats.com/osrs/xtea/")
    }
}
@@ -0,0 +1,176 @@
-- @formatter:off
CREATE TABLE scopes (
    id SERIAL PRIMARY KEY NOT NULL,
    name TEXT UNIQUE NOT NULL
);

INSERT INTO scopes (name) VALUES ('runescape');

ALTER TABLE games
    ADD COLUMN scope_id INTEGER DEFAULT 1 NOT NULL REFERENCES scopes (id);

ALTER TABLE games
    ALTER COLUMN scope_id DROP DEFAULT;

-- XXX(gpe): I don't think we can easily replace this as the source_groups
-- table doesn't contain a scope_id directly - only indirectly via the sources
-- and games tables.
ALTER TABLE source_groups
    DROP CONSTRAINT source_groups_archive_id_group_id_version_version_truncate_fkey;

ALTER TABLE groups
    ADD COLUMN scope_id INTEGER DEFAULT 1 NOT NULL REFERENCES scopes (id),
    DROP CONSTRAINT groups_pkey,
    ADD PRIMARY KEY (scope_id, archive_id, group_id, version, version_truncated, container_id);

ALTER TABLE groups
    ALTER COLUMN scope_id DROP DEFAULT;

CREATE FUNCTION resolve_index(_scope_id INTEGER, _archive_id uint1, _crc32 INTEGER, _version INTEGER) RETURNS SETOF containers AS $$
    SELECT c.*
    FROM groups g
    JOIN containers c ON c.id = g.container_id
    JOIN indexes i ON i.container_id = c.id
    WHERE g.scope_id = _scope_id AND g.archive_id = 255 AND g.group_id = _archive_id::INTEGER AND c.crc32 = _crc32 AND
        g.version = _version AND NOT g.version_truncated AND i.version = _version
    ORDER BY c.id ASC
    LIMIT 1;
$$ LANGUAGE SQL STABLE PARALLEL SAFE ROWS 1;

CREATE FUNCTION resolve_group(_scope_id INTEGER, _archive_id uint1, _group_id INTEGER, _crc32 INTEGER, _version INTEGER) RETURNS SETOF containers AS $$
    SELECT c.*
    FROM groups g
    JOIN containers c ON c.id = g.container_id
    WHERE g.scope_id = _scope_id AND g.archive_id = _archive_id AND g.group_id = _group_id AND c.crc32 = _crc32 AND (
        (g.version = _version AND NOT g.version_truncated) OR
        (g.version = _version & 65535 AND g.version_truncated)
    )
    ORDER BY g.version_truncated ASC, c.id ASC
    LIMIT 1;
$$ LANGUAGE SQL STABLE PARALLEL SAFE ROWS 1;
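-- Illustrative usage (not part of the migration): resolve_group prefers an
-- exact full-version match and falls back to rows whose stored version was
-- truncated to 16 bits. The argument values below are invented, e.g.:
--   SELECT c.id, length(c.data) FROM resolve_group(1, 5::uint1, 10295, 123456789, 7) c;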

DROP VIEW resolved_groups;
DROP VIEW resolved_indexes;

CREATE VIEW resolved_indexes AS
SELECT s.id AS scope_id, m.id AS master_index_id, a.archive_id, c.data, c.id AS container_id
FROM scopes s
CROSS JOIN master_indexes m
JOIN master_index_archives a ON a.master_index_id = m.id
JOIN resolve_index(s.id, a.archive_id, a.crc32, a.version) c ON TRUE;

CREATE VIEW resolved_groups (scope_id, master_index_id, archive_id, group_id, name_hash, version, data, encrypted, empty_loc, key_id) AS
WITH i AS NOT MATERIALIZED (
    SELECT scope_id, master_index_id, archive_id, data, container_id
    FROM resolved_indexes
)
SELECT i.scope_id, i.master_index_id, 255::uint1, i.archive_id::INTEGER, NULL, NULL, i.data, FALSE, FALSE, NULL
FROM i
UNION ALL
SELECT i.scope_id, i.master_index_id, i.archive_id, ig.group_id, ig.name_hash, ig.version, c.data, c.encrypted, c.empty_loc, c.key_id
FROM i
JOIN index_groups ig ON ig.container_id = i.container_id
JOIN resolve_group(i.scope_id, i.archive_id, ig.group_id, ig.crc32, ig.version) c ON TRUE;

DROP VIEW colliding_groups;

CREATE VIEW colliding_groups (scope_id, archive_id, group_id, crc32, truncated_version, versions, containers) AS
SELECT
    g.scope_id,
    g.archive_id,
    g.group_id,
    c.crc32,
    g.version & 65535 AS truncated_version,
    array_agg(DISTINCT g.version ORDER BY g.version ASC),
    array_agg(DISTINCT c.id ORDER BY c.id ASC)
FROM groups g
JOIN containers c ON c.id = g.container_id
GROUP BY g.scope_id, g.archive_id, g.group_id, c.crc32, truncated_version
HAVING COUNT(DISTINCT c.id) > 1;

DROP VIEW cache_stats;
DROP MATERIALIZED VIEW master_index_stats;
DROP MATERIALIZED VIEW index_stats;

CREATE MATERIALIZED VIEW index_stats (
    scope_id,
    archive_id,
    container_id,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    s.id AS scope_id,
    g.group_id AS archive_id,
    i.container_id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL) AS valid_groups,
    COUNT(*) AS groups,
    COUNT(*) FILTER (WHERE c.encrypted AND (c.key_id IS NOT NULL OR c.empty_loc)) AS valid_keys,
    COUNT(*) FILTER (WHERE c.encrypted) AS keys,
    SUM(length(c.data) + 2) FILTER (WHERE c.id IS NOT NULL) AS size,
    SUM(group_blocks(ig.group_id, length(c.data) + 2)) FILTER (WHERE c.id IS NOT NULL) AS blocks
FROM scopes s
CROSS JOIN indexes i
JOIN groups g ON g.container_id = i.container_id AND g.archive_id = 255 AND NOT g.version_truncated AND
    g.version = i.version
JOIN index_groups ig ON ig.container_id = i.container_id
LEFT JOIN resolve_group(s.id, g.group_id::uint1, ig.group_id, ig.crc32, ig.version) c ON TRUE
GROUP BY s.id, g.group_id, i.container_id;

CREATE UNIQUE INDEX ON index_stats (scope_id, archive_id, container_id);
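-- Note (illustrative, not part of the migration): the unique index above is
-- what makes a non-blocking refresh of the materialized view possible, e.g.
--   REFRESH MATERIALIZED VIEW CONCURRENTLY index_stats;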

CREATE MATERIALIZED VIEW master_index_stats (
    scope_id,
    master_index_id,
    valid_indexes,
    indexes,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    sc.id,
    m.id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL OR (a.version = 0 AND a.crc32 = 0)) AS valid_indexes,
    COUNT(*) FILTER (WHERE a.master_index_id IS NOT NULL) AS indexes,
    SUM(COALESCE(s.valid_groups, 0)) AS valid_groups,
    SUM(COALESCE(s.groups, 0)) AS groups,
    SUM(COALESCE(s.valid_keys, 0)) AS valid_keys,
    SUM(COALESCE(s.keys, 0)) AS keys,
    SUM(COALESCE(s.size, 0)) + SUM(COALESCE(length(c.data), 0)) AS size,
    SUM(COALESCE(s.blocks, 0)) + SUM(COALESCE(group_blocks(a.archive_id, length(c.data)), 0)) AS blocks
FROM scopes sc
CROSS JOIN master_indexes m
LEFT JOIN master_index_archives a ON a.master_index_id = m.id
LEFT JOIN resolve_index(sc.id, a.archive_id, a.crc32, a.version) c ON TRUE
LEFT JOIN index_stats s ON s.scope_id = sc.id AND s.archive_id = a.archive_id AND s.container_id = c.id
GROUP BY sc.id, m.id;

CREATE UNIQUE INDEX ON master_index_stats (scope_id, master_index_id);

CREATE VIEW cache_stats AS
SELECT
    s.id AS scope_id,
    c.id AS cache_id,
    COALESCE(ms.valid_indexes, cs.valid_archives) AS valid_indexes,
    COALESCE(ms.indexes, cs.archives) AS indexes,
    COALESCE(ms.valid_groups, cs.valid_files) AS valid_groups,
    COALESCE(ms.groups, cs.files) AS groups,
    COALESCE(ms.valid_keys, 0) AS valid_keys,
    COALESCE(ms.keys, 0) AS keys,
    COALESCE(ms.size, cs.size) AS size,
    COALESCE(ms.blocks, cs.blocks) AS blocks
FROM scopes s
CROSS JOIN caches c
LEFT JOIN master_index_stats ms ON ms.scope_id = s.id AND ms.master_index_id = c.id
LEFT JOIN crc_table_stats cs ON s.name = 'runescape' AND cs.crc_table_id = c.id;

DROP FUNCTION resolve_group(_archive_id uint1, _group_id INTEGER, _crc32 INTEGER, _version INTEGER);
DROP FUNCTION resolve_index(_archive_id uint1, _crc32 INTEGER, _version INTEGER);
@@ -0,0 +1,2 @@
-- @formatter:off
ALTER TABLE caches ADD COLUMN hidden BOOLEAN NOT NULL DEFAULT FALSE;
@@ -0,0 +1,95 @@
-- @formatter:off
CREATE MATERIALIZED VIEW index_stats_new (
    scope_id,
    archive_id,
    container_id,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    s.id AS scope_id,
    g.group_id AS archive_id,
    i.container_id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL) AS valid_groups,
    COUNT(*) AS groups,
    COUNT(*) FILTER (WHERE c.encrypted AND (c.key_id IS NOT NULL OR c.empty_loc)) AS valid_keys,
    COUNT(*) FILTER (WHERE c.encrypted) AS keys,
    SUM(length(c.data) + 2) FILTER (WHERE c.id IS NOT NULL) AS size,
    SUM(group_blocks(ig.group_id, length(c.data) + 2)) FILTER (WHERE c.id IS NOT NULL) AS blocks
FROM scopes s
CROSS JOIN indexes i
JOIN groups g ON g.scope_id = s.id AND g.container_id = i.container_id AND g.archive_id = 255 AND
    NOT g.version_truncated AND g.version = i.version
JOIN index_groups ig ON ig.container_id = i.container_id
LEFT JOIN resolve_group(s.id, g.group_id::uint1, ig.group_id, ig.crc32, ig.version) c ON TRUE
GROUP BY s.id, g.group_id, i.container_id;

CREATE UNIQUE INDEX ON index_stats_new (scope_id, archive_id, container_id);

ALTER MATERIALIZED VIEW index_stats RENAME TO index_stats_old;
ALTER INDEX index_stats_scope_id_archive_id_container_id_idx RENAME TO index_stats_old_scope_id_archive_id_container_id_idx;

ALTER MATERIALIZED VIEW index_stats_new RENAME TO index_stats;
ALTER INDEX index_stats_new_scope_id_archive_id_container_id_idx RENAME TO index_stats_scope_id_archive_id_container_id_idx;

CREATE MATERIALIZED VIEW master_index_stats_new (
    scope_id,
    master_index_id,
    valid_indexes,
    indexes,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    sc.id,
    m.id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL OR (a.version = 0 AND a.crc32 = 0)) AS valid_indexes,
    COUNT(*) FILTER (WHERE a.master_index_id IS NOT NULL) AS indexes,
    SUM(COALESCE(s.valid_groups, 0)) AS valid_groups,
    SUM(COALESCE(s.groups, 0)) AS groups,
    SUM(COALESCE(s.valid_keys, 0)) AS valid_keys,
    SUM(COALESCE(s.keys, 0)) AS keys,
    SUM(COALESCE(s.size, 0)) + SUM(COALESCE(length(c.data), 0)) AS size,
    SUM(COALESCE(s.blocks, 0)) + SUM(COALESCE(group_blocks(a.archive_id, length(c.data)), 0)) AS blocks
FROM scopes sc
CROSS JOIN master_indexes m
LEFT JOIN master_index_archives a ON a.master_index_id = m.id
LEFT JOIN resolve_index(sc.id, a.archive_id, a.crc32, a.version) c ON TRUE
LEFT JOIN index_stats s ON s.scope_id = sc.id AND s.archive_id = a.archive_id AND s.container_id = c.id
GROUP BY sc.id, m.id;

CREATE UNIQUE INDEX ON master_index_stats_new (scope_id, master_index_id);

ALTER MATERIALIZED VIEW master_index_stats RENAME TO master_index_stats_old;
ALTER INDEX master_index_stats_scope_id_master_index_id_idx RENAME TO master_index_stats_old_scope_id_master_index_id_idx;

ALTER MATERIALIZED VIEW master_index_stats_new RENAME TO master_index_stats;
ALTER INDEX master_index_stats_new_scope_id_master_index_id_idx RENAME TO master_index_stats_scope_id_master_index_id_idx;

CREATE OR REPLACE VIEW cache_stats AS
SELECT
    s.id AS scope_id,
    c.id AS cache_id,
    COALESCE(ms.valid_indexes, cs.valid_archives) AS valid_indexes,
    COALESCE(ms.indexes, cs.archives) AS indexes,
    COALESCE(ms.valid_groups, cs.valid_files) AS valid_groups,
    COALESCE(ms.groups, cs.files) AS groups,
    COALESCE(ms.valid_keys, 0) AS valid_keys,
    COALESCE(ms.keys, 0) AS keys,
    COALESCE(ms.size, cs.size) AS size,
    COALESCE(ms.blocks, cs.blocks) AS blocks
FROM scopes s
CROSS JOIN caches c
LEFT JOIN master_index_stats ms ON ms.scope_id = s.id AND ms.master_index_id = c.id
LEFT JOIN crc_table_stats cs ON s.name = 'runescape' AND cs.crc_table_id = c.id;

DROP MATERIALIZED VIEW master_index_stats_old;
DROP MATERIALIZED VIEW index_stats_old;
@@ -0,0 +1,95 @@
-- @formatter:off
CREATE MATERIALIZED VIEW index_stats_new (
    scope_id,
    archive_id,
    container_id,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    s.id AS scope_id,
    g.group_id AS archive_id,
    i.container_id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL) AS valid_groups,
    COUNT(*) FILTER (WHERE ig.container_id IS NOT NULL) AS groups,
    COUNT(*) FILTER (WHERE c.encrypted AND (c.key_id IS NOT NULL OR c.empty_loc)) AS valid_keys,
    COUNT(*) FILTER (WHERE c.encrypted) AS keys,
    SUM(length(c.data) + 2) FILTER (WHERE c.id IS NOT NULL) AS size,
    SUM(group_blocks(ig.group_id, length(c.data) + 2)) FILTER (WHERE c.id IS NOT NULL) AS blocks
FROM scopes s
CROSS JOIN indexes i
JOIN groups g ON g.scope_id = s.id AND g.container_id = i.container_id AND g.archive_id = 255 AND
    NOT g.version_truncated AND g.version = i.version
LEFT JOIN index_groups ig ON ig.container_id = i.container_id
LEFT JOIN resolve_group(s.id, g.group_id::uint1, ig.group_id, ig.crc32, ig.version) c ON TRUE
GROUP BY s.id, g.group_id, i.container_id;

CREATE UNIQUE INDEX ON index_stats_new (scope_id, archive_id, container_id);

ALTER MATERIALIZED VIEW index_stats RENAME TO index_stats_old;
ALTER INDEX index_stats_scope_id_archive_id_container_id_idx RENAME TO index_stats_old_scope_id_archive_id_container_id_idx;

ALTER MATERIALIZED VIEW index_stats_new RENAME TO index_stats;
ALTER INDEX index_stats_new_scope_id_archive_id_container_id_idx RENAME TO index_stats_scope_id_archive_id_container_id_idx;

CREATE MATERIALIZED VIEW master_index_stats_new (
    scope_id,
    master_index_id,
    valid_indexes,
    indexes,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    sc.id,
    m.id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL OR (a.version = 0 AND a.crc32 = 0)) AS valid_indexes,
    COUNT(*) FILTER (WHERE a.master_index_id IS NOT NULL) AS indexes,
    SUM(COALESCE(s.valid_groups, 0)) AS valid_groups,
    SUM(COALESCE(s.groups, 0)) AS groups,
    SUM(COALESCE(s.valid_keys, 0)) AS valid_keys,
    SUM(COALESCE(s.keys, 0)) AS keys,
    SUM(COALESCE(s.size, 0)) + SUM(COALESCE(length(c.data), 0)) AS size,
    SUM(COALESCE(s.blocks, 0)) + SUM(COALESCE(group_blocks(a.archive_id, length(c.data)), 0)) AS blocks
FROM scopes sc
CROSS JOIN master_indexes m
LEFT JOIN master_index_archives a ON a.master_index_id = m.id
LEFT JOIN resolve_index(sc.id, a.archive_id, a.crc32, a.version) c ON TRUE
LEFT JOIN index_stats s ON s.scope_id = sc.id AND s.archive_id = a.archive_id AND s.container_id = c.id
GROUP BY sc.id, m.id;

CREATE UNIQUE INDEX ON master_index_stats_new (scope_id, master_index_id);

ALTER MATERIALIZED VIEW master_index_stats RENAME TO master_index_stats_old;
ALTER INDEX master_index_stats_scope_id_master_index_id_idx RENAME TO master_index_stats_old_scope_id_master_index_id_idx;

ALTER MATERIALIZED VIEW master_index_stats_new RENAME TO master_index_stats;
ALTER INDEX master_index_stats_new_scope_id_master_index_id_idx RENAME TO master_index_stats_scope_id_master_index_id_idx;

CREATE OR REPLACE VIEW cache_stats AS
SELECT
    s.id AS scope_id,
    c.id AS cache_id,
    COALESCE(ms.valid_indexes, cs.valid_archives) AS valid_indexes,
    COALESCE(ms.indexes, cs.archives) AS indexes,
    COALESCE(ms.valid_groups, cs.valid_files) AS valid_groups,
    COALESCE(ms.groups, cs.files) AS groups,
    COALESCE(ms.valid_keys, 0) AS valid_keys,
    COALESCE(ms.keys, 0) AS keys,
    COALESCE(ms.size, cs.size) AS size,
    COALESCE(ms.blocks, cs.blocks) AS blocks
FROM scopes s
CROSS JOIN caches c
LEFT JOIN master_index_stats ms ON ms.scope_id = s.id AND ms.master_index_id = c.id
LEFT JOIN crc_table_stats cs ON s.name = 'runescape' AND cs.crc_table_id = c.id;

DROP MATERIALIZED VIEW master_index_stats_old;
DROP MATERIALIZED VIEW index_stats_old;
@@ -0,0 +1,95 @@
-- @formatter:off
CREATE MATERIALIZED VIEW index_stats_new (
    scope_id,
    archive_id,
    container_id,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    s.id AS scope_id,
    g.group_id AS archive_id,
    i.container_id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL) AS valid_groups,
    COUNT(*) FILTER (WHERE ig.container_id IS NOT NULL) AS groups,
    COUNT(*) FILTER (WHERE c.encrypted AND (c.key_id IS NOT NULL OR c.empty_loc)) AS valid_keys,
    COUNT(*) FILTER (WHERE c.encrypted) AS keys,
    COALESCE(SUM(length(c.data) + 2) FILTER (WHERE c.id IS NOT NULL), 0) AS size,
    COALESCE(SUM(group_blocks(ig.group_id, length(c.data) + 2)) FILTER (WHERE c.id IS NOT NULL), 0) AS blocks
FROM scopes s
CROSS JOIN indexes i
JOIN groups g ON g.scope_id = s.id AND g.container_id = i.container_id AND g.archive_id = 255 AND
    NOT g.version_truncated AND g.version = i.version
LEFT JOIN index_groups ig ON ig.container_id = i.container_id
LEFT JOIN resolve_group(s.id, g.group_id::uint1, ig.group_id, ig.crc32, ig.version) c ON TRUE
GROUP BY s.id, g.group_id, i.container_id;

CREATE UNIQUE INDEX ON index_stats_new (scope_id, archive_id, container_id);

ALTER MATERIALIZED VIEW index_stats RENAME TO index_stats_old;
ALTER INDEX index_stats_scope_id_archive_id_container_id_idx RENAME TO index_stats_old_scope_id_archive_id_container_id_idx;

ALTER MATERIALIZED VIEW index_stats_new RENAME TO index_stats;
ALTER INDEX index_stats_new_scope_id_archive_id_container_id_idx RENAME TO index_stats_scope_id_archive_id_container_id_idx;

CREATE MATERIALIZED VIEW master_index_stats_new (
    scope_id,
    master_index_id,
    valid_indexes,
    indexes,
    valid_groups,
    groups,
    valid_keys,
    keys,
    size,
    blocks
) AS
SELECT
    sc.id,
    m.id,
    COUNT(*) FILTER (WHERE c.id IS NOT NULL OR (a.version = 0 AND a.crc32 = 0)) AS valid_indexes,
    COUNT(*) FILTER (WHERE a.master_index_id IS NOT NULL) AS indexes,
    SUM(COALESCE(s.valid_groups, 0)) AS valid_groups,
    SUM(COALESCE(s.groups, 0)) AS groups,
    SUM(COALESCE(s.valid_keys, 0)) AS valid_keys,
    SUM(COALESCE(s.keys, 0)) AS keys,
    SUM(COALESCE(s.size, 0)) + SUM(COALESCE(length(c.data), 0)) AS size,
    SUM(COALESCE(s.blocks, 0)) + SUM(COALESCE(group_blocks(a.archive_id, length(c.data)), 0)) AS blocks
FROM scopes sc
CROSS JOIN master_indexes m
LEFT JOIN master_index_archives a ON a.master_index_id = m.id
LEFT JOIN resolve_index(sc.id, a.archive_id, a.crc32, a.version) c ON TRUE
LEFT JOIN index_stats s ON s.scope_id = sc.id AND s.archive_id = a.archive_id AND s.container_id = c.id
GROUP BY sc.id, m.id;

CREATE UNIQUE INDEX ON master_index_stats_new (scope_id, master_index_id);

ALTER MATERIALIZED VIEW master_index_stats RENAME TO master_index_stats_old;
ALTER INDEX master_index_stats_scope_id_master_index_id_idx RENAME TO master_index_stats_old_scope_id_master_index_id_idx;

ALTER MATERIALIZED VIEW master_index_stats_new RENAME TO master_index_stats;
ALTER INDEX master_index_stats_new_scope_id_master_index_id_idx RENAME TO master_index_stats_scope_id_master_index_id_idx;

CREATE OR REPLACE VIEW cache_stats AS
SELECT
    s.id AS scope_id,
    c.id AS cache_id,
    COALESCE(ms.valid_indexes, cs.valid_archives) AS valid_indexes,
    COALESCE(ms.indexes, cs.archives) AS indexes,
    COALESCE(ms.valid_groups, cs.valid_files) AS valid_groups,
    COALESCE(ms.groups, cs.files) AS groups,
    COALESCE(ms.valid_keys, 0) AS valid_keys,
    COALESCE(ms.keys, 0) AS keys,
    COALESCE(ms.size, cs.size) AS size,
    COALESCE(ms.blocks, cs.blocks) AS blocks
FROM scopes s
CROSS JOIN caches c
LEFT JOIN master_index_stats ms ON ms.scope_id = s.id AND ms.master_index_id = c.id
LEFT JOIN crc_table_stats cs ON s.name = 'runescape' AND cs.crc_table_id = c.id;

DROP MATERIALIZED VIEW master_index_stats_old;
DROP MATERIALIZED VIEW index_stats_old;
@@ -0,0 +1,53 @@
-- @formatter:off
DROP VIEW cache_stats;
DROP MATERIALIZED VIEW crc_table_stats;
DROP MATERIALIZED VIEW version_list_stats;

CREATE MATERIALIZED VIEW version_list_stats AS
SELECT
    v.blob_id,
    vf.index_id,
    COUNT(*) FILTER (WHERE b.id IS NOT NULL) AS valid_files,
    COUNT(*) AS files,
    SUM(length(b.data) + 2) FILTER (WHERE b.id IS NOT NULL) AS size,
    SUM(group_blocks(vf.file_id, length(b.data) + 2)) AS blocks
FROM version_lists v
JOIN version_list_files vf ON vf.blob_id = v.blob_id
LEFT JOIN resolve_file(vf.index_id, vf.file_id, vf.version, vf.crc32) b ON TRUE
GROUP BY v.blob_id, vf.index_id;

CREATE UNIQUE INDEX ON version_list_stats (blob_id, index_id);

CREATE MATERIALIZED VIEW crc_table_stats AS
SELECT
    c.id AS crc_table_id,
    COUNT(*) FILTER (WHERE b.id IS NOT NULL AND a.crc32 <> 0) AS valid_archives,
    COUNT(*) FILTER (WHERE a.crc32 <> 0) AS archives,
    SUM(COALESCE(s.valid_files, 0)) AS valid_files,
    SUM(COALESCE(s.files, 0)) AS files,
    SUM(COALESCE(s.size, 0)) + SUM(COALESCE(length(b.data), 0)) AS size,
    SUM(COALESCE(s.blocks, 0)) + SUM(COALESCE(group_blocks(a.archive_id, length(b.data)), 0)) AS blocks
FROM crc_tables c
LEFT JOIN crc_table_archives a ON a.crc_table_id = c.id
LEFT JOIN resolve_archive(a.archive_id, a.crc32) b ON TRUE
LEFT JOIN version_list_stats s ON s.blob_id = b.id
GROUP BY c.id;

CREATE UNIQUE INDEX ON crc_table_stats (crc_table_id);

CREATE VIEW cache_stats AS
SELECT
    s.id AS scope_id,
    c.id AS cache_id,
    COALESCE(ms.valid_indexes, cs.valid_archives) AS valid_indexes,
    COALESCE(ms.indexes, cs.archives) AS indexes,
    COALESCE(ms.valid_groups, cs.valid_files) AS valid_groups,
    COALESCE(ms.groups, cs.files) AS groups,
    COALESCE(ms.valid_keys, 0) AS valid_keys,
    COALESCE(ms.keys, 0) AS keys,
    COALESCE(ms.size, cs.size) AS size,
    COALESCE(ms.blocks, cs.blocks) AS blocks
FROM scopes s
CROSS JOIN caches c
LEFT JOIN master_index_stats ms ON ms.scope_id = s.id AND ms.master_index_id = c.id
LEFT JOIN crc_table_stats cs ON s.name = 'runescape' AND cs.crc_table_id = c.id;
@@ -0,0 +1,2 @@
-- @formatter:off
ALTER TYPE key_source ADD VALUE 'hdos';
@@ -0,0 +1,3 @@
-- @formatter:off

ALTER TYPE source_type ADD VALUE 'cross_pollination';
@@ -0,0 +1,7 @@
-- @formatter:off

ALTER TABLE sources
    ALTER COLUMN cache_id DROP NOT NULL,
    ALTER COLUMN game_id DROP NOT NULL;

CREATE UNIQUE INDEX ON sources (type) WHERE type = 'cross_pollination';
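-- Note (illustrative, not part of the migration): the partial unique index
-- above caps the table at a single 'cross_pollination' row. This appears to
-- match the importer earlier in this diff, which creates that source with
-- NULL cache_id/game_id, leaving nothing else to distinguish duplicates by.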
@@ -1,2 +1,3 @@
+-- @formatter:off
 ALTER TABLE games
     DROP COLUMN key;
@@ -0,0 +1,43 @@
var buildRegex = new RegExp('>([0-9]+)(?:[.]([0-9]+))?<');

function customSort(name, order, data) {
    order = order === 'asc' ? 1 : -1;

    data.sort(function (a, b) {
        a = a[name];
        b = b[name];

        if (!a) {
            return 1;
        } else if (!b) {
            return -1;
        }

        if (name === 'builds') {
            return buildSort(a, b) * order;
        } else {
            if (a < b) {
                return -order;
            } else if (a === b) {
                return 0;
            } else {
                return order;
            }
        }
    });
}

function buildSort(a, b) {
    a = buildRegex.exec(a);
    b = buildRegex.exec(b);

    var aMajor = parseInt(a[1]);
    var bMajor = parseInt(b[1]);

    if (aMajor !== bMajor) {
        return aMajor - bMajor;
    }

    var aMinor = a[2] ? parseInt(a[2]) : 0;
    var bMinor = b[2] ? parseInt(b[2]) : 0;
    return aMinor - bMinor;
}
|||||||
|
<!DOCTYPE html> |
||||||
|
<html xmlns:th="http://www.thymeleaf.org" lang="en"> |
||||||
|
<head th:replace="layout.html :: head"> |
||||||
|
<title>API - OpenRS2 Archive</title> |
||||||
|
<link rel="stylesheet" href="/webjars/bootstrap/css/bootstrap.min.css" /> |
||||||
|
<link rel="stylesheet" href="/static/css/openrs2.css" /> |
||||||
|
<script src="/webjars/jquery/jquery.min.js" defer></script> |
||||||
|
<script src="/webjars/bootstrap/js/bootstrap.bundle.min.js" defer></script> |
||||||
|
</head> |
||||||
|
<body> |
||||||
|
<nav th:replace="layout.html :: nav"></nav> |
||||||
|
<main class="container"> |
||||||
|
<h1>API</h1> |
||||||
|
|
||||||
|
<p>All endpoints accept requests from any origin. Range requests are not supported by any endpoint.</p> |
||||||
|
|
||||||
|
<h2><code>GET /caches.json</code></h2> |
||||||
|
|
||||||
|
<p> |
||||||
|
Returns a list of all caches, including all data available on the main <a href="/caches">caches</a> |
||||||
|
page, in JSON format: |
||||||
|
</p> |
||||||
|
|
||||||
|
<pre><code>[ |
||||||
|
{ |
||||||
|
// The cache's internal ID. |
||||||
|
"id": 1, |
||||||
|
|
||||||
|
// A scope is a group of related games. Missing groups are only located |
||||||
|
// from caches for games in the same scope. |
||||||
|
// |
||||||
|
// Currently the "runescape" scope is used for the "runescape" and |
||||||
|
// "oldschool" games. Each FunOrb game has its own scope. |
||||||
|
// |
||||||
|
// Your code must be prepared for new scopes to be added in the future. |
||||||
|
"scope": "runescape", |
||||||
|
|
||||||
|
// The game's name. Your code must be prepared for new games to be |
||||||
|
// added in the future. |
||||||
|
"game": "runescape", |
||||||
|
|
||||||
|
// Currently either "live" or "beta", but your code must be prepared |
||||||
|
// for new environments to be added in the future. |
||||||
|
"environment": "live", |
||||||
|
|
||||||
|
// The language's ISO-639-1 code. Currently either "en", "de", "fr" or |
||||||
|
// "pt", but your code must be prepared for new languages to be added |
||||||
|
// in the future. |
||||||
|
"language": "en", |
||||||
|
|
||||||
|
// A list of build numbers the cache is associated with, which may be |
||||||
|
// empty if the build number(s) are not known. |
||||||
|
"builds": [ |
||||||
|
{ |
||||||
|
// The major number is always set. |
||||||
|
"major": 549, |
||||||
|
|
||||||
|
// The minor number may be null. |
||||||
|
"minor": null |
||||||
|
}, |
||||||
|
{ |
||||||
|
"major": 550, |
||||||
|
"minor": null |
||||||
|
} |
||||||
|
], |
||||||
|
|
||||||
|
// The earliest timestamp the cache was available to users, in ISO 8601 |
||||||
|
// format. May be null if not known. |
||||||
|
"timestamp": "2009-06-12T14:55:58Z", |
||||||
|
|
||||||
|
// A list of users who provided a copy of this cache. |
||||||
|
// |
||||||
|
// May be empty if the users wished to remain anonymous. |
||||||
|
// |
||||||
|
// The value "Jagex" indicates the cache was directly downloaded from |
||||||
|
// Jagex's servers by the OpenRS2 project, so we are completely certain |
||||||
|
// it is genuine. This value will never be used for a cache obtained |
||||||
|
// from a third party. |
||||||
|
"sources": [ |
||||||
|
"Erand", |
||||||
|
"Hlwys", |
||||||
|
"Jagex", |
||||||
|
"K4rn4ge", |
||||||
|
"Nathan", |
||||||
|
"Rune-Wars" |
||||||
|
], |
||||||
|
|
||||||
|
// In old engine caches, the number of valid .jag archives that are not |
||||||
|
// missing. |
||||||
|
// |
||||||
|
// In new engine caches, the number of valid JS5 indexes that are not |
||||||
|
// missing. |
||||||
|
// |
||||||
|
// May be null if the cache is still being processed. |
||||||
|
"valid_indexes": 29, |
||||||
|
|
||||||
|
// In old engine caches, the total number of .jag archives that should |
||||||
|
// exist, based on the cache's CRC table. |
||||||
|
// |
||||||
|
// In new engine caches, the total number of JS5 indexes that should |
||||||
|
// exist, based on the JS5 master index. |
||||||
|
// |
||||||
|
// May be null if the cache is still being processed. |
||||||
|
"indexes": 29, |
||||||
|
|
||||||
|
// The number of valid files (old engine) or valid groups (new engine) |
||||||
|
// that are not missing. May be null if the cache is still being processed. |
||||||
|
"valid_groups": 71002, |
||||||
|
|
||||||
|
// In old engine caches, the total number of files that should exist, |
||||||
|
// based on the cache's versionlist.jag archive. |
||||||
|
// |
||||||
|
// In new engine caches, the total number of groups that should exist, |
||||||
|
// based on the JS5 indexes that are available. |
||||||
|
// |
||||||
|
// May be null if the cache is still being processed. |
||||||
|
"groups": 71146, |
||||||
|
|
||||||
|
// The number of encrypted groups for which a valid key is available. |
||||||
|
// May be null if the cache is still being processed. |
||||||
|
"valid_keys": 1203, |
||||||
|
|
||||||
|
// The total number of encrypted groups in the cache. May be null if |
||||||
|
// the cache is still being processed. |
||||||
|
"keys": 1240, |
||||||
|
|
||||||
|
// The total size of all groups in the cache in bytes. May be null if |
||||||
|
// the cache is still being processed. |
||||||
|
"size": 74970573, |
||||||
|
|
||||||
|
// The number of 520-byte blocks required to store the cache's data in |
||||||
|
// a .dat2 file. May be null if the cache is still being processed. |
||||||
|
"blocks": 185273, |
||||||
|
|
||||||
|
// A boolean flag indicating if the cache is small enough to be |
||||||
|
// downloaded in .dat2/.idx format. May be null if the cache is still |
||||||
|
// being processed. |
||||||
|
"disk_store_valid": true |
||||||
|
}, |
||||||
|
... |
||||||
|
]</code></pre> |
||||||
|
|
||||||
|
<h2><code>GET /caches/<scope>/<id>/disk.zip</code></h2> |
||||||
|
|
||||||
|
<p> |
||||||
|
Returns a cache as a ZIP archive of <code>.dat/.idx</code> |
||||||
|
(old engine) or <code>.dat2/.idx</code> (new engine) files. All |
||||||
|
files are stored underneath a <code>cache</code> subdirectory |
||||||
|
in the zip archive. |
||||||
|
</p> |
||||||
|
|
||||||
|
<h2><code>GET /caches/<scope>/<id>/flat-file.tar.gz</code></h2> |
||||||
|
|
||||||
|
<p> |
||||||
|
Returns a cache as a gzipped tarball of files, where each |
||||||
|
file in the tarball holds a single file from the cache (old |
||||||
|
engine) or single group (new engine). |
||||||
|
</p> |
||||||
|
|
||||||
|
<p> |
||||||
|
The paths within the archive all have a format of |
||||||
|
<code>cache/<index>/<file>.dat</code> (old engine) |
||||||
|
or <code>cache/<archive>/<group>.dat</code> (new |
||||||
|
engine). |
||||||
|
</p> |
||||||
|
|
||||||
|
<p>The two byte version trailers are included.</p> |
||||||
|
|
||||||
|
<h2><code>GET /caches/<scope>/<id>/keys.json</code></h2> |
||||||
|
|
||||||
|
<p>Returns a list of valid XTEA keys for the cache in JSON format:</p> |
||||||
|
|
||||||
|
<pre><code>[ |
||||||
|
{ |
||||||
|
// The ID of the archive containing the group the key is used for. |
||||||
|
// Typically this is 5 (maps), but do note that RuneScape 3 does |
||||||
|
// support encrypting interfaces, though the functionality has not yet |
||||||
|
// been used, and some FunOrb games also have encrypted groups. |
||||||
|
"archive": 5, |
||||||
|
|
||||||
|
// The ID of the group the key is used for. |
||||||
|
"group": 1, |
||||||
|
|
||||||
|
// The group's name hash, or null if the group has no name. |
||||||
|
"name_hash": -1153472937, |
||||||
|
|
||||||
|
// The name of the group, if available, or null if the group has no |
||||||
|
// name or if the name is not known. |
||||||
|
"name": "l40_55", |
||||||
|
|
||||||
|
// The ID of the map square, if the group is an encrypted loc group |
||||||
|
// (has a name of lX_Z). The map square ID is ((X << 8) | Z). |
||||||
|
// null if the group is not an encrypted loc group. |
||||||
|
"mapsquare": 10295, |
||||||
|
|
||||||
|
// The XTEA key, represented as four 32-bit integers. |
||||||
|
"key": [ |
||||||
|
-1920480496, |
||||||
|
-1423914110, |
||||||
|
951774544, |
||||||
|
-1419269290 |
||||||
|
] |
||||||
|
}, |
||||||
|
... |
||||||
|
]</code></pre> |
||||||
|
|
||||||
|
<h2><code>GET /caches/<scope>/<id>/keys.zip</code></h2> |
||||||
|
|
||||||
|
<p> |
||||||
|
Returns a zip archive file of valid XTEA keys for loc groups. |
||||||
|
Each key is stored in a text file containing four lines, with |
||||||
|
each line containing a 32-bit component of the key as a decimal |
||||||
|
string. The paths within the archive all have a format of |
||||||
|
<code>keys/<mapsquare>.txt</code>. |
||||||
|
</p> |
||||||
|
|
||||||
|
<h2><code>GET /caches/<scope>/<id>/map.png</code></h2> |
||||||
|
|
||||||
|
<p> |
||||||
|
Renders the map squares in the cache, with a coloured outline |
||||||
|
representing whether we have a valid key for each map square or |
||||||
|
not: |
||||||
|
</p> |
||||||
|
|
||||||
|
<ul> |
||||||
|
<li><strong>Valid key:</strong> green outline.</li> |
||||||
|
<li><strong>Loc group is not encrypted:</strong> green outline.</li> |
||||||
|
<li><strong>Empty loc group:</strong> grey outline.</li> |
||||||
|
<li><strong>Key unknown:</strong> red outline.</li> |
||||||
|
</ul> |
||||||
|
|
||||||
|
<p> |
||||||
|
Empty loc groups may be replaced with an unencrypted equivalent |
||||||
|
with a cache editor. |
||||||
|
</p> |

<h2><code>GET /caches/<scope>/<id>/archives/<archive>/groups/<group>.dat</code></h2>

<p>
    Returns a single file (old engine) or group (new engine) in
    binary format. The response contains a <code>.jag</code>
    archive (index 0 of an old engine cache), a GZIP-compressed
    file (the remaining indexes of an old engine cache) or
    JS5-compressed data (new engine cache, also known as a
    container). The two-byte version trailer is not included.
</p>
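
<p>
    For new engine caches, a minimal Kotlin sketch for fetching and
    decompressing a group, assuming openrs2's <code>Js5Compression</code>
    helper is available on the classpath; the scope, ID, archive and
    group in the URL are illustrative placeholders:
</p>

<pre><code>import io.netty.buffer.Unpooled
import org.openrs2.buffer.use
import org.openrs2.cache.Js5Compression
import java.net.URI

fun main() {
    // Placeholder scope/ID/archive/group; substitute real values.
    val url = URI("https://archive.openrs2.org/caches/runescape/1/archives/255/groups/5.dat").toURL()
    val container = url.readBytes()

    // The container's compression type byte (none/bzip2/gzip/lzma) is
    // handled by Js5Compression.
    Unpooled.wrappedBuffer(container).use { buf ->
        Js5Compression.uncompress(buf).use { data ->
            println("uncompressed ${data.readableBytes()} bytes")
        }
    }
}</code></pre>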

<h2><code>GET /keys/all.json</code></h2>

<p>
    Returns a list of all XTEA keys in the database, including
    candidate keys that have not been validated against any cache.
</p>

<pre><code>[
    // The XTEA key, represented as four 32-bit integers.
    [
        -2147135705,
        1113423446,
        1294100345,
        946019601
    ],
    ...
]</code></pre>
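
<p>
    A minimal Kotlin sketch for fetching and decoding the list, assuming
    the jackson-module-kotlin library is available; the same element
    shape also applies to <code>/keys/valid.json</code> below:
</p>

<pre><code>import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import java.net.URI

fun main() {
    val mapper = jacksonObjectMapper()

    // Each element is one XTEA key: an array of four 32-bit integers.
    val keys: List<IntArray> = mapper.readValue(URI("https://archive.openrs2.org/keys/all.json").toURL())

    println("fetched ${keys.size} candidate keys")
}</code></pre>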

<h2><code>GET /keys/valid.json</code></h2>

<p>
    Returns a list of XTEA keys in the database, including only
    keys that have been validated against at least one cache.
</p>

<pre><code>[
    // The XTEA key, represented as four 32-bit integers.
    [
        -2147135705,
        1113423446,
        1294100345,
        946019601
    ],
    ...
]</code></pre>

</main>
</body>
</html>
@ -0,0 +1,32 @@
plugins {
    `maven-publish`
    application
    kotlin("jvm")
}

application {
    mainClass.set("org.openrs2.cache.cli.CacheCommandKt")
}

dependencies {
    api(libs.clikt)

    implementation(projects.cache)
    implementation(projects.inject)
}

publishing {
    publications.create<MavenPublication>("maven") {
        from(components["java"])

        pom {
            packaging = "jar"
            name.set("OpenRS2 Cache CLI")
            description.set(
                """
                Tools for working with RuneScape caches.
                """.trimIndent()
            )
        }
    }
}
@ -0,0 +1,15 @@
package org.openrs2.cache.cli

import com.github.ajalt.clikt.core.NoOpCliktCommand
import com.github.ajalt.clikt.core.subcommands

public fun main(args: Array<String>): Unit = CacheCommand().main(args)

public class CacheCommand : NoOpCliktCommand(name = "cache") {
    init {
        subcommands(
            OpenNxtUnpackCommand(),
            RuneLiteUnpackCommand()
        )
    }
}
@ -0,0 +1,26 @@
package org.openrs2.cache.cli

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.types.path
import com.google.inject.Guice
import io.netty.buffer.ByteBufAllocator
import org.openrs2.cache.CacheModule
import org.openrs2.cache.OpenNxtStore
import org.openrs2.cache.Store
import org.openrs2.inject.CloseableInjector

public class OpenNxtUnpackCommand : CliktCommand(name = "unpack-opennxt") {
    private val input by argument().path(mustExist = true, canBeFile = false, mustBeReadable = true)
    private val output by argument().path(canBeFile = false, mustBeReadable = true, mustBeWritable = true)

    override fun run() {
        CloseableInjector(Guice.createInjector(CacheModule)).use { injector ->
            val alloc = injector.getInstance(ByteBufAllocator::class.java)

            Store.open(output, alloc).use { store ->
                OpenNxtStore.unpack(input, store)
            }
        }
    }
}
@ -0,0 +1,27 @@
package org.openrs2.cache.cli

import com.github.ajalt.clikt.core.CliktCommand
import com.github.ajalt.clikt.parameters.arguments.argument
import com.github.ajalt.clikt.parameters.types.path
import com.google.inject.Guice
import io.netty.buffer.ByteBufAllocator
import org.openrs2.cache.CacheModule
import org.openrs2.cache.RuneLiteStore
import org.openrs2.cache.Store
import org.openrs2.inject.CloseableInjector

public class RuneLiteUnpackCommand : CliktCommand(name = "unpack-runelite") {
    private val input by argument().path(mustExist = true, canBeFile = false, mustBeReadable = true)
    private val output by argument().path(canBeFile = false, mustBeReadable = true, mustBeWritable = true)

    override fun run() {
        CloseableInjector(Guice.createInjector(CacheModule)).use { injector ->
            val alloc = injector.getInstance(ByteBufAllocator::class.java)
            val runeLiteStore = injector.getInstance(RuneLiteStore::class.java)

            Store.open(output, alloc).use { store ->
                runeLiteStore.unpack(input, store)
            }
        }
    }
}
@ -0,0 +1,89 @@
package org.openrs2.cache

import io.netty.buffer.Unpooled
import org.openrs2.buffer.crc32
import org.openrs2.buffer.use
import org.sqlite.SQLiteDataSource
import java.nio.file.Files
import java.nio.file.Path
import java.sql.Connection

public object OpenNxtStore {
    public fun unpack(input: Path, output: Store) {
        output.create(Store.ARCHIVESET)

        for (archive in 0..Store.MAX_ARCHIVE) {
            // Each OpenNXT archive is a standalone SQLite database.
            val path = input.resolve("js5-$archive.jcache")
            if (!Files.exists(path)) {
                continue
            }

            val dataSource = SQLiteDataSource()
            dataSource.url = "jdbc:sqlite:$path"

            dataSource.connection.use { connection ->
                unpackArchive(connection, archive, output)
            }
        }
    }

    private fun unpackArchive(connection: Connection, archive: Int, output: Store) {
        // The row with key 1 in cache_index holds the archive's Js5Index.
        connection.prepareStatement(
            """
            SELECT data, crc
            FROM cache_index
            WHERE key = 1
            """.trimIndent()
        ).use { stmt ->
            stmt.executeQuery().use { rows ->
                if (rows.next()) {
                    val checksum = rows.getInt(2)

                    Unpooled.wrappedBuffer(rows.getBytes(1)).use { buf ->
                        val actualChecksum = buf.crc32()
                        if (actualChecksum != checksum) {
                            throw StoreCorruptException(
                                "Js5Index corrupt (expected checksum $checksum, actual checksum $actualChecksum)"
                            )
                        }

                        output.write(Store.ARCHIVESET, archive, buf)
                    }
                }
            }
        }

        connection.prepareStatement(
            """
            SELECT key, data, crc, version
            FROM cache
            """.trimIndent()
        ).use { stmt ->
            stmt.executeQuery().use { rows ->
                while (rows.next()) {
                    val group = rows.getInt(1)
                    val checksum = rows.getInt(3)
                    // Versions are truncated to 16 bits to match the trailer.
                    val version = rows.getInt(4) and 0xFFFF

                    Unpooled.wrappedBuffer(rows.getBytes(2)).use { buf ->
                        val actualVersion = VersionTrailer.peek(buf)
                        if (actualVersion != version) {
                            throw StoreCorruptException(
                                "Group corrupt (expected version $version, actual version $actualVersion)"
                            )
                        }

                        // The checksum covers the group data but not the
                        // two-byte version trailer.
                        val actualChecksum = buf.slice(buf.readerIndex(), buf.writerIndex() - 2).crc32()
                        if (actualChecksum != checksum) {
                            throw StoreCorruptException(
                                "Group corrupt (expected checksum $checksum, actual checksum $actualChecksum)"
                            )
                        }

                        output.write(archive, group, buf)
                    }
                }
            }
        }
    }
}