Store non-truncated versions in the archiving service if available

Signed-off-by: Graham <gpe@openrs2.org>
Branch: pull/132/head
Author: Graham (3 years ago)
Parent: cbeb9a3a67
Commit: 4154e4fdb4
Changed files:

1. archive/src/main/kotlin/org/openrs2/archive/cache/CacheExporter.kt (22 lines changed)
2. archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt (52 lines changed)
3. archive/src/main/kotlin/org/openrs2/archive/cache/Js5ChannelHandler.kt (11 lines changed)
4. archive/src/main/resources/org/openrs2/archive/V1__init.sql (8 lines changed)

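Background for this change: the version trailer stored at the end of each group on disk only preserves the low 16 bits of the group's version (hence the & 65535 comparisons in the diff below), while the Js5Index entry for a group carries the full 32-bit version. The groups table previously stored only the truncated value; after this commit it stores a full version column plus a version_truncated flag, and the importer promotes a truncated on-disk version to the full one whenever the group can be matched against its index entry. A minimal Kotlin sketch of that promotion rule, using illustrative names (IndexEntry, ResolvedVersion, resolveVersion) that are not part of the OpenRS2 codebase:

    // Illustrative sketch only; the real logic lives in CacheImporter.readGroup().
    data class IndexEntry(val checksum: Int, val version: Int)

    data class ResolvedVersion(val version: Int, val truncated: Boolean)

    fun resolveVersion(truncatedVersion: Int, dataCrc32: Int, entry: IndexEntry?): ResolvedVersion {
        // The on-disk version trailer only holds the low 16 bits of the version.
        // If the group's checksum and low 16 bits match the index entry, the
        // index's full 32-bit version can be trusted; otherwise keep the
        // truncated value and remember that it is truncated.
        return if (entry != null && entry.checksum == dataCrc32 && (entry.version and 0xFFFF) == truncatedVersion) {
            ResolvedVersion(entry.version, truncated = false)
        } else {
            ResolvedVersion(truncatedVersion, truncated = true)
        }
    }

The readGroup() change further down applies the same rule with the real Js5Index entry and the group's CRC-32.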
archive/src/main/kotlin/org/openrs2/archive/cache/CacheExporter.kt
@@ -80,18 +80,22 @@ public class CacheExporter @Inject constructor(
 SELECT a.archive_id, c.data, g.container_id
 FROM master_indexes m
 JOIN master_index_archives a ON a.container_id = m.container_id
-JOIN groups g ON g.archive_id = 255 AND g.group_id = a.archive_id::INTEGER AND g.truncated_version = a.version & 65535
+JOIN groups g ON g.archive_id = 255 AND g.group_id = a.archive_id::INTEGER
+    AND g.version = a.version AND NOT g.version_truncated
 JOIN containers c ON c.id = g.container_id AND c.crc32 = a.crc32
-JOIN indexes i ON i.container_id = g.container_id AND i.version = a.version
+JOIN indexes i ON i.container_id = g.container_id
 WHERE m.container_id = ?
 )
 SELECT 255::uint1, t.archive_id::INTEGER, t.data, NULL
 FROM t
 UNION ALL
-SELECT t.archive_id, ig.group_id, c.data, g.truncated_version
+SELECT t.archive_id, ig.group_id, c.data, g.version
 FROM t
 JOIN index_groups ig ON ig.container_id = t.container_id
-JOIN groups g ON g.archive_id = t.archive_id::INTEGER AND g.group_id = ig.group_id AND g.truncated_version = ig.version & 65535
+JOIN groups g ON g.archive_id = t.archive_id::INTEGER AND g.group_id = ig.group_id AND (
+    (g.version = ig.version AND NOT g.version_truncated) OR
+    (g.version = ig.version & 65535 AND g.version_truncated)
+)
 JOIN containers c ON c.id = g.container_id AND c.crc32 = ig.crc32
 """.trimIndent()
 ).use { stmt ->
@@ -135,15 +139,19 @@ public class CacheExporter @Inject constructor(
 SELECT a.archive_id, c.data, g.container_id
 FROM master_indexes m
 JOIN master_index_archives a ON a.container_id = m.container_id
-JOIN groups g ON g.archive_id = 255 AND g.group_id = a.archive_id::INTEGER AND g.truncated_version = a.version & 65535
+JOIN groups g ON g.archive_id = 255 AND g.group_id = a.archive_id::INTEGER
+    AND g.version = a.version AND NOT g.version_truncated
 JOIN containers c ON c.id = g.container_id AND c.crc32 = a.crc32
-JOIN indexes i ON i.container_id = g.container_id AND i.version = a.version
+JOIN indexes i ON i.container_id = g.container_id
 WHERE m.container_id = ?
 )
 SELECT t.archive_id, ig.group_id, ig.name_hash, n.name, (k.key).k0, (k.key).k1, (k.key).k2, (k.key).k3
 FROM t
 JOIN index_groups ig ON ig.container_id = t.container_id
-JOIN groups g ON g.archive_id = t.archive_id::INTEGER AND g.group_id = ig.group_id AND g.truncated_version = ig.version & 65535
+JOIN groups g ON g.archive_id = t.archive_id::INTEGER AND g.group_id = ig.group_id AND (
+    (g.version = ig.version AND NOT g.version_truncated) OR
+    (g.version = ig.version & 65535 AND g.version_truncated)
+)
 JOIN containers c ON c.id = g.container_id AND c.crc32 = ig.crc32
 JOIN keys k ON k.id = c.key_id
 LEFT JOIN names n ON n.hash = ig.name_hash AND n.name ~ '^l(?:[0-9]|[1-9][0-9])_(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'

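Because some stored groups still only have a truncated version (those imported from disk without a matching index entry), the exporter queries above accept either form: an exact match when version_truncated is false, or a match on the low 16 bits when it is true. The new join condition is equivalent to the following predicate, a sketch using an illustrative function name rather than code from the repository:

    // Kotlin equivalent of the SQL condition:
    //   (g.version = ig.version AND NOT g.version_truncated) OR
    //   (g.version = ig.version & 65535 AND g.version_truncated)
    fun groupVersionMatches(storedVersion: Int, storedTruncated: Boolean, indexVersion: Int): Boolean {
        return if (storedTruncated) {
            storedVersion == (indexVersion and 0xFFFF) // only the low 16 bits are known
        } else {
            storedVersion == indexVersion // full 32-bit versions must agree exactly
        }
    }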
archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt
@@ -50,13 +50,14 @@ public class CacheImporter @Inject constructor(
     archive: Int,
     public val index: Js5Index,
     data: ByteBuf,
-) : Group(Js5Archive.ARCHIVESET, archive, data, index.version, false)
+) : Group(Js5Archive.ARCHIVESET, archive, data, index.version, false, false)

 public open class Group(
     public val archive: Int,
     public val group: Int,
     data: ByteBuf,
     public val version: Int,
+    public val versionTruncated: Boolean,
     encrypted: Boolean
 ) : Container(data, encrypted)
@@ -82,17 +83,20 @@ public class CacheImporter @Inject constructor(
         }

         // import indexes
-        val indexes = mutableListOf<Index>()
+        val indexes = arrayOfNulls<Js5Index>(Js5Archive.ARCHIVESET)
+        val indexGroups = mutableListOf<Index>()
         try {
             for (archive in store.list(Js5Archive.ARCHIVESET)) {
-                indexes += readIndex(store, archive)
+                val indexGroup = readIndex(store, archive)
+                indexes[archive] = indexGroup.index
+                indexGroups += indexGroup
             }

-            for (index in indexes) {
+            for (index in indexGroups) {
                 addIndex(connection, index)
             }
         } finally {
-            indexes.forEach(Index::release)
+            indexGroups.forEach(Index::release)
         }

         // import groups
@@ -103,8 +107,10 @@ public class CacheImporter @Inject constructor(
                 continue
             }

+            val index = indexes[archive]
+
             for (id in store.list(archive)) {
-                val group = readGroup(store, archive, id) ?: continue
+                val group = readGroup(store, archive, index, id) ?: continue
                 groups += group

                 if (groups.size >= BATCH_SIZE) {
@@ -265,11 +271,15 @@ public class CacheImporter @Inject constructor(
             stmt.executeBatch()
         }

+        // we deliberately ignore groups with truncated versions here and
+        // re-download them, just in case there's a (crc32, truncated version)
+        // collision
         connection.prepareStatement(
             """
             SELECT t.group_id
             FROM tmp_groups t
-            LEFT JOIN groups g ON g.archive_id = ? AND g.group_id = t.group_id AND g.truncated_version = t.version & 65535
+            LEFT JOIN groups g ON g.archive_id = ? AND g.group_id = t.group_id AND g.version = t.version AND
+                NOT g.version_truncated
             LEFT JOIN containers c ON c.id = g.container_id AND c.crc32 = t.crc32
             WHERE g.container_id IS NULL
             ORDER BY t.group_id ASC
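The comment in this hunk covers an edge case: a truncated version cannot distinguish two revisions whose full versions differ by a multiple of 65536, so a (crc32, truncated version) match could in principle be a false positive. Groups whose stored version is truncated are therefore treated as missing and re-downloaded, which also gives the importer a chance to record their full versions. A trivial illustration of the collision (not code from the repository):

    fun main() {
        val a = 7
        val b = 7 + 65536 // a different full version...
        check((a and 0xFFFF) == (b and 0xFFFF)) // ...yet identical once truncated to 16 bits
        println("both truncate to ${a and 0xFFFF}")
    }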
@@ -455,12 +465,24 @@ public class CacheImporter @Inject constructor(
         }
     }

-    private fun readGroup(store: Store, archive: Int, group: Int): Group? {
+    private fun readGroup(store: Store, archive: Int, index: Js5Index?, group: Int): Group? {
         try {
             store.read(archive, group).use { buf ->
-                val version = VersionTrailer.strip(buf) ?: return null
+                var version = VersionTrailer.strip(buf) ?: return null
+                var versionTruncated = true
                 val encrypted = Js5Compression.isEncrypted(buf.slice())
-                return Group(archive, group, buf.retain(), version, encrypted)
+
+                // grab the non-truncated version from the Js5Index if we can
+                // confirm the group on disk matches the group in the index
+                if (index != null) {
+                    val entry = index[group]
+                    if (entry != null && entry.checksum == buf.crc32() && (entry.version and 0xFFFF) == version) {
+                        version = entry.version
+                        versionTruncated = false
+                    }
+                }
+
+                return Group(archive, group, buf.retain(), version, versionTruncated, encrypted)
             }
         } catch (ex: IOException) {
             return null
@@ -472,8 +494,8 @@ public class CacheImporter @Inject constructor(
         connection.prepareStatement(
             """
-            INSERT INTO groups (archive_id, group_id, container_id, truncated_version)
-            VALUES (?, ?, ?, ?)
+            INSERT INTO groups (archive_id, group_id, container_id, version, version_truncated)
+            VALUES (?, ?, ?, ?, ?)
             ON CONFLICT DO NOTHING
             """.trimIndent()
         ).use { stmt ->
@@ -482,6 +504,7 @@ public class CacheImporter @Inject constructor(
                 stmt.setInt(2, group.group)
                 stmt.setLong(3, containerIds[i])
                 stmt.setInt(4, group.version)
+                stmt.setBoolean(5, group.versionTruncated)

                 stmt.addBatch()
             }
@@ -509,12 +532,11 @@ public class CacheImporter @Inject constructor(
         connection.prepareStatement(
             """
-            INSERT INTO indexes (container_id, version)
-            VALUES (?, ?)
+            INSERT INTO indexes (container_id)
+            VALUES (?)
             """.trimIndent()
         ).use { stmt ->
             stmt.setLong(1, containerId)
-            stmt.setInt(2, index.index.version)

             try {
                 stmt.execute()
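The separate version column on the indexes table is dropped here, presumably because the full index version is now recorded on the corresponding ARCHIVESET group row, which is what the master-index joins in CacheExporter compare against (g.version = a.version); the INSERT into indexes therefore no longer supplies a second parameter.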

archive/src/main/kotlin/org/openrs2/archive/cache/Js5ChannelHandler.kt
@@ -131,9 +131,14 @@ public class Js5ChannelHandler(
             throw Exception("Group checksum invalid")
         }

-        val version = entry.version
-        val encrypted = Js5Compression.isEncrypted(response.data.slice())
-        groups += CacheImporter.Group(response.archive, response.group, response.data.retain(), version, encrypted)
+        groups += CacheImporter.Group(
+            response.archive,
+            response.group,
+            response.data.retain(),
+            entry.version,
+            versionTruncated = false,
+            Js5Compression.isEncrypted(response.data.slice())
+        )
     }

     val complete = pendingRequests.isEmpty() && inFlightRequests.isEmpty()
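In the live JS5 downloader the version comes straight from the validated Js5Index entry rather than from a version trailer, so it is never truncated; versionTruncated is passed as false for every group captured over the network.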

archive/src/main/resources/org/openrs2/archive/V1__init.sql
@@ -51,13 +51,13 @@ CREATE TABLE groups (
     archive_id uint1 NOT NULL,
     group_id INTEGER NOT NULL,
     container_id BIGINT NOT NULL REFERENCES containers (id),
-    truncated_version uint2 NOT NULL,
-    PRIMARY KEY (archive_id, group_id, container_id, truncated_version)
+    version INTEGER NOT NULL,
+    version_truncated BOOLEAN NOT NULL,
+    PRIMARY KEY (archive_id, group_id, container_id, version, version_truncated)
 );

 CREATE TABLE indexes (
-    container_id BIGINT PRIMARY KEY NOT NULL REFERENCES containers (id),
-    version INTEGER NOT NULL
+    container_id BIGINT PRIMARY KEY NOT NULL REFERENCES containers (id)
 );

 CREATE TABLE index_groups (
