Add indexes to the groups table

This will reduce the impact of checksum/version collisions, as a
collision would have to happen between two indexes of the same archive
rather than any two indexes.

Signed-off-by: Graham <gpe@openrs2.org>
Branch: pull/132/head
Author: Graham (3 years ago)
Parent: 723bd42a8e
Commit: 9834dccfdd

Changed files:
  1. archive/src/main/kotlin/org/openrs2/archive/cache/CacheExporter.kt (18)
  2. archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt (19)
archive/src/main/kotlin/org/openrs2/archive/cache/CacheExporter.kt

@@ -21,18 +21,20 @@ public class CacheExporter @Inject constructor(
                 SELECT 255::uint1, e.archive_id::INTEGER, c.data, NULL
                 FROM master_index_entries e
                 JOIN master_indexes m ON m.container_id = e.container_id
-                JOIN containers c ON c.crc32 = e.crc32
-                JOIN indexes i ON i.container_id = c.id AND i.version = e.version
+                JOIN groups g ON g.archive_id = 255 AND g.group_id = e.archive_id::INTEGER AND g.truncated_version = e.version & 65535
+                JOIN containers c ON c.id = g.container_id AND c.crc32 = e.crc32
+                JOIN indexes i ON i.container_id = g.container_id AND i.version = e.version
                 WHERE m.container_id = ?
                 UNION ALL
-                SELECT e.archive_id, ig.group_id, c.data, g.truncated_version
+                SELECT e.archive_id, ie.group_id, c.data, g.truncated_version
                 FROM master_index_entries e
                 JOIN master_indexes m ON m.container_id = e.container_id
-                JOIN containers ic ON ic.crc32 = e.crc32
-                JOIN indexes i ON i.container_id = ic.id AND i.version = e.version
-                JOIN index_groups ig ON ig.container_id = i.container_id
-                JOIN groups g ON g.archive_id = e.archive_id AND g.group_id = ig.group_id AND g.truncated_version = ig.version & 65535
-                JOIN containers c ON c.id = g.container_id AND c.crc32 = ig.crc32
+                JOIN groups ig ON ig.archive_id = 255 AND ig.group_id = e.archive_id::INTEGER AND ig.truncated_version = e.version & 65535
+                JOIN containers ic ON ic.id = ig.container_id AND ic.crc32 = e.crc32
+                JOIN indexes i ON i.container_id = ig.container_id AND i.version = e.version
+                JOIN index_groups ie ON ie.container_id = ig.container_id
+                JOIN groups g ON g.archive_id = e.archive_id AND g.group_id = ie.group_id AND g.truncated_version = ie.version & 65535
+                JOIN containers c ON c.id = g.container_id AND c.crc32 = ie.crc32
                 WHERE m.container_id = ?
             """.trimIndent()
         ).use { stmt ->

archive/src/main/kotlin/org/openrs2/archive/cache/CacheImporter.kt

@@ -47,11 +47,12 @@ public class CacheImporter @Inject constructor(
     ) : Container(data, false)
 
     public class Index(
+        archive: Int,
         public val index: Js5Index,
         data: ByteBuf,
-    ) : Container(data, false)
+    ) : Group(Js5Archive.ARCHIVESET, archive, data, index.version, false)
 
-    public class Group(
+    public open class Group(
         public val archive: Int,
         public val group: Int,
         data: ByteBuf,
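This hunk makes Index a subtype of Group: an index is stored as a group of the ARCHIVESET archive (255), with the archive number as its group id and the Js5 index version as its version. Below is a simplified, self-contained sketch of that relationship; the trailing Group constructor parameters (a version and a version-truncated flag) are an assumption read from the call Group(Js5Archive.ARCHIVESET, archive, data, index.version, false), and plain types stand in for ByteBuf and Js5Index.

// Sketch only: field names beyond those shown in the diff are assumptions.
open class GroupSketch(
    val archive: Int,
    val group: Int,
    val data: ByteArray,
    val version: Int,
    val versionTruncated: Boolean,
)

// An index is now "just" a group of archive 255: its group id is the archive
// number and its version is the Js5 index version.
class IndexSketch(
    archive: Int,
    val indexVersion: Int,
    data: ByteArray,
) : GroupSketch(archive = 255, group = archive, data = data, version = indexVersion, versionTruncated = false)

fun main() {
    val index = IndexSketch(archive = 2, indexVersion = 42, data = ByteArray(0))
    // Stored as a row of the groups table scoped to its own archive slot.
    println("groups(archive_id=${index.archive}, group_id=${index.group}, version=${index.version})")
}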
@@ -218,7 +219,7 @@ public class CacheImporter @Inject constructor(
     public suspend fun importIndexAndGetMissingGroups(archive: Int, index: Js5Index, buf: ByteBuf): List<Int> {
         return database.execute { connection ->
             prepare(connection)
-            addIndex(connection, Index(index, buf))
+            addIndex(connection, Index(archive, index, buf))
 
             connection.prepareStatement(
                 """
@@ -367,7 +368,7 @@ public class CacheImporter @Inject constructor(
         }
     }
 
-    private fun addGroups(connection: Connection, groups: List<Group>) {
+    private fun addGroups(connection: Connection, groups: List<Group>): List<Long> {
         val containerIds = addContainers(connection, groups)
 
         connection.prepareStatement(
@@ -387,18 +388,24 @@ public class CacheImporter @Inject constructor(
             stmt.executeBatch()
         }
+
+        return containerIds
+    }
+
+    private fun addGroup(connection: Connection, group: Group): Long {
+        return addGroups(connection, listOf(group)).single()
     }
 
     private fun readIndex(store: Store, archive: Int): Index {
         return store.read(Js5Archive.ARCHIVESET, archive).use { buf ->
             Js5Compression.uncompress(buf.slice()).use { uncompressed ->
-                Index(Js5Index.read(uncompressed), buf.retain())
+                Index(archive, Js5Index.read(uncompressed), buf.retain())
             }
         }
     }
 
     private fun addIndex(connection: Connection, index: Index) {
-        val containerId = addContainer(connection, index)
+        val containerId = addGroup(connection, index)
 
         val savepoint = connection.setSavepoint()
         connection.prepareStatement(
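The remaining hunks are what lets addIndex() reuse the group path: addGroups() now returns the container ids it inserted, a single-group addGroup() wrapper is layered on top, and addIndex() calls it instead of addContainer(). The following is a minimal, self-contained sketch of that shape; GroupRow, the INSERT statement and the way container ids are obtained are illustrative stand-ins rather than the project's code.

import java.sql.Connection

// Sketch only: GroupRow stands in for CacheImporter.Group; container ids are
// carried on the row here, whereas the real code gets them from addContainers().
data class GroupRow(val archive: Int, val group: Int, val version: Int, val containerId: Long)

// Batch insert that now *returns* the container ids, so callers can refer to
// the rows they just wrote.
fun addGroupRows(connection: Connection, groups: List<GroupRow>): List<Long> {
    connection.prepareStatement(
        // Column names follow the joins in the exporter query; the INSERT
        // itself is illustrative.
        "INSERT INTO groups (archive_id, group_id, truncated_version, container_id) VALUES (?, ?, ?, ?)"
    ).use { stmt ->
        for (group in groups) {
            stmt.setInt(1, group.archive)
            stmt.setInt(2, group.group)
            stmt.setInt(3, group.version and 0xFFFF)
            stmt.setLong(4, group.containerId)
            stmt.addBatch()
        }
        stmt.executeBatch()
    }
    return groups.map(GroupRow::containerId)
}

// Single-group convenience wrapper, mirroring the new addGroup() helper: an
// index (being a Group now) goes through exactly the same insertion path.
fun addGroupRow(connection: Connection, group: GroupRow): Long =
    addGroupRows(connection, listOf(group)).single()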
