Disable .dat2/.idx link if the cache is too big for a DiskStore

Signed-off-by: Graham <gpe@openrs2.org>
bzip2
Graham 3 years ago
parent a52955fe4b
commit cbb2a90388
  1. 24
      archive/src/main/kotlin/org/openrs2/archive/cache/CacheExporter.kt
  2. 67
      archive/src/main/resources/org/openrs2/archive/migrations/V6__blocks.sql
  3. 3
      archive/src/main/resources/org/openrs2/archive/templates/caches/index.html
  4. 3
      archive/src/main/resources/org/openrs2/archive/templates/caches/show.html
  5. 2
      cache/src/main/kotlin/org/openrs2/cache/DiskStore.kt

@@ -4,6 +4,7 @@ import com.fasterxml.jackson.annotation.JsonProperty
import io.netty.buffer.ByteBufAllocator
import io.netty.buffer.Unpooled
import org.openrs2.buffer.use
import org.openrs2.cache.DiskStore
import org.openrs2.cache.Js5Archive
import org.openrs2.cache.Js5Compression
import org.openrs2.cache.Js5MasterIndex
@@ -29,7 +30,8 @@ public class CacheExporter @Inject constructor(
val groups: Long,
val validKeys: Long,
val keys: Long,
val size: Long
val size: Long,
val blocks: Long
) {
public val allIndexesValid: Boolean = indexes == validIndexes && indexes != 0L
public val validIndexesFraction: Double = if (indexes == 0L) {
@@ -51,6 +53,12 @@ public class CacheExporter @Inject constructor(
} else {
validKeys.toDouble() / keys
}
/*
* The max block ID is conveniently also the max number of blocks, as
* zero is reserved.
*/
public val diskStoreValid: Boolean = blocks <= DiskStore.MAX_BLOCK
}
public data class Build(val major: Int, val minor: Int?) : Comparable<Build> {
@@ -139,13 +147,14 @@ public class CacheExporter @Inject constructor(
ms.groups,
ms.valid_keys,
ms.keys,
ms.size
ms.size,
ms.blocks
FROM master_indexes m
JOIN sources s ON s.master_index_id = m.id
JOIN games g ON g.id = s.game_id
LEFT JOIN master_index_stats ms ON ms.master_index_id = m.id
GROUP BY m.id, g.name, ms.valid_indexes, ms.indexes, ms.valid_groups, ms.groups, ms.valid_keys, ms.keys,
ms.size
ms.size, ms.blocks
) t
ORDER BY t.name ASC, t.builds[1] ASC, t.timestamp ASC
""".trimIndent()
@@ -168,7 +177,8 @@ public class CacheExporter @Inject constructor(
val validKeys = rows.getLong(10)
val keys = rows.getLong(11)
val size = rows.getLong(12)
Stats(validIndexes, indexes, validGroups, groups, validKeys, keys, size)
val blocks = rows.getLong(13)
Stats(validIndexes, indexes, validGroups, groups, validKeys, keys, size, blocks)
} else {
null
}
@@ -205,7 +215,8 @@ public class CacheExporter @Inject constructor(
ms.groups,
ms.valid_keys,
ms.keys,
ms.size
ms.size,
ms.blocks
FROM master_indexes m
JOIN containers c ON c.id = m.container_id
LEFT JOIN master_index_stats ms ON ms.master_index_id = m.id
@@ -237,7 +248,8 @@ public class CacheExporter @Inject constructor(
val validKeys = rows.getLong(7)
val keys = rows.getLong(8)
val size = rows.getLong(9)
Stats(validIndexes, indexes, validGroups, groups, validKeys, keys, size)
val blocks = rows.getLong(10)
Stats(validIndexes, indexes, validGroups, groups, validKeys, keys, size, blocks)
}
}
}

@@ -0,0 +1,67 @@
-- @formatter:off
-- Number of DiskStore blocks a group of `len` bytes occupies. Divisions are
-- integer ceiling divisions. Groups with an ID >= 65536 use the extended
-- block format (EXTENDED_BLOCK_HEADER_SIZE = 10, EXTENDED_BLOCK_DATA_SIZE =
-- 510 in DiskStore.kt), leaving 510 payload bytes per block; standard blocks
-- carry 512 payload bytes per block (presumably an 8-byte header in the same
-- 520-byte block — TODO confirm against DiskStore.BLOCK_HEADER_SIZE).
CREATE FUNCTION group_blocks(group_id INTEGER, len INTEGER) RETURNS INTEGER AS $$
SELECT CASE
-- an empty group is still written to one block
WHEN len = 0 THEN 1
-- extended blocks: ceil(len / 510)
WHEN group_id >= 65536 THEN (len + 509) / 510
-- standard blocks: ceil(len / 512)
ELSE (len + 511) / 512
END;
$$ LANGUAGE SQL IMMUTABLE PARALLEL SAFE;
-- Per-master-index statistics: validity counts for indexes/groups/keys plus
-- total byte size and total DiskStore block usage. The `blocks` column lets
-- the exporter decide whether a cache fits in a .dat2/.idx DiskStore (blocks
-- must not exceed DiskStore.MAX_BLOCK). Created as *_new and swapped into
-- place below so the old view remains queryable during the migration.
CREATE MATERIALIZED VIEW master_index_stats_new (
master_index_id,
valid_indexes,
indexes,
valid_groups,
groups,
valid_keys,
keys,
size,
blocks
) AS
SELECT
m.id,
-- COALESCE: master indexes with no resolvable archives/groups report zeros
COALESCE(a.valid_indexes, 0),
COALESCE(a.indexes, 0),
COALESCE(g.valid_groups, 0),
COALESCE(g.groups, 0),
COALESCE(g.valid_keys, 0),
COALESCE(g.keys, 0),
-- combined size/blocks of archive (Js5Index) containers and group containers
COALESCE(a.size, 0) + COALESCE(g.size, 0),
COALESCE(a.blocks, 0) + COALESCE(g.blocks, 0)
FROM master_indexes m
-- per-master-index archive statistics
LEFT JOIN (
SELECT
a.master_index_id,
-- an archive entry is valid if its container resolved, or it is the
-- zero version/CRC placeholder
COUNT(*) FILTER (WHERE c.id IS NOT NULL OR (a.version = 0 AND a.crc32 = 0)) AS valid_indexes,
COUNT(*) AS indexes,
SUM(length(c.data)) FILTER (WHERE c.id IS NOT NULL) AS size,
SUM(group_blocks(a.archive_id, length(c.data))) FILTER (WHERE c.id IS NOT NULL) AS blocks
FROM master_index_archives a
LEFT JOIN resolve_index(a.archive_id, a.crc32, a.version) c ON TRUE
GROUP BY a.master_index_id
) a ON a.master_index_id = m.id
-- per-master-index group statistics
LEFT JOIN (
SELECT
i.master_index_id,
COUNT(*) FILTER (WHERE c.id IS NOT NULL) AS valid_groups,
COUNT(*) AS groups,
-- encrypted groups count towards keys; they are valid if a key is
-- known or the group is an empty loc
COUNT(*) FILTER (WHERE c.encrypted AND (c.key_id IS NOT NULL OR c.empty_loc)) AS valid_keys,
COUNT(*) FILTER (WHERE c.encrypted) AS keys,
SUM(length(c.data)) FILTER (WHERE c.id IS NOT NULL) AS size,
SUM(group_blocks(ig.group_id, length(c.data))) FILTER (WHERE c.id IS NOT NULL) AS blocks
FROM resolved_indexes i
JOIN index_groups ig ON ig.container_id = i.container_id
LEFT JOIN resolve_group(i.archive_id, ig.group_id, ig.crc32, ig.version) c ON TRUE
-- NOTE(review): k is not referenced by any output column; joining on the
-- primary key cannot duplicate rows, so this join looks redundant — confirm
LEFT JOIN keys k ON k.id = c.key_id
GROUP BY i.master_index_id
) g ON g.master_index_id = m.id;
-- Unique index required so the view can be refreshed CONCURRENTLY.
CREATE UNIQUE INDEX ON master_index_stats_new (master_index_id);
-- Swap the new view into place: rename the old view and its index out of the
-- way, rename the new ones to the canonical names, then drop the old view.
ALTER MATERIALIZED VIEW master_index_stats RENAME TO master_index_stats_old;
ALTER INDEX master_index_stats_master_index_id_idx RENAME TO master_index_stats_old_master_index_id_idx;
ALTER MATERIALIZED VIEW master_index_stats_new RENAME TO master_index_stats;
ALTER INDEX master_index_stats_new_master_index_id_idx RENAME TO master_index_stats_master_index_id_idx;
DROP MATERIALIZED VIEW master_index_stats_old;

@@ -79,7 +79,8 @@
Download
</button>
<ul class="dropdown-menu">
<li><a th:href="${'/caches/' + cache.id + '/disk.zip'}"
<li th:if="${cache.stats != null and cache.stats.diskStoreValid}"><a
th:href="${'/caches/' + cache.id + '/disk.zip'}"
class="dropdown-item">Cache (.dat2/.idx)</a></li>
<li><a th:href="${'/caches/' + cache.id + '/flat-file.zip'}"
class="dropdown-item">Cache (Flat file)</a></li>

@@ -51,7 +51,8 @@
<td>
<div class="btn-toolbar">
<div class="btn-group me-2">
<a th:href="${'/caches/' + cache.id + '/disk.zip'}"
<a th:if="${cache.stats != null and cache.stats.diskStoreValid}"
th:href="${'/caches/' + cache.id + '/disk.zip'}"
class="btn btn-primary btn-sm">Cache (.dat2/.idx)</a>
<a th:href="${'/caches/' + cache.id + '/flat-file.zip'}"
class="btn btn-primary btn-sm">Cache (Flat file)</a>

@@ -472,7 +472,7 @@ public class DiskStore private constructor(
internal const val EXTENDED_BLOCK_HEADER_SIZE = 10
internal const val EXTENDED_BLOCK_DATA_SIZE = 510
internal const val MAX_BLOCK = (1 shl 24) - 1
public const val MAX_BLOCK: Int = (1 shl 24) - 1
private val TEMP_BUFFER_SIZE = max(INDEX_ENTRY_SIZE, max(BLOCK_HEADER_SIZE, EXTENDED_BLOCK_HEADER_SIZE))
private const val INDEX_BUFFER_SIZE = INDEX_ENTRY_SIZE * 1000

Loading…
Cancel
Save