Optimise uncompression of encrypted groups with invalid keys

Creating the InputStream first allows us to fail immediately if a key is
invalid, before we get the chance to allocate a huge ByteBuf based on an
incorrect uncompressed length.

Signed-off-by: Graham <gpe@openrs2.org>
Branch: master
Author: Graham
Parent: caaddad0ed
Commit: 26651618ef

1 changed file:
  cache/src/main/kotlin/org/openrs2/cache/Js5Compression.kt (46)
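
The reasoning above relies on decompressor constructors doing their header checks eagerly. A minimal, stand-alone sketch of that behaviour (not OpenRS2 code; plain java.util.zip gzip stands in for the cache's compression types, and the input is hypothetical garbage playing the role of data decrypted with a wrong key):

import java.io.ByteArrayInputStream
import java.io.IOException
import java.util.zip.GZIPInputStream

fun main() {
    // Stand-in for a group decrypted with an invalid key: the bytes are garbage,
    // so any length field read out of them is meaningless too.
    val garbage = ByteArray(32) { 0x55 }

    try {
        // GZIPInputStream reads and validates the gzip header in its constructor...
        GZIPInputStream(ByteArrayInputStream(garbage)).use { stream ->
            // ...so with a corrupt header we never reach the point of reading the
            // output, let alone allocating a buffer sized from a bogus length.
            println(stream.read())
        }
    } catch (ex: IOException) {
        println("Rejected up front: ${ex.message}")
    }
}

LZMA is the exception, as the comment retained in the second hunk below notes: it has no magic number, so a wrong key may not fail until decompression is underway.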

--- a/cache/src/main/kotlin/org/openrs2/cache/Js5Compression.kt
+++ b/cache/src/main/kotlin/org/openrs2/cache/Js5Compression.kt
@@ -147,8 +147,8 @@ public object Js5Compression {
                 throw IOException("Uncompressed length is negative: $uncompressedLen")
             }
 
-            plaintext.alloc().buffer(uncompressedLen, uncompressedLen).use { output ->
-                type.createInputStream(ByteBufInputStream(plaintext, len), uncompressedLen).use { inputStream ->
+            type.createInputStream(ByteBufInputStream(plaintext, len), uncompressedLen).use { inputStream ->
+                plaintext.alloc().buffer(uncompressedLen, uncompressedLen).use { output ->
                     var remaining = uncompressedLen
                     while (remaining > 0) {
                         val n = output.writeBytes(inputStream, remaining)
@@ -161,9 +161,9 @@ public object Js5Compression {
                     if (inputStream.read() != -1) {
                         throw IOException("Uncompressed data overflow")
                     }
-                }
 
-                return output.retain()
+                    return output.retain()
+                }
             }
         }
     }
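
The two hunks above change the strict path, which throws on failure. With the stream created first, any header validation happens before the buffer(uncompressedLen, uncompressedLen) allocation, so a key that corrupts the header or the length field fails before up to uncompressedLen bytes are reserved; calling retain() before the use blocks unwind is presumably what keeps the buffer alive for the caller while the blocks release their own references. A rough, self-contained sketch of the resulting shape, assuming Netty on the classpath; readExact and openStream are hypothetical names, and the manual release stands in for the use extension the diff calls on ByteBuf:

import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufAllocator
import java.io.IOException
import java.io.InputStream

// Hypothetical helper, not the OpenRS2 API: create the stream first, then the
// exact-size output buffer.
fun readExact(alloc: ByteBufAllocator, openStream: () -> InputStream, uncompressedLen: Int): ByteBuf {
    openStream().use { inputStream ->
        // Only reached once the stream constructor has accepted the header, so a
        // wrong key usually fails above, before this (possibly huge) allocation.
        val output = alloc.buffer(uncompressedLen, uncompressedLen)
        try {
            var remaining = uncompressedLen
            while (remaining > 0) {
                val n = output.writeBytes(inputStream, remaining)
                if (n == -1) {
                    throw IOException("Uncompressed data truncated")
                }
                remaining -= n
            }

            if (inputStream.read() != -1) {
                throw IOException("Uncompressed data overflow")
            }

            return output // ownership passes to the caller
        } catch (t: Throwable) {
            output.release() // release the buffer on any failure path
            throw t
        }
    }
}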
@@ -317,21 +317,21 @@ public object Js5Compression {
             val uncompressedLen = plaintext.readInt()
             check(uncompressedLen >= 0)
 
-            /**
-             * We don't pass uncompressedLen to the buffer here: in some cases,
-             * an incorrect key can produce a valid header (particularly for
-             * LZMA, which has no magic number). If we're unlucky,
-             * uncompressedLen will be a huge number (e.g. 1 or 2 gigabytes),
-             * which might OOM some environments if allocated up front.
-             *
-             * However, if the key is incorrect it's likely that actually
-             * attempting to uncompress the data will quickly produce an error,
-             * long before we need to actually read 1 or 2 gigabytes of data.
-             * We therefore allow the buffer to grow dynamically.
-             */
-            plaintext.alloc().buffer().use { output ->
-                try {
-                    type.createInputStream(ByteBufInputStream(plaintext, len), uncompressedLen).use { inputStream ->
+            try {
+                type.createInputStream(ByteBufInputStream(plaintext, len), uncompressedLen).use { inputStream ->
+                    /**
+                     * We don't pass uncompressedLen to the buffer here: in some cases,
+                     * an incorrect key can produce a valid header (particularly for
+                     * LZMA, which has no magic number). If we're unlucky,
+                     * uncompressedLen will be a huge number (e.g. 1 or 2 gigabytes),
+                     * which might OOM some environments if allocated up front.
+                     *
+                     * However, if the key is incorrect it's likely that actually
+                     * attempting to uncompress the data will quickly produce an error,
+                     * long before we need to actually read 1 or 2 gigabytes of data.
+                     * We therefore allow the buffer to grow dynamically.
+                     */
+                    plaintext.alloc().buffer().use { output ->
                         var remaining = uncompressedLen
                         while (remaining > 0) {
                             val n = output.writeBytes(inputStream, remaining)
@@ -346,12 +346,12 @@ public object Js5Compression {
                             // uncompressed data overflow
                             return null
                         }
-                    }
-                } catch (ex: IOException) {
-                    return null
-                }
 
-                return output.retain()
-            }
+                        return output.retain()
+                    }
+                }
+            } catch (ex: IOException) {
+                return null
+            }
         }
     }
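
These two hunks apply the same reordering to the lenient variant, which maps failures to null rather than throwing, apparently so callers can probe whether a key is plausible. Because uncompressedLen cannot be trusted here, the buffer is left unsized and grows on demand (see the comment carried over in the diff); wrapping the stream construction in the try means a header rejection now also comes out as null. A self-contained sketch of that shape, again assuming Netty, with hypothetical names and a success flag plus manual release in place of the ByteBuf use extension:

import io.netty.buffer.ByteBuf
import io.netty.buffer.ByteBufAllocator
import java.io.IOException
import java.io.InputStream

// Hypothetical helper, not the OpenRS2 API: any IOException, including one
// thrown while constructing the stream from a corrupt header, is treated as
// "wrong key" and mapped to null.
fun readIfValid(alloc: ByteBufAllocator, openStream: () -> InputStream, uncompressedLen: Int): ByteBuf? {
    try {
        openStream().use { inputStream ->
            // Unsized buffer: it grows dynamically because uncompressedLen may be garbage.
            val output = alloc.buffer()
            var success = false
            try {
                var remaining = uncompressedLen
                while (remaining > 0) {
                    val n = output.writeBytes(inputStream, remaining)
                    if (n == -1) {
                        return null // uncompressed data truncated
                    }
                    remaining -= n
                }

                if (inputStream.read() != -1) {
                    return null // uncompressed data overflow
                }

                success = true
                return output // ownership passes to the caller
            } finally {
                if (!success) {
                    output.release() // released on every failure path, kept on success
                }
            }
        }
    } catch (ex: IOException) {
        return null // header check or decompression failed: treat the key as invalid
    }
}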
