python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strncpy(instance.buf, large_src, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strncpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memcpy(large, instance.buf, sizeof(large))
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow2-memcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memset(instance.buf, 0x42, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow_field-memset.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memchr_inv(small, 0x7A, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow-memchr_inv.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memmove(large, instance.buf, sizeof(large))
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow2-memmove.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memchr(small, 0x7A, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow-memchr.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strlcpy(instance.buf, large_src, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strlcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memcpy(large, instance.buf, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow2_field-memcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strlcpy(small, large_src, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strlcpy-src.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memcpy(instance.buf, large, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow_field-memcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memmove(instance.buf, large, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow_field-memmove.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strscpy(instance.buf, large_src, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strscpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memcmp(small, large, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow-memcmp.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strcpy(small, large_src)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memset(instance.buf, 0x5A, sizeof(large_src))
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-memset.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memmove(large, instance.buf, sizeof(instance.buf) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow2_field-memmove.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 842 Software Compression
*
* Copyright (C) 2015 Dan Streetman, IBM Corp
*
* See 842.h for details of the 842 compressed format.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "842_compress"
#include <linux/hashtable.h>
#include "842.h"
#include "842_debugfs.h"
#define SW842_HASHTABLE8_BITS (10)
#define SW842_HASHTABLE4_BITS (11)
#define SW842_HASHTABLE2_BITS (10)
/* By default, we allow compressing input buffers of any length, but we must
* use the non-standard "short data" template so the decompressor can correctly
* reproduce the uncompressed data buffer at the right length. However the
* hardware 842 compressor will not recognize the "short data" template, and
* will fail to decompress any compressed buffer containing it (I have no idea
* why anyone would want to use software to compress and hardware to decompress
* but that's beside the point). This parameter forces the compression
* function to simply reject any input buffer that isn't a multiple of 8 bytes
* long, instead of using the "short data" template, so that all compressed
buffers produced by this function will be decompressible by the 842 hardware
* decompressor. Unless you have a specific need for that, leave this disabled
* so that any length buffer can be compressed.
*/
static bool sw842_strict;
module_param_named(strict, sw842_strict, bool, 0644);
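/*
 * Illustrative consequence of the parameter (derived from the length check
 * in sw842_compress() below): with strict=Y a 13-byte input is rejected
 * with -EINVAL while a 16-byte input is accepted; with strict=N (default)
 * the trailing 5 bytes of that 13-byte input are emitted with the "short
 * data" template instead.
 */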
static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */
{ I8, N0, N0, N0, 0x19 }, /* 8 */
{ I4, I4, N0, N0, 0x18 }, /* 18 */
{ I4, I2, I2, N0, 0x17 }, /* 25 */
{ I2, I2, I4, N0, 0x13 }, /* 25 */
{ I2, I2, I2, I2, 0x12 }, /* 32 */
{ I4, I2, D2, N0, 0x16 }, /* 33 */
{ I4, D2, I2, N0, 0x15 }, /* 33 */
{ I2, D2, I4, N0, 0x0e }, /* 33 */
{ D2, I2, I4, N0, 0x09 }, /* 33 */
{ I2, I2, I2, D2, 0x11 }, /* 40 */
{ I2, I2, D2, I2, 0x10 }, /* 40 */
{ I2, D2, I2, I2, 0x0d }, /* 40 */
{ D2, I2, I2, I2, 0x08 }, /* 40 */
{ I4, D4, N0, N0, 0x14 }, /* 41 */
{ D4, I4, N0, N0, 0x04 }, /* 41 */
{ I2, I2, D4, N0, 0x0f }, /* 48 */
{ I2, D2, I2, D2, 0x0c }, /* 48 */
{ I2, D4, I2, N0, 0x0b }, /* 48 */
{ D2, I2, I2, D2, 0x07 }, /* 48 */
{ D2, I2, D2, I2, 0x06 }, /* 48 */
{ D4, I2, I2, N0, 0x03 }, /* 48 */
{ I2, D2, D4, N0, 0x0a }, /* 56 */
{ D2, I2, D4, N0, 0x05 }, /* 56 */
{ D4, I2, D2, N0, 0x02 }, /* 56 */
{ D4, D2, I2, N0, 0x01 }, /* 56 */
{ D8, N0, N0, N0, 0x00 }, /* 64 */
};
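/*
 * Reading the table (worked example): each row lists the four ops covering
 * one 8-byte block plus the template code in the last column, and the
 * trailing comment is the payload size in bits.  Assuming the index widths
 * implied by these sums (I2_BITS = 8, I4_BITS = 9, I8_BITS = 8), the row
 * { I4, I2, I2, N0, 0x17 } costs 9 + 8 + 8 = 25 payload bits, not counting
 * the OP_BITS-wide template code itself.
 */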
struct sw842_hlist_node8 {
struct hlist_node node;
u64 data;
u8 index;
};
struct sw842_hlist_node4 {
struct hlist_node node;
u32 data;
u16 index;
};
struct sw842_hlist_node2 {
struct hlist_node node;
u16 data;
u8 index;
};
#define INDEX_NOT_FOUND (-1)
#define INDEX_NOT_CHECKED (-2)
struct sw842_param {
u8 *in;
u8 *instart;
u64 ilen;
u8 *out;
u64 olen;
u8 bit;
u64 data8[1];
u32 data4[2];
u16 data2[4];
int index8[1];
int index4[2];
int index2[4];
DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS);
DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS);
DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS);
struct sw842_hlist_node8 node8[1 << I8_BITS];
struct sw842_hlist_node4 node4[1 << I4_BITS];
struct sw842_hlist_node2 node2[1 << I2_BITS];
};
#define get_input_data(p, o, b) \
be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o))))
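/*
 * For example, get_input_data(p, 2, 32) expands to
 * be32_to_cpu(get_unaligned((__be32 *)((p)->in + (2)))), i.e. it reads the
 * second 4-byte half of the current block as a big-endian u32.
 */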
#define init_hashtable_nodes(p, b) do { \
int _i; \
hash_init((p)->htable##b); \
for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \
(p)->node##b[_i].index = _i; \
(p)->node##b[_i].data = 0; \
INIT_HLIST_NODE(&(p)->node##b[_i].node); \
} \
} while (0)
#define find_index(p, b, n) ({ \
struct sw842_hlist_node##b *_n; \
p->index##b[n] = INDEX_NOT_FOUND; \
hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \
if (p->data##b[n] == _n->data) { \
p->index##b[n] = _n->index; \
break; \
} \
} \
p->index##b[n] >= 0; \
})
#define check_index(p, b, n) \
((p)->index##b[n] == INDEX_NOT_CHECKED \
? find_index(p, b, n) \
: (p)->index##b[n] >= 0)
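/*
 * For example, check_index(p, 4, 1) lazily looks up the second 4-byte word
 * of the current block (p->data4[1]) in htable4 the first time it is used,
 * caching the matching fifo index (or INDEX_NOT_FOUND) in p->index4[1];
 * later checks for the same block just test the cached value.
 */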
#define replace_hash(p, b, i, d) do { \
struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \
hash_del(&_n->node); \
_n->data = (p)->data##b[d]; \
pr_debug("add hash index%x %x pos %x data %lx\n", b, \
(unsigned int)_n->index, \
(unsigned int)((p)->in - (p)->instart), \
(unsigned long)_n->data); \
hash_add((p)->htable##b, &_n->node, _n->data); \
} while (0)
static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
static int add_bits(struct sw842_param *p, u64 d, u8 n);
static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s)
{
int ret;
if (n <= s)
return -EINVAL;
ret = add_bits(p, d >> s, n - s);
if (ret)
return ret;
return add_bits(p, d & GENMASK_ULL(s - 1, 0), s);
}
static int add_bits(struct sw842_param *p, u64 d, u8 n)
{
int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits;
u64 o;
u8 *out = p->out;
pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d);
if (n > 64)
return -EINVAL;
/* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0),
* or if we're at the end of the output buffer and would write past end
*/
if (bits > 64)
return __split_add_bits(p, d, n, 32);
else if (p->olen < 8 && bits > 32 && bits <= 56)
return __split_add_bits(p, d, n, 16);
else if (p->olen < 4 && bits > 16 && bits <= 24)
return __split_add_bits(p, d, n, 8);
if (DIV_ROUND_UP(bits, 8) > p->olen)
return -ENOSPC;
o = *out & bmask[b];
d <<= s;
if (bits <= 8)
*out = o | d;
else if (bits <= 16)
put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out);
else if (bits <= 24)
put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out);
else if (bits <= 32)
put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out);
else if (bits <= 40)
put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out);
else if (bits <= 48)
put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out);
else if (bits <= 56)
put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out);
else
put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out);
p->bit += n;
if (p->bit > 7) {
p->out += p->bit / 8;
p->olen -= p->bit / 8;
p->bit %= 8;
}
return 0;
}
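/*
 * Worked example of the packing above: with p->bit == 3 and n == 13,
 * bits == 16 and s == 0.  o = *out & bmask[3] keeps the 3 bits already
 * written at the top of the current byte, and since bits <= 16 the value
 * (o << 8 | d) is stored as a big-endian 16-bit word at *out.  Afterwards
 * p->bit becomes 16, so the output pointer advances by 2 bytes, olen drops
 * by 2 and p->bit wraps back to 0.
 */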
static int add_template(struct sw842_param *p, u8 c)
{
int ret, i, b = 0;
u8 *t = comp_ops[c];
bool inv = false;
if (c >= OPS_MAX)
return -EINVAL;
pr_debug("template %x\n", t[4]);
ret = add_bits(p, t[4], OP_BITS);
if (ret)
return ret;
for (i = 0; i < 4; i++) {
pr_debug("op %x\n", t[i]);
switch (t[i] & OP_AMOUNT) {
case OP_AMOUNT_8:
if (b)
inv = true;
else if (t[i] & OP_ACTION_INDEX)
ret = add_bits(p, p->index8[0], I8_BITS);
else if (t[i] & OP_ACTION_DATA)
ret = add_bits(p, p->data8[0], 64);
else
inv = true;
break;
case OP_AMOUNT_4:
if (b == 2 && t[i] & OP_ACTION_DATA)
ret = add_bits(p, get_input_data(p, 2, 32), 32);
else if (b != 0 && b != 4)
inv = true;
else if (t[i] & OP_ACTION_INDEX)
ret = add_bits(p, p->index4[b >> 2], I4_BITS);
else if (t[i] & OP_ACTION_DATA)
ret = add_bits(p, p->data4[b >> 2], 32);
else
inv = true;
break;
case OP_AMOUNT_2:
if (b != 0 && b != 2 && b != 4 && b != 6)
inv = true;
if (t[i] & OP_ACTION_INDEX)
ret = add_bits(p, p->index2[b >> 1], I2_BITS);
else if (t[i] & OP_ACTION_DATA)
ret = add_bits(p, p->data2[b >> 1], 16);
else
inv = true;
break;
case OP_AMOUNT_0:
inv = (b != 8) || !(t[i] & OP_ACTION_NOOP);
break;
default:
inv = true;
break;
}
if (ret)
return ret;
if (inv) {
pr_err("Invalid templ %x op %d : %x %x %x %x\n",
c, i, t[0], t[1], t[2], t[3]);
return -EINVAL;
}
b += t[i] & OP_AMOUNT;
}
if (b != 8) {
pr_err("Invalid template %x len %x : %x %x %x %x\n",
c, b, t[0], t[1], t[2], t[3]);
return -EINVAL;
}
if (sw842_template_counts)
atomic_inc(&template_count[t[4]]);
return 0;
}
static int add_repeat_template(struct sw842_param *p, u8 r)
{
int ret;
/* repeat param is 0-based */
if (!r || --r > REPEAT_BITS_MAX)
return -EINVAL;
ret = add_bits(p, OP_REPEAT, OP_BITS);
if (ret)
return ret;
ret = add_bits(p, r, REPEAT_BITS);
if (ret)
return ret;
if (sw842_template_counts)
atomic_inc(&template_repeat_count);
return 0;
}
static int add_short_data_template(struct sw842_param *p, u8 b)
{
int ret, i;
if (!b || b > SHORT_DATA_BITS_MAX)
return -EINVAL;
ret = add_bits(p, OP_SHORT_DATA, OP_BITS);
if (ret)
return ret;
ret = add_bits(p, b, SHORT_DATA_BITS);
if (ret)
return ret;
for (i = 0; i < b; i++) {
ret = add_bits(p, p->in[i], 8);
if (ret)
return ret;
}
if (sw842_template_counts)
atomic_inc(&template_short_data_count);
return 0;
}
static int add_zeros_template(struct sw842_param *p)
{
int ret = add_bits(p, OP_ZEROS, OP_BITS);
if (ret)
return ret;
if (sw842_template_counts)
atomic_inc(&template_zeros_count);
return 0;
}
static int add_end_template(struct sw842_param *p)
{
int ret = add_bits(p, OP_END, OP_BITS);
if (ret)
return ret;
if (sw842_template_counts)
atomic_inc(&template_end_count);
return 0;
}
static bool check_template(struct sw842_param *p, u8 c)
{
u8 *t = comp_ops[c];
int i, match, b = 0;
if (c >= OPS_MAX)
return false;
for (i = 0; i < 4; i++) {
if (t[i] & OP_ACTION_INDEX) {
if (t[i] & OP_AMOUNT_2)
match = check_index(p, 2, b >> 1);
else if (t[i] & OP_AMOUNT_4)
match = check_index(p, 4, b >> 2);
else if (t[i] & OP_AMOUNT_8)
match = check_index(p, 8, 0);
else
return false;
if (!match)
return false;
}
b += t[i] & OP_AMOUNT;
}
return true;
}
static void get_next_data(struct sw842_param *p)
{
p->data8[0] = get_input_data(p, 0, 64);
p->data4[0] = get_input_data(p, 0, 32);
p->data4[1] = get_input_data(p, 4, 32);
p->data2[0] = get_input_data(p, 0, 16);
p->data2[1] = get_input_data(p, 2, 16);
p->data2[2] = get_input_data(p, 4, 16);
p->data2[3] = get_input_data(p, 6, 16);
}
/* update the hashtable entries.
* only call this after finding/adding the current template
* the dataN fields for the current 8 byte block must be already updated
*/
static void update_hashtables(struct sw842_param *p)
{
u64 pos = p->in - p->instart;
u64 n8 = (pos >> 3) % (1 << I8_BITS);
u64 n4 = (pos >> 2) % (1 << I4_BITS);
u64 n2 = (pos >> 1) % (1 << I2_BITS);
replace_hash(p, 8, n8, 0);
replace_hash(p, 4, n4, 0);
replace_hash(p, 4, n4, 1);
replace_hash(p, 2, n2, 0);
replace_hash(p, 2, n2, 1);
replace_hash(p, 2, n2, 2);
replace_hash(p, 2, n2, 3);
}
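/*
 * Worked example (assuming I8_BITS = 8, I4_BITS = 9, I2_BITS = 8 as implied
 * by the comp_ops size comments): at block offset pos = 4112,
 * n8 = (4112 >> 3) % 256 = 2, n4 = (4112 >> 2) % 512 = 4 and
 * n2 = (4112 >> 1) % 256 = 8, so node8[2], node4[4..5] and node2[8..11] are
 * rehashed with the contents of this 8-byte block.
 */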
/* find the next template to use, and add it
* the p->dataN fields must already be set for the current 8 byte block
*/
static int process_next(struct sw842_param *p)
{
int ret, i;
p->index8[0] = INDEX_NOT_CHECKED;
p->index4[0] = INDEX_NOT_CHECKED;
p->index4[1] = INDEX_NOT_CHECKED;
p->index2[0] = INDEX_NOT_CHECKED;
p->index2[1] = INDEX_NOT_CHECKED;
p->index2[2] = INDEX_NOT_CHECKED;
p->index2[3] = INDEX_NOT_CHECKED;
/* check up to OPS_MAX - 1; last op is our fallback */
for (i = 0; i < OPS_MAX - 1; i++) {
if (check_template(p, i))
break;
}
ret = add_template(p, i);
if (ret)
return ret;
return 0;
}
/**
* sw842_compress
*
* Compress the uncompressed buffer of length @ilen at @in to the output buffer
* @out, using no more than @olen bytes, using the 842 compression format.
*
* Returns: 0 on success, error on failure. The @olen parameter
* will contain the number of output bytes written on success, or
* 0 on error.
*/
int sw842_compress(const u8 *in, unsigned int ilen,
u8 *out, unsigned int *olen, void *wmem)
{
struct sw842_param *p = (struct sw842_param *)wmem;
int ret;
u64 last, next, pad, total;
u8 repeat_count = 0;
u32 crc;
BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS);
init_hashtable_nodes(p, 8);
init_hashtable_nodes(p, 4);
init_hashtable_nodes(p, 2);
p->in = (u8 *)in;
p->instart = p->in;
p->ilen = ilen;
p->out = out;
p->olen = *olen;
p->bit = 0;
total = p->olen;
*olen = 0;
/* if using strict mode, we can only compress a multiple of 8 */
if (sw842_strict && (ilen % 8)) {
pr_err("Using strict mode, can't compress len %d\n", ilen);
return -EINVAL;
}
/* let's compress at least 8 bytes, mkay? */
if (unlikely(ilen < 8))
goto skip_comp;
/* make initial 'last' different so we don't match the first time */
last = ~get_unaligned((u64 *)p->in);
while (p->ilen > 7) {
next = get_unaligned((u64 *)p->in);
/* must get the next data, as we need to update the hashtable
* entries with the new data every time
*/
get_next_data(p);
/* we don't care about endianness in last or next;
* we're just comparing 8 bytes to another 8 bytes,
* they're both the same endianness
*/
if (next == last) {
/* repeat count bits are 0-based, so we stop at +1 */
if (++repeat_count <= REPEAT_BITS_MAX)
goto repeat;
}
if (repeat_count) {
ret = add_repeat_template(p, repeat_count);
repeat_count = 0;
if (next == last) /* reached max repeat bits */
goto repeat;
}
if (next == 0)
ret = add_zeros_template(p);
else
ret = process_next(p);
if (ret)
return ret;
repeat:
last = next;
update_hashtables(p);
p->in += 8;
p->ilen -= 8;
}
if (repeat_count) {
ret = add_repeat_template(p, repeat_count);
if (ret)
return ret;
}
skip_comp:
if (p->ilen > 0) {
ret = add_short_data_template(p, p->ilen);
if (ret)
return ret;
p->in += p->ilen;
p->ilen = 0;
}
ret = add_end_template(p);
if (ret)
return ret;
/*
* crc(0:31) is appended to target data starting with the next
* bit after End of stream template.
* nx842 calculates CRC for data in big-endian format. So doing
* same here so that sw842 decompression can be used for both
* compressed data.
*/
crc = crc32_be(0, in, ilen);
ret = add_bits(p, crc, CRC_BITS);
if (ret)
return ret;
if (p->bit) {
p->out++;
p->olen--;
p->bit = 0;
}
/* pad compressed length to multiple of 8 */
pad = (8 - ((total - p->olen) % 8)) % 8;
if (pad) {
if (pad > p->olen) /* we were so close! */
return -ENOSPC;
memset(p->out, 0, pad);
p->out += pad;
p->olen -= pad;
}
if (unlikely((total - p->olen) > UINT_MAX))
return -ENOSPC;
*olen = total - p->olen;
return 0;
}
EXPORT_SYMBOL_GPL(sw842_compress);
static int __init sw842_init(void)
{
if (sw842_template_counts)
sw842_debugfs_create();
return 0;
}
module_init(sw842_init);
static void __exit sw842_exit(void)
{
if (sw842_template_counts)
sw842_debugfs_remove();
}
module_exit(sw842_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software 842 Compressor");
MODULE_AUTHOR("Dan Streetman <[email protected]>");
| linux-master | lib/842/842_compress.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 842 Software Decompression
*
* Copyright (C) 2015 Dan Streetman, IBM Corp
*
* See 842.h for details of the 842 compressed format.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "842_decompress"
#include "842.h"
#include "842_debugfs.h"
/* rolling fifo sizes */
#define I2_FIFO_SIZE (2 * (1 << I2_BITS))
#define I4_FIFO_SIZE (4 * (1 << I4_BITS))
#define I8_FIFO_SIZE (8 * (1 << I8_BITS))
static u8 decomp_ops[OPS_MAX][4] = {
{ D8, N0, N0, N0 },
{ D4, D2, I2, N0 },
{ D4, I2, D2, N0 },
{ D4, I2, I2, N0 },
{ D4, I4, N0, N0 },
{ D2, I2, D4, N0 },
{ D2, I2, D2, I2 },
{ D2, I2, I2, D2 },
{ D2, I2, I2, I2 },
{ D2, I2, I4, N0 },
{ I2, D2, D4, N0 },
{ I2, D4, I2, N0 },
{ I2, D2, I2, D2 },
{ I2, D2, I2, I2 },
{ I2, D2, I4, N0 },
{ I2, I2, D4, N0 },
{ I2, I2, D2, I2 },
{ I2, I2, I2, D2 },
{ I2, I2, I2, I2 },
{ I2, I2, I4, N0 },
{ I4, D4, N0, N0 },
{ I4, D2, I2, N0 },
{ I4, I2, D2, N0 },
{ I4, I2, I2, N0 },
{ I4, I4, N0, N0 },
{ I8, N0, N0, N0 }
};
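/*
 * The table is indexed by template code, so decomp_ops[o] lists the ops for
 * template o; e.g. decomp_ops[0x19] = { I8, N0, N0, N0 } mirrors the
 * compress-side row { I8, N0, N0, N0, 0x19 } in 842_compress.c.
 */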
struct sw842_param {
u8 *in;
u8 bit;
u64 ilen;
u8 *out;
u8 *ostart;
u64 olen;
};
#define beN_to_cpu(d, s) \
((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
(s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
(s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
0)
static int next_bits(struct sw842_param *p, u64 *d, u8 n);
static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s)
{
u64 tmp = 0;
int ret;
if (n <= s) {
pr_debug("split_next_bits invalid n %u s %u\n", n, s);
return -EINVAL;
}
ret = next_bits(p, &tmp, n - s);
if (ret)
return ret;
ret = next_bits(p, d, s);
if (ret)
return ret;
*d |= tmp << s;
return 0;
}
static int next_bits(struct sw842_param *p, u64 *d, u8 n)
{
u8 *in = p->in, b = p->bit, bits = b + n;
if (n > 64) {
pr_debug("next_bits invalid n %u\n", n);
return -EINVAL;
}
/* split this up if reading > 8 bytes, or if we're at the end of
* the input buffer and would read past the end
*/
if (bits > 64)
return __split_next_bits(p, d, n, 32);
else if (p->ilen < 8 && bits > 32 && bits <= 56)
return __split_next_bits(p, d, n, 16);
else if (p->ilen < 4 && bits > 16 && bits <= 24)
return __split_next_bits(p, d, n, 8);
if (DIV_ROUND_UP(bits, 8) > p->ilen)
return -EOVERFLOW;
if (bits <= 8)
*d = *in >> (8 - bits);
else if (bits <= 16)
*d = be16_to_cpu(get_unaligned((__be16 *)in)) >> (16 - bits);
else if (bits <= 32)
*d = be32_to_cpu(get_unaligned((__be32 *)in)) >> (32 - bits);
else
*d = be64_to_cpu(get_unaligned((__be64 *)in)) >> (64 - bits);
*d &= GENMASK_ULL(n - 1, 0);
p->bit += n;
if (p->bit > 7) {
p->in += p->bit / 8;
p->ilen -= p->bit / 8;
p->bit %= 8;
}
return 0;
}
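/*
 * Worked example, mirroring add_bits() on the compression side: with
 * p->bit == 3 and n == 13, bits == 16, so a big-endian 16-bit word is read
 * at *in, shifted right by (16 - bits) == 0 and masked with
 * GENMASK_ULL(12, 0); the 3 already-consumed top bits are dropped by the
 * mask and the low 13 bits are returned.  p->bit then becomes 16, advancing
 * the input pointer by 2 bytes.
 */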
static int do_data(struct sw842_param *p, u8 n)
{
u64 v;
int ret;
if (n > p->olen)
return -ENOSPC;
ret = next_bits(p, &v, n * 8);
if (ret)
return ret;
switch (n) {
case 2:
put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out);
break;
case 4:
put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out);
break;
case 8:
put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out);
break;
default:
return -EINVAL;
}
p->out += n;
p->olen -= n;
return 0;
}
static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
{
u64 index, offset, total = round_down(p->out - p->ostart, 8);
int ret;
ret = next_bits(p, &index, bits);
if (ret)
return ret;
offset = index * size;
/* a ring buffer of fsize is used; correct the offset */
if (total > fsize) {
/* this is where the current fifo is */
u64 section = round_down(total, fsize);
/* the current pos in the fifo */
u64 pos = total - section;
/* if the offset is past/at the pos, we need to
* go back to the last fifo section
*/
if (offset >= pos)
section -= fsize;
offset += section;
}
if (offset + size > total) {
pr_debug("index%x %lx points past end %lx\n", size,
(unsigned long)offset, (unsigned long)total);
return -EINVAL;
}
if (size != 2 && size != 4 && size != 8)
WARN(1, "__do_index invalid size %x\n", size);
else
pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
size, (unsigned long)index,
(unsigned long)(index * size), (unsigned long)offset,
(unsigned long)total,
(unsigned long)beN_to_cpu(&p->ostart[offset], size));
memcpy(p->out, &p->ostart[offset], size);
p->out += size;
p->olen -= size;
return 0;
}
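/*
 * Worked example of the fifo correction (size == 8, so with I8_BITS = 8 the
 * fifo holds I8_FIFO_SIZE = 2048 bytes): if 5000 output bytes exist so far,
 * section = round_down(5000, 2048) = 4096 and pos = 904.  An index of 200
 * gives offset = 1600 >= pos, so it refers to the previous fifo section:
 * offset = 1600 + (4096 - 2048) = 3648.  An index of 50 gives offset = 400,
 * which is < pos and stays in the current section: offset = 400 + 4096 = 4496.
 */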
static int do_index(struct sw842_param *p, u8 n)
{
switch (n) {
case 2:
return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE);
case 4:
return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE);
case 8:
return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE);
default:
return -EINVAL;
}
}
static int do_op(struct sw842_param *p, u8 o)
{
int i, ret = 0;
if (o >= OPS_MAX)
return -EINVAL;
for (i = 0; i < 4; i++) {
u8 op = decomp_ops[o][i];
pr_debug("op is %x\n", op);
switch (op & OP_ACTION) {
case OP_ACTION_DATA:
ret = do_data(p, op & OP_AMOUNT);
break;
case OP_ACTION_INDEX:
ret = do_index(p, op & OP_AMOUNT);
break;
case OP_ACTION_NOOP:
break;
default:
pr_err("Internal error, invalid op %x\n", op);
return -EINVAL;
}
if (ret)
return ret;
}
if (sw842_template_counts)
atomic_inc(&template_count[o]);
return 0;
}
/**
* sw842_decompress
*
* Decompress the 842-compressed buffer of length @ilen at @in
* to the output buffer @out, using no more than @olen bytes.
*
* The compressed buffer must be only a single 842-compressed buffer,
* with the standard format described in the comments in 842.h
* Processing will stop when the 842 "END" template is detected,
* not the end of the buffer.
*
* Returns: 0 on success, error on failure. The @olen parameter
* will contain the number of output bytes written on success, or
* 0 on error.
*/
int sw842_decompress(const u8 *in, unsigned int ilen,
u8 *out, unsigned int *olen)
{
struct sw842_param p;
int ret;
u64 op, rep, tmp, bytes, total;
u64 crc;
p.in = (u8 *)in;
p.bit = 0;
p.ilen = ilen;
p.out = out;
p.ostart = out;
p.olen = *olen;
total = p.olen;
*olen = 0;
do {
ret = next_bits(&p, &op, OP_BITS);
if (ret)
return ret;
pr_debug("template is %lx\n", (unsigned long)op);
switch (op) {
case OP_REPEAT:
ret = next_bits(&p, &rep, REPEAT_BITS);
if (ret)
return ret;
if (p.out == out) /* no previous bytes */
return -EINVAL;
/* copy rep + 1 */
rep++;
if (rep * 8 > p.olen)
return -ENOSPC;
while (rep-- > 0) {
memcpy(p.out, p.out - 8, 8);
p.out += 8;
p.olen -= 8;
}
if (sw842_template_counts)
atomic_inc(&template_repeat_count);
break;
case OP_ZEROS:
if (8 > p.olen)
return -ENOSPC;
memset(p.out, 0, 8);
p.out += 8;
p.olen -= 8;
if (sw842_template_counts)
atomic_inc(&template_zeros_count);
break;
case OP_SHORT_DATA:
ret = next_bits(&p, &bytes, SHORT_DATA_BITS);
if (ret)
return ret;
if (!bytes || bytes > SHORT_DATA_BITS_MAX)
return -EINVAL;
while (bytes-- > 0) {
ret = next_bits(&p, &tmp, 8);
if (ret)
return ret;
*p.out = (u8)tmp;
p.out++;
p.olen--;
}
if (sw842_template_counts)
atomic_inc(&template_short_data_count);
break;
case OP_END:
if (sw842_template_counts)
atomic_inc(&template_end_count);
break;
default: /* use template */
ret = do_op(&p, op);
if (ret)
return ret;
break;
}
} while (op != OP_END);
/*
* crc(0:31) is saved in compressed data starting with the
* next bit after End of stream template.
*/
ret = next_bits(&p, &crc, CRC_BITS);
if (ret)
return ret;
/*
* Validate CRC saved in compressed data.
*/
if (crc != (u64)crc32_be(0, out, total - p.olen)) {
pr_debug("CRC mismatch for decompression\n");
return -EINVAL;
}
if (unlikely((total - p.olen) > UINT_MAX))
return -ENOSPC;
*olen = total - p.olen;
return 0;
}
EXPORT_SYMBOL_GPL(sw842_decompress);
static int __init sw842_init(void)
{
if (sw842_template_counts)
sw842_debugfs_create();
return 0;
}
module_init(sw842_init);
static void __exit sw842_exit(void)
{
if (sw842_template_counts)
sw842_debugfs_remove();
}
module_exit(sw842_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software 842 Decompressor");
MODULE_AUTHOR("Dan Streetman <[email protected]>");
| linux-master | lib/842/842_decompress.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2014 Seth Jennings <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%s: %s\n", THIS_MODULE->name,
"this has been live patched");
return 0;
}
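/*
 * Rough usage sketch (the exact selftest flow may differ): once the patch
 * is enabled, reads of /proc/cmdline are routed to the function above
 * instead of cmdline_proc_show():
 *
 *	# insmod test_klp_livepatch.ko
 *	# cat /proc/cmdline
 *	test_klp_livepatch: this has been live patched
 *	# echo 0 > /sys/kernel/livepatch/test_klp_livepatch/enabled
 */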
static struct klp_func funcs[] = {
{
.old_name = "cmdline_proc_show",
.new_func = livepatch_cmdline_proc_show,
}, { }
};
static struct klp_object objs[] = {
{
/* name being NULL means vmlinux */
.funcs = funcs,
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int test_klp_livepatch_init(void)
{
return klp_enable_patch(&patch);
}
static void test_klp_livepatch_exit(void)
{
}
module_init(test_klp_livepatch_init);
module_exit(test_klp_livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: livepatch module");
| linux-master | lib/livepatch/test_klp_livepatch.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Joe Lawrence <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
static int pre_patch_ret;
module_param(pre_patch_ret, int, 0644);
MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
static const char *const module_state[] = {
[MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
[MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
[MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
[MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
};
static void callback_info(const char *callback, struct klp_object *obj)
{
if (obj->mod)
pr_info("%s: %s -> %s\n", callback, obj->mod->name,
module_state[obj->mod->state]);
else
pr_info("%s: vmlinux\n", callback);
}
/* Executed on object patching (ie, patch enablement) */
static int pre_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
return pre_patch_ret;
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
/* Executed on object unpatching (ie, patch disablement) */
static void pre_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
static void patched_work_func(struct work_struct *work)
{
pr_info("%s\n", __func__);
}
static struct klp_func no_funcs[] = {
{}
};
static struct klp_func busymod_funcs[] = {
{
.old_name = "busymod_work_func",
.new_func = patched_work_func,
}, {}
};
static struct klp_object objs[] = {
{
.name = NULL, /* vmlinux */
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, {
.name = "test_klp_callbacks_mod",
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, {
.name = "test_klp_callbacks_busy",
.funcs = busymod_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
};
static int test_klp_callbacks_demo_init(void)
{
return klp_enable_patch(&patch);
}
static void test_klp_callbacks_demo_exit(void)
{
}
module_init(test_klp_callbacks_demo_init);
module_exit(test_klp_callbacks_demo_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: livepatch demo");
| linux-master | lib/livepatch/test_klp_callbacks_demo.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Joe Lawrence <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
static int replace;
module_param(replace, int, 0644);
MODULE_PARM_DESC(replace, "replace (default=0)");
#include <linux/seq_file.h>
static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%s: %s\n", THIS_MODULE->name,
"this has been live patched");
return 0;
}
static struct klp_func funcs[] = {
{
.old_name = "meminfo_proc_show",
.new_func = livepatch_meminfo_proc_show,
}, {}
};
static struct klp_object objs[] = {
{
/* name being NULL means vmlinux */
.funcs = funcs,
}, {}
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
/* set .replace in the init function below for demo purposes */
};
static int test_klp_atomic_replace_init(void)
{
patch.replace = replace;
return klp_enable_patch(&patch);
}
static void test_klp_atomic_replace_exit(void)
{
}
module_init(test_klp_atomic_replace_init);
module_exit(test_klp_atomic_replace_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: atomic replace");
| linux-master | lib/livepatch/test_klp_atomic_replace.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Joe Lawrence <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
static int replace;
module_param(replace, int, 0644);
MODULE_PARM_DESC(replace, "replace (default=0)");
static const char *const module_state[] = {
[MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
[MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
[MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
[MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
};
static void callback_info(const char *callback, struct klp_object *obj)
{
if (obj->mod)
pr_info("%s: %s -> %s\n", callback, obj->mod->name,
module_state[obj->mod->state]);
else
pr_info("%s: vmlinux\n", callback);
}
/* Executed on object patching (ie, patch enablement) */
static int pre_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
return 0;
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
/* Executed on object unpatching (ie, patch disablement) */
static void pre_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
}
static struct klp_func no_funcs[] = {
{ }
};
static struct klp_object objs[] = {
{
.name = NULL, /* vmlinux */
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
/* set .replace in the init function below for demo purposes */
};
static int test_klp_callbacks_demo2_init(void)
{
patch.replace = replace;
return klp_enable_patch(&patch);
}
static void test_klp_callbacks_demo2_exit(void)
{
}
module_init(test_klp_callbacks_demo2_init);
module_exit(test_klp_callbacks_demo2_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: livepatch demo2");
| linux-master | lib/livepatch/test_klp_callbacks_demo2.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 SUSE
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/livepatch.h>
#define CONSOLE_LOGLEVEL_STATE 1
/* Version 2 supports migration. */
#define CONSOLE_LOGLEVEL_STATE_VERSION 2
static const char *const module_state[] = {
[MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
[MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
[MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
[MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
};
static void callback_info(const char *callback, struct klp_object *obj)
{
if (obj->mod)
pr_info("%s: %s -> %s\n", callback, obj->mod->name,
module_state[obj->mod->state]);
else
pr_info("%s: vmlinux\n", callback);
}
static struct klp_patch patch;
static int allocate_loglevel_state(void)
{
struct klp_state *loglevel_state, *prev_loglevel_state;
prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
if (prev_loglevel_state) {
pr_info("%s: space to store console_loglevel already allocated\n",
__func__);
return 0;
}
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return -EINVAL;
loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
if (!loglevel_state->data)
return -ENOMEM;
pr_info("%s: allocating space to store console_loglevel\n",
__func__);
return 0;
}
static void fix_console_loglevel(void)
{
struct klp_state *loglevel_state, *prev_loglevel_state;
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return;
prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
if (prev_loglevel_state) {
pr_info("%s: taking over the console_loglevel change\n",
__func__);
loglevel_state->data = prev_loglevel_state->data;
return;
}
pr_info("%s: fixing console_loglevel\n", __func__);
*(int *)loglevel_state->data = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
}
static void restore_console_loglevel(void)
{
struct klp_state *loglevel_state, *prev_loglevel_state;
prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
if (prev_loglevel_state) {
pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
__func__);
return;
}
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return;
pr_info("%s: restoring console_loglevel\n", __func__);
console_loglevel = *(int *)loglevel_state->data;
}
static void free_loglevel_state(void)
{
struct klp_state *loglevel_state, *prev_loglevel_state;
prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
if (prev_loglevel_state) {
pr_info("%s: keeping space to store console_loglevel\n",
__func__);
return;
}
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return;
pr_info("%s: freeing space for the stored console_loglevel\n",
__func__);
kfree(loglevel_state->data);
}
/* Executed on object patching (ie, patch enablement) */
static int pre_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
return allocate_loglevel_state();
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
fix_console_loglevel();
}
/* Executed on object unpatching (ie, patch disablement) */
static void pre_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
restore_console_loglevel();
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
free_loglevel_state();
}
static struct klp_func no_funcs[] = {
{}
};
static struct klp_object objs[] = {
{
.name = NULL, /* vmlinux */
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, { }
};
static struct klp_state states[] = {
{
.id = CONSOLE_LOGLEVEL_STATE,
.version = CONSOLE_LOGLEVEL_STATE_VERSION,
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
.states = states,
.replace = true,
};
static int test_klp_callbacks_demo_init(void)
{
return klp_enable_patch(&patch);
}
static void test_klp_callbacks_demo_exit(void)
{
}
module_init(test_klp_callbacks_demo_init);
module_exit(test_klp_callbacks_demo_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Petr Mladek <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: system state modification");
| linux-master | lib/livepatch/test_klp_state2.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Joe Lawrence <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
/* load/run-time control from sysfs writer */
static bool block_transition;
module_param(block_transition, bool, 0644);
MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
static void busymod_work_func(struct work_struct *work);
static DECLARE_WORK(work, busymod_work_func);
static DECLARE_COMPLETION(busymod_work_started);
static void busymod_work_func(struct work_struct *work)
{
pr_info("%s enter\n", __func__);
complete(&busymod_work_started);
while (READ_ONCE(block_transition)) {
/*
* Busy-wait until the sysfs writer has acknowledged a
* blocked transition and clears the flag.
*/
msleep(20);
}
pr_info("%s exit\n", __func__);
}
static int test_klp_callbacks_busy_init(void)
{
pr_info("%s\n", __func__);
schedule_work(&work);
/*
* To synchronize kernel messages, hold the init function from
* exiting until the work function's entry message has printed.
*/
wait_for_completion(&busymod_work_started);
if (!block_transition) {
/*
* Serialize output: print all messages from the work
* function before returning from init().
*/
flush_work(&work);
}
return 0;
}
static void test_klp_callbacks_busy_exit(void)
{
WRITE_ONCE(block_transition, false);
flush_work(&work);
pr_info("%s\n", __func__);
}
module_init(test_klp_callbacks_busy_init);
module_exit(test_klp_callbacks_busy_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: busy target module");
| linux-master | lib/livepatch/test_klp_callbacks_busy.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Joe Lawrence <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/livepatch.h>
#include <linux/slab.h>
/*
* Keep a small list of pointers so that we can print address-agnostic
* pointer values. Use a rolling integer count to differentiate the values.
* Ironically we could have used the shadow variable API to do this, but
* let's not lean too heavily on the very code we're testing.
*/
static LIST_HEAD(ptr_list);
struct shadow_ptr {
void *ptr;
int id;
struct list_head list;
};
static void free_ptr_list(void)
{
struct shadow_ptr *sp, *tmp_sp;
list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) {
list_del(&sp->list);
kfree(sp);
}
}
static int ptr_id(void *ptr)
{
struct shadow_ptr *sp;
static int count;
list_for_each_entry(sp, &ptr_list, list) {
if (sp->ptr == ptr)
return sp->id;
}
sp = kmalloc(sizeof(*sp), GFP_ATOMIC);
if (!sp)
return -ENOMEM;
sp->ptr = ptr;
sp->id = count++;
list_add(&sp->list, &ptr_list);
return sp->id;
}
/*
* Shadow variable wrapper functions that echo the function and arguments
* to the kernel log for testing verification. Don't display raw pointers,
* but use the ptr_id() value instead.
*/
static void *shadow_get(void *obj, unsigned long id)
{
int **sv;
sv = klp_shadow_get(obj, id);
pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
__func__, ptr_id(obj), id, ptr_id(sv));
return sv;
}
static void *shadow_alloc(void *obj, unsigned long id, size_t size,
gfp_t gfp_flags, klp_shadow_ctor_t ctor,
void *ctor_data)
{
int **var = ctor_data;
int **sv;
sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
__func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
ptr_id(*var), ptr_id(sv));
return sv;
}
static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
gfp_t gfp_flags, klp_shadow_ctor_t ctor,
void *ctor_data)
{
int **var = ctor_data;
int **sv;
sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
__func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
ptr_id(*var), ptr_id(sv));
return sv;
}
static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
{
klp_shadow_free(obj, id, dtor);
pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n",
__func__, ptr_id(obj), id, ptr_id(dtor));
}
static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
klp_shadow_free_all(id, dtor);
pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
}
/* Shadow variable constructor - remember simple pointer data */
static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
{
int **sv = shadow_data;
int **var = ctor_data;
if (!var)
return -EINVAL;
*sv = *var;
pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
return 0;
}
/*
* With more than one item to free in the list, order is not determined and
* shadow_dtor will not be passed to shadow_free_all() which would make the
* test fail. (see pass 6)
*/
static void shadow_dtor(void *obj, void *shadow_data)
{
int **sv = shadow_data;
pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
__func__, ptr_id(obj), ptr_id(sv));
}
/* number of objects we simulate that need shadow vars */
#define NUM_OBJS 3
/* dynamically created obj fields have the following shadow var id values */
#define SV_ID1 0x1234
#define SV_ID2 0x1235
/*
* The main test case adds/removes new fields (shadow var) to each of these
* test structure instances. The last group of fields in the struct represent
* the idea that shadow variables may be added and removed to and from the
* struct during execution.
*/
struct test_object {
/* add anything below here; this avoids defining an empty struct */
struct shadow_ptr sp;
/* these represent shadow vars added and removed with SV_ID{1,2} */
/* char nfield1; */
/* int nfield2; */
};
static int test_klp_shadow_vars_init(void)
{
struct test_object objs[NUM_OBJS];
char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
char *pndup[NUM_OBJS];
int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
void **sv;
int ret;
int i;
ptr_id(NULL);
/*
* With an empty shadow variable hash table, expect not to find
* any matches.
*/
sv = shadow_get(&objs[0], SV_ID1);
if (!sv)
pr_info(" got expected NULL result\n");
/* pass 1: init & alloc a char+int pair of svars for each objs */
for (i = 0; i < NUM_OBJS; i++) {
pnfields1[i] = &nfields1[i];
ptr_id(pnfields1[i]);
if (i % 2) {
sv1[i] = shadow_alloc(&objs[i], SV_ID1,
sizeof(pnfields1[i]), GFP_KERNEL,
shadow_ctor, &pnfields1[i]);
} else {
sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
sizeof(pnfields1[i]), GFP_KERNEL,
shadow_ctor, &pnfields1[i]);
}
if (!sv1[i]) {
ret = -ENOMEM;
goto out;
}
pnfields2[i] = &nfields2[i];
ptr_id(pnfields2[i]);
sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
GFP_KERNEL, shadow_ctor, &pnfields2[i]);
if (!sv2[i]) {
ret = -ENOMEM;
goto out;
}
}
/* pass 2: verify we find allocated svars and where they point to */
for (i = 0; i < NUM_OBJS; i++) {
/* check the "char" svar for all objects */
sv = shadow_get(&objs[i], SV_ID1);
if (!sv) {
ret = -EINVAL;
goto out;
}
if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv1[i]), ptr_id(*sv1[i]));
/* check the "int" svar for all objects */
sv = shadow_get(&objs[i], SV_ID2);
if (!sv) {
ret = -EINVAL;
goto out;
}
if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv2[i]), ptr_id(*sv2[i]));
}
/* pass 3: verify that 'get_or_alloc' returns already allocated svars */
for (i = 0; i < NUM_OBJS; i++) {
pndup[i] = &nfields1[i];
ptr_id(pndup[i]);
sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
GFP_KERNEL, shadow_ctor, &pndup[i]);
if (!sv) {
ret = -EINVAL;
goto out;
}
if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv1[i]), ptr_id(*sv1[i]));
}
/* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
for (i = 0; i < NUM_OBJS; i++) {
shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
sv = shadow_get(&objs[i], SV_ID1);
if (!sv)
pr_info(" got expected NULL result\n");
}
/* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
for (i = 0; i < NUM_OBJS; i++) {
sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
if (!sv) {
ret = -EINVAL;
goto out;
}
if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
pr_info(" got expected PTR%d -> PTR%d result\n",
ptr_id(sv2[i]), ptr_id(*sv2[i]));
}
/* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
for (i = 0; i < NUM_OBJS; i++) {
sv = shadow_get(&objs[i], SV_ID2);
if (!sv)
pr_info(" got expected NULL result\n");
}
free_ptr_list();
return 0;
out:
shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
free_ptr_list();
return ret;
}
static void test_klp_shadow_vars_exit(void)
{
}
module_init(test_klp_shadow_vars_init);
module_exit(test_klp_shadow_vars_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: shadow variables");
| linux-master | lib/livepatch/test_klp_shadow_vars.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 SUSE
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/livepatch.h>
#define CONSOLE_LOGLEVEL_STATE 1
/* Version 1 does not support migration. */
#define CONSOLE_LOGLEVEL_STATE_VERSION 1
static const char *const module_state[] = {
[MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
[MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
[MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
[MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
};
static void callback_info(const char *callback, struct klp_object *obj)
{
if (obj->mod)
pr_info("%s: %s -> %s\n", callback, obj->mod->name,
module_state[obj->mod->state]);
else
pr_info("%s: vmlinux\n", callback);
}
static struct klp_patch patch;
static int allocate_loglevel_state(void)
{
struct klp_state *loglevel_state;
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return -EINVAL;
loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
if (!loglevel_state->data)
return -ENOMEM;
pr_info("%s: allocating space to store console_loglevel\n",
__func__);
return 0;
}
static void fix_console_loglevel(void)
{
struct klp_state *loglevel_state;
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return;
pr_info("%s: fixing console_loglevel\n", __func__);
*(int *)loglevel_state->data = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
}
static void restore_console_loglevel(void)
{
struct klp_state *loglevel_state;
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return;
pr_info("%s: restoring console_loglevel\n", __func__);
console_loglevel = *(int *)loglevel_state->data;
}
static void free_loglevel_state(void)
{
struct klp_state *loglevel_state;
loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
if (!loglevel_state)
return;
pr_info("%s: freeing space for the stored console_loglevel\n",
__func__);
kfree(loglevel_state->data);
}
/* Executed on object patching (ie, patch enablement) */
static int pre_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
return allocate_loglevel_state();
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_patch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
fix_console_loglevel();
}
/* Executed on object unpatching (ie, patch disablement) */
static void pre_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
restore_console_loglevel();
}
/* Executed on object unpatching (ie, patch disablement) */
static void post_unpatch_callback(struct klp_object *obj)
{
callback_info(__func__, obj);
free_loglevel_state();
}
static struct klp_func no_funcs[] = {
{}
};
static struct klp_object objs[] = {
{
.name = NULL, /* vmlinux */
.funcs = no_funcs,
.callbacks = {
.pre_patch = pre_patch_callback,
.post_patch = post_patch_callback,
.pre_unpatch = pre_unpatch_callback,
.post_unpatch = post_unpatch_callback,
},
}, { }
};
static struct klp_state states[] = {
{
.id = CONSOLE_LOGLEVEL_STATE,
.version = CONSOLE_LOGLEVEL_STATE_VERSION,
}, { }
};
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
.states = states,
.replace = true,
};
static int test_klp_callbacks_demo_init(void)
{
return klp_enable_patch(&patch);
}
static void test_klp_callbacks_demo_exit(void)
{
}
module_init(test_klp_callbacks_demo_init);
module_exit(test_klp_callbacks_demo_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");
MODULE_AUTHOR("Petr Mladek <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: system state modification");
| linux-master | lib/livepatch/test_klp_state.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Joe Lawrence <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
static int test_klp_callbacks_mod_init(void)
{
pr_info("%s\n", __func__);
return 0;
}
static void test_klp_callbacks_mod_exit(void)
{
pr_info("%s\n", __func__);
}
module_init(test_klp_callbacks_mod_init);
module_exit(test_klp_callbacks_mod_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Lawrence <[email protected]>");
MODULE_DESCRIPTION("Livepatch test: target module");
| linux-master | lib/livepatch/test_klp_callbacks_mod.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 SUSE
/* The console loglevel fix is the same in the next cumulative patch. */
#include "test_klp_state2.c"
| linux-master | lib/livepatch/test_klp_state3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic userspace implementations of gettimeofday() and similar.
*/
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#ifndef vdso_calc_delta
/*
* Default implementation which works for all sane clocksources. That
* obviously excludes x86/TSC.
*/
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
return ((cycles - last) & mask) * mult;
}
#endif
#ifndef vdso_shift_ns
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
return ns >> shift;
}
#endif
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
return true;
}
#endif
#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif
#ifndef vdso_cycles_ok
static inline bool vdso_cycles_ok(u64 cycles)
{
return true;
}
#endif
#ifdef CONFIG_TIME_NS
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
u64 cycles, last, ns;
u32 seq;
s64 sec;
vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
vd = __arch_get_timens_vdso_data(vd);
if (clk != CLOCK_MONOTONIC_RAW)
vd = &vd[CS_HRES_COARSE];
else
vd = &vd[CS_RAW];
vdso_ts = &vd->basetime[clk];
do {
seq = vdso_read_begin(vd);
if (unlikely(!vdso_clocksource_ok(vd)))
return -1;
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
ns = vdso_ts->nsec;
last = vd->cycle_last;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
/* Add the namespace offset */
sec += offs->sec;
ns += offs->nsec;
/*
* Do this outside the loop: a race inside the loop could result
* in __iter_div_u64_rem() being extremely slow.
*/
ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
return 0;
}
#else
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return NULL;
}
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
return -EINVAL;
}
#endif
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
u64 cycles, last, sec, ns;
u32 seq;
/* Allows to compile the high resolution parts out */
if (!__arch_vdso_hres_capable())
return -1;
do {
/*
* Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
* enabled tasks have a special VVAR page installed which
* has vd->seq set to 1 and vd->clock_mode set to
* VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
* this does not affect performance because if vd->seq is
* odd, i.e. a concurrent update is in progress the extra
* check for vd->clock_mode is just a few extra
* instructions while spin waiting for vd->seq to become
* even again.
*/
while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_hres_timens(vd, clk, ts);
cpu_relax();
}
smp_rmb();
if (unlikely(!vdso_clocksource_ok(vd)))
return -1;
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
ns = vdso_ts->nsec;
last = vd->cycle_last;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
ns = vdso_shift_ns(ns, vd->shift);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
/*
* Do this outside the loop: a race inside the loop could result
* in __iter_div_u64_rem() being extremely slow.
*/
ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;
return 0;
}
#ifdef CONFIG_TIME_NS
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
const struct timens_offset *offs = &vdns->offset[clk];
u64 nsec;
s64 sec;
s32 seq;
do {
seq = vdso_read_begin(vd);
sec = vdso_ts->sec;
nsec = vdso_ts->nsec;
} while (unlikely(vdso_read_retry(vd, seq)));
/* Add the namespace offset */
sec += offs->sec;
nsec += offs->nsec;
/*
* Do this outside the loop: a race inside the loop could result
* in __iter_div_u64_rem() being extremely slow.
*/
ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
ts->tv_nsec = nsec;
return 0;
}
#else
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
return -1;
}
#endif
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
u32 seq;
do {
/*
* Open coded to handle VDSO_CLOCK_TIMENS. See comment in
* do_hres().
*/
while ((seq = READ_ONCE(vd->seq)) & 1) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
return do_coarse_timens(vd, clk, ts);
cpu_relax();
}
smp_rmb();
ts->tv_sec = vdso_ts->sec;
ts->tv_nsec = vdso_ts->nsec;
} while (unlikely(vdso_read_retry(vd, seq)));
return 0;
}
static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
u32 msk;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
return -1;
/*
* Convert the clockid to a bitmask and use it to check which
* clocks are handled in the VDSO directly.
*/
msk = 1U << clock;
if (likely(msk & VDSO_HRES))
vd = &vd[CS_HRES_COARSE];
else if (msk & VDSO_COARSE)
return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
else if (msk & VDSO_RAW)
vd = &vd[CS_RAW];
else
return -1;
return do_hres(vd, clock, ts);
}
static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
int ret = __cvdso_clock_gettime_common(vd, clock, ts);
if (unlikely(ret))
return clock_gettime_fallback(clock, ts);
return 0;
}
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
ret = __cvdso_clock_gettime_common(vd, clock, &ts);
if (unlikely(ret))
return clock_gettime32_fallback(clock, res);
/* For ret == 0 */
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
return ret;
}
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
struct __kernel_old_timeval *tv, struct timezone *tz)
{
if (likely(tv != NULL)) {
struct __kernel_timespec ts;
if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
return gettimeofday_fallback(tv, tz);
tv->tv_sec = ts.tv_sec;
tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
}
if (unlikely(tz != NULL)) {
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data(vd);
tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
}
return 0;
}
static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
}
#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
__kernel_old_time_t t;
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data(vd);
t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
if (time)
*time = t;
return t;
}
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
u32 msk;
u64 ns;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
return -1;
if (IS_ENABLED(CONFIG_TIME_NS) &&
vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
vd = __arch_get_timens_vdso_data(vd);
/*
* Convert the clockid to a bitmask and use it to check which
* clocks are handled in the VDSO directly.
*/
msk = 1U << clock;
if (msk & (VDSO_HRES | VDSO_RAW)) {
/*
* Preserves the behaviour of posix_get_hrtimer_res().
*/
ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
} else if (msk & VDSO_COARSE) {
/*
* Preserves the behaviour of posix_get_coarse_res().
*/
ns = LOW_RES_NSEC;
} else {
return -1;
}
if (likely(res)) {
res->tv_sec = 0;
res->tv_nsec = ns;
}
return 0;
}
static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
struct __kernel_timespec *res)
{
int ret = __cvdso_clock_getres_common(vd, clock, res);
if (unlikely(ret))
return clock_getres_fallback(clock, res);
return 0;
}
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
}
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
struct old_timespec32 *res)
{
struct __kernel_timespec ts;
int ret;
ret = __cvdso_clock_getres_common(vd, clock, &ts);
if (unlikely(ret))
return clock_getres32_fallback(clock, res);
if (likely(res)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
return ret;
}
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
clock, res);
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */
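/*
 * Usage sketch (illustrative only): an architecture's vDSO entry points are
 * usually thin wrappers around the __cvdso_*() helpers above. The wrapper
 * names below are hypothetical, not a real ABI:
 *
 *	int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 *	{
 *		return __cvdso_clock_gettime(clock, ts);
 *	}
 *
 *	int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 *	{
 *		return __cvdso_gettimeofday(tv, tz);
 *	}
 */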
| linux-master | lib/vdso/gettimeofday.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/dim.h>
bool dim_on_top(struct dim *dim)
{
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
case DIM_PARKING_TIRED:
return true;
case DIM_GOING_RIGHT:
return (dim->steps_left > 1) && (dim->steps_right == 1);
default: /* DIM_GOING_LEFT */
return (dim->steps_right > 1) && (dim->steps_left == 1);
}
}
EXPORT_SYMBOL(dim_on_top);
void dim_turn(struct dim *dim)
{
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
case DIM_PARKING_TIRED:
break;
case DIM_GOING_RIGHT:
dim->tune_state = DIM_GOING_LEFT;
dim->steps_left = 0;
break;
case DIM_GOING_LEFT:
dim->tune_state = DIM_GOING_RIGHT;
dim->steps_right = 0;
break;
}
}
EXPORT_SYMBOL(dim_turn);
void dim_park_on_top(struct dim *dim)
{
dim->steps_right = 0;
dim->steps_left = 0;
dim->tired = 0;
dim->tune_state = DIM_PARKING_ON_TOP;
}
EXPORT_SYMBOL(dim_park_on_top);
void dim_park_tired(struct dim *dim)
{
dim->steps_right = 0;
dim->steps_left = 0;
dim->tune_state = DIM_PARKING_TIRED;
}
EXPORT_SYMBOL(dim_park_tired);
bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats)
{
/* u32 holds up to 71 minutes, should be enough */
u32 delta_us = ktime_us_delta(end->time, start->time);
u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
start->byte_ctr);
u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
start->comp_ctr);
if (!delta_us)
return false;
curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
delta_us);
curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
if (curr_stats->epms != 0)
curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
curr_stats->cpms * 100, curr_stats->epms);
else
curr_stats->cpe_ratio = 0;
return true;
}
EXPORT_SYMBOL(dim_calc_stats);
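/*
 * Usage sketch (illustrative only): callers snapshot a dim_sample at the
 * start of a measurement window and later pass it to dim_calc_stats()
 * together with a newer sample. A minimal sketch, assuming "dim" and
 * "end_sample" are owned by the caller and apply_stats() is hypothetical:
 *
 *	struct dim_stats stats;
 *
 *	if (dim_calc_stats(&dim->start_sample, &end_sample, &stats))
 *		apply_stats(&stats);
 *
 * On success, stats.ppms/bpms/epms/cpms hold packets, bytes, events and
 * completions per millisecond for the measured window.
 */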
| linux-master | lib/dim/dim.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/dim.h>
static int rdma_dim_step(struct dim *dim)
{
if (dim->tune_state == DIM_GOING_RIGHT) {
if (dim->profile_ix == (RDMA_DIM_PARAMS_NUM_PROFILES - 1))
return DIM_ON_EDGE;
dim->profile_ix++;
dim->steps_right++;
}
if (dim->tune_state == DIM_GOING_LEFT) {
if (dim->profile_ix == 0)
return DIM_ON_EDGE;
dim->profile_ix--;
dim->steps_left++;
}
return DIM_STEPPED;
}
static int rdma_dim_stats_compare(struct dim_stats *curr,
struct dim_stats *prev)
{
/* first stat */
if (!prev->cpms)
return DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
return DIM_STATS_SAME;
}
static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
{
int prev_ix = dim->profile_ix;
u8 state = dim->tune_state;
int stats_res;
int step_res;
if (state != DIM_PARKING_ON_TOP && state != DIM_PARKING_TIRED) {
stats_res = rdma_dim_stats_compare(curr_stats,
&dim->prev_stats);
switch (stats_res) {
case DIM_STATS_SAME:
if (curr_stats->cpe_ratio <= 50 * prev_ix)
dim->profile_ix = 0;
break;
case DIM_STATS_WORSE:
dim_turn(dim);
fallthrough;
case DIM_STATS_BETTER:
step_res = rdma_dim_step(dim);
if (step_res == DIM_ON_EDGE)
dim_turn(dim);
break;
}
}
dim->prev_stats = *curr_stats;
return dim->profile_ix != prev_ix;
}
void rdma_dim(struct dim *dim, u64 completions)
{
struct dim_sample *curr_sample = &dim->measuring_sample;
struct dim_stats curr_stats;
u32 nevents;
dim_update_sample_with_comps(curr_sample->event_ctr + 1, 0, 0,
curr_sample->comp_ctr + completions,
&dim->measuring_sample);
switch (dim->state) {
case DIM_MEASURE_IN_PROGRESS:
nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
if (nevents < DIM_NEVENTS)
break;
if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats))
break;
if (rdma_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
schedule_work(&dim->work);
break;
}
fallthrough;
case DIM_START_MEASURE:
dim->state = DIM_MEASURE_IN_PROGRESS;
dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
curr_sample->comp_ctr,
&dim->start_sample);
break;
case DIM_APPLY_NEW_PROFILE:
break;
}
}
EXPORT_SYMBOL(rdma_dim);
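/*
 * Usage sketch (illustrative only): an RDMA driver typically calls
 * rdma_dim() from its CQ completion handling and applies the chosen profile
 * from the scheduled work item, assuming the rdma_dim_prof[] table from
 * include/linux/dim.h. modify_cq_moderation() is a hypothetical helper:
 *
 *	static void my_cq_dim_work(struct work_struct *work)
 *	{
 *		struct dim *dim = container_of(work, struct dim, work);
 *		u16 usec = rdma_dim_prof[dim->profile_ix].usec;
 *		u16 comps = rdma_dim_prof[dim->profile_ix].comps;
 *
 *		modify_cq_moderation(dim->priv, usec, comps);
 *		dim->state = DIM_START_MEASURE;
 *	}
 */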
| linux-master | lib/dim/rdma_dim.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/dim.h>
/*
* Net DIM profiles:
* There is a different set of profiles for each CQ period mode.
* There is a different set of profiles for RX and TX CQs.
* Each profile array must have NET_DIM_PARAMS_NUM_PROFILES entries.
*/
#define NET_DIM_PARAMS_NUM_PROFILES 5
#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1
#define NET_DIM_RX_EQE_PROFILES { \
{.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
{.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
{.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
{.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
{.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_RX_CQE_PROFILES { \
{.usec = 2, .pkts = 256,}, \
{.usec = 8, .pkts = 128,}, \
{.usec = 16, .pkts = 64,}, \
{.usec = 32, .pkts = 64,}, \
{.usec = 64, .pkts = 64,} \
}
#define NET_DIM_TX_EQE_PROFILES { \
{.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
{.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
{.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
{.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
{.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
}
#define NET_DIM_TX_CQE_PROFILES { \
{.usec = 5, .pkts = 128,}, \
{.usec = 8, .pkts = 64,}, \
{.usec = 16, .pkts = 32,}, \
{.usec = 32, .pkts = 32,}, \
{.usec = 64, .pkts = 32,} \
}
static const struct dim_cq_moder
rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_RX_EQE_PROFILES,
NET_DIM_RX_CQE_PROFILES,
};
static const struct dim_cq_moder
tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
NET_DIM_TX_EQE_PROFILES,
NET_DIM_TX_CQE_PROFILES,
};
struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
EXPORT_SYMBOL(net_dim_get_rx_moderation);
struct dim_cq_moder
net_dim_get_def_rx_moderation(u8 cq_period_mode)
{
u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
}
EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
struct dim_cq_moder
net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
{
struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
EXPORT_SYMBOL(net_dim_get_tx_moderation);
struct dim_cq_moder
net_dim_get_def_tx_moderation(u8 cq_period_mode)
{
u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
}
EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
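/*
 * Usage sketch (illustrative only): at ring setup time a driver usually
 * starts from the default profile for its CQ period mode and programs the
 * returned values into the hardware; set_rx_coalesce() is hypothetical:
 *
 *	struct dim_cq_moder moder =
 *		net_dim_get_def_rx_moderation(DIM_CQ_PERIOD_MODE_START_FROM_EQE);
 *
 *	set_rx_coalesce(priv, moder.usec, moder.pkts);
 */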
static int net_dim_step(struct dim *dim)
{
if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
return DIM_TOO_TIRED;
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
case DIM_PARKING_TIRED:
break;
case DIM_GOING_RIGHT:
if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
return DIM_ON_EDGE;
dim->profile_ix++;
dim->steps_right++;
break;
case DIM_GOING_LEFT:
if (dim->profile_ix == 0)
return DIM_ON_EDGE;
dim->profile_ix--;
dim->steps_left++;
break;
}
dim->tired++;
return DIM_STEPPED;
}
static void net_dim_exit_parking(struct dim *dim)
{
dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
net_dim_step(dim);
}
static int net_dim_stats_compare(struct dim_stats *curr,
struct dim_stats *prev)
{
if (!prev->bpms)
return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
if (!prev->ppms)
return curr->ppms ? DIM_STATS_BETTER :
DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
if (!prev->epms)
return DIM_STATS_SAME;
if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
DIM_STATS_WORSE;
return DIM_STATS_SAME;
}
static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
{
int prev_state = dim->tune_state;
int prev_ix = dim->profile_ix;
int stats_res;
int step_res;
switch (dim->tune_state) {
case DIM_PARKING_ON_TOP:
stats_res = net_dim_stats_compare(curr_stats,
&dim->prev_stats);
if (stats_res != DIM_STATS_SAME)
net_dim_exit_parking(dim);
break;
case DIM_PARKING_TIRED:
dim->tired--;
if (!dim->tired)
net_dim_exit_parking(dim);
break;
case DIM_GOING_RIGHT:
case DIM_GOING_LEFT:
stats_res = net_dim_stats_compare(curr_stats,
&dim->prev_stats);
if (stats_res != DIM_STATS_BETTER)
dim_turn(dim);
if (dim_on_top(dim)) {
dim_park_on_top(dim);
break;
}
step_res = net_dim_step(dim);
switch (step_res) {
case DIM_ON_EDGE:
dim_park_on_top(dim);
break;
case DIM_TOO_TIRED:
dim_park_tired(dim);
break;
}
break;
}
if (prev_state != DIM_PARKING_ON_TOP ||
dim->tune_state != DIM_PARKING_ON_TOP)
dim->prev_stats = *curr_stats;
return dim->profile_ix != prev_ix;
}
void net_dim(struct dim *dim, struct dim_sample end_sample)
{
struct dim_stats curr_stats;
u16 nevents;
switch (dim->state) {
case DIM_MEASURE_IN_PROGRESS:
nevents = BIT_GAP(BITS_PER_TYPE(u16),
end_sample.event_ctr,
dim->start_sample.event_ctr);
if (nevents < DIM_NEVENTS)
break;
if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
break;
if (net_dim_decision(&curr_stats, dim)) {
dim->state = DIM_APPLY_NEW_PROFILE;
schedule_work(&dim->work);
break;
}
fallthrough;
case DIM_START_MEASURE:
dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
end_sample.byte_ctr, &dim->start_sample);
dim->state = DIM_MEASURE_IN_PROGRESS;
break;
case DIM_APPLY_NEW_PROFILE:
break;
}
}
EXPORT_SYMBOL(net_dim);
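/*
 * Usage sketch (illustrative only): a NIC driver usually feeds net_dim()
 * once per NAPI poll and reprograms coalescing from the DIM work handler.
 * my_ring and set_rx_coalesce() are hypothetical names.
 *
 * In the poll loop:
 *
 *	struct dim_sample sample;
 *
 *	dim_update_sample(my_ring->events, my_ring->packets, my_ring->bytes,
 *			  &sample);
 *	net_dim(&my_ring->dim, sample);
 *
 * In the work handler scheduled by net_dim() on a profile change:
 *
 *	struct dim *dim = container_of(work, struct dim, work);
 *	struct dim_cq_moder cur =
 *		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 *
 *	set_rx_coalesce(priv, cur.usec, cur.pkts);
 *	dim->state = DIM_START_MEASURE;
 */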
| linux-master | lib/dim/net_dim.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/reboot.h>
#include <kunit/test.h>
#include <kunit/attributes.h>
#include <linux/glob.h>
#include <linux/moduleparam.h>
/*
* These symbols point to the .kunit_test_suites section and are defined in
* include/asm-generic/vmlinux.lds.h, and consequently must be extern.
*/
extern struct kunit_suite * const __kunit_suites_start[];
extern struct kunit_suite * const __kunit_suites_end[];
static char *action_param;
module_param_named(action, action_param, charp, 0400);
MODULE_PARM_DESC(action,
"Changes KUnit executor behavior, valid values are:\n"
"<none>: run the tests like normal\n"
"'list' to list test names instead of running them.\n"
"'list_attr' to list test names and attributes instead of running them.\n");
const char *kunit_action(void)
{
return action_param;
}
static char *filter_glob_param;
static char *filter_param;
static char *filter_action_param;
module_param_named(filter_glob, filter_glob_param, charp, 0400);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
module_param_named(filter, filter_param, charp, 0400);
MODULE_PARM_DESC(filter,
"Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
module_param_named(filter_action, filter_action_param, charp, 0400);
MODULE_PARM_DESC(filter_action,
"Changes behavior of filtered tests using attributes, valid values are:\n"
"<none>: do not run filtered tests as normal\n"
"'skip': skip all filtered tests instead so tests will appear in output\n");
const char *kunit_filter_glob(void)
{
return filter_glob_param;
}
char *kunit_filter(void)
{
return filter_param;
}
char *kunit_filter_action(void)
{
return filter_action_param;
}
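/*
 * Usage sketch (illustrative only): with KUnit built in, the parameters
 * above are normally given on the kernel command line, for example
 * (the values shown are only examples):
 *
 *	kunit.filter_glob=kunit_executor_test.*
 *	kunit.filter="speed>slow, module!=example"
 *	kunit.filter_action=skip
 *	kunit.action=list_attr
 */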
/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
struct kunit_glob_filter {
char *suite_glob;
char *test_glob;
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
const char *period = strchr(filter_glob, '.');
if (!period) {
parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
if (!parsed->suite_glob)
return -ENOMEM;
parsed->test_glob = NULL;
strcpy(parsed->suite_glob, filter_glob);
return 0;
}
parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
if (!parsed->suite_glob)
return -ENOMEM;
parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
if (!parsed->test_glob) {
kfree(parsed->suite_glob);
return -ENOMEM;
}
strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
return 0;
}
/* Create a copy of suite with only tests that match test_glob. */
static struct kunit_suite *
kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_glob)
{
int n = 0;
struct kunit_case *filtered, *test_case;
struct kunit_suite *copy;
kunit_suite_for_each_test_case(suite, test_case) {
if (!test_glob || glob_match(test_glob, test_case->name))
++n;
}
if (n == 0)
return NULL;
copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
if (!copy)
return ERR_PTR(-ENOMEM);
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
if (!filtered) {
kfree(copy);
return ERR_PTR(-ENOMEM);
}
n = 0;
kunit_suite_for_each_test_case(suite, test_case) {
if (!test_glob || glob_match(test_glob, test_case->name))
filtered[n++] = *test_case;
}
copy->test_cases = filtered;
return copy;
}
void kunit_free_suite_set(struct kunit_suite_set suite_set)
{
struct kunit_suite * const *suites;
for (suites = suite_set.start; suites < suite_set.end; suites++)
kfree(*suites);
kfree(suite_set.start);
}
struct kunit_suite_set
kunit_filter_suites(const struct kunit_suite_set *suite_set,
const char *filter_glob,
char *filters,
char *filter_action,
int *err)
{
int i, j, k;
int filter_count = 0;
struct kunit_suite **copy, **copy_start, *filtered_suite, *new_filtered_suite;
struct kunit_suite_set filtered = {NULL, NULL};
struct kunit_glob_filter parsed_glob;
struct kunit_attr_filter *parsed_filters = NULL;
const size_t max = suite_set->end - suite_set->start;
copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
if (!copy) { /* won't be able to run anything, return an empty set */
return filtered;
}
copy_start = copy;
if (filter_glob) {
*err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
if (*err)
goto free_copy;
}
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
if (!parsed_filters) {
*err = -ENOMEM;
goto free_parsed_glob;
}
for (j = 0; j < filter_count; j++)
parsed_filters[j] = kunit_next_attr_filter(&filters, err);
if (*err)
goto free_parsed_filters;
}
for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
filtered_suite = suite_set->start[i];
if (filter_glob) {
if (!glob_match(parsed_glob.suite_glob, filtered_suite->name))
continue;
filtered_suite = kunit_filter_glob_tests(filtered_suite,
parsed_glob.test_glob);
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
goto free_parsed_filters;
}
}
if (filter_count > 0 && parsed_filters != NULL) {
for (k = 0; k < filter_count; k++) {
new_filtered_suite = kunit_filter_attr_tests(filtered_suite,
parsed_filters[k], filter_action, err);
/* Free previous copy of suite */
if (k > 0 || filter_glob) {
kfree(filtered_suite->test_cases);
kfree(filtered_suite);
}
filtered_suite = new_filtered_suite;
if (*err)
goto free_parsed_filters;
if (IS_ERR(filtered_suite)) {
*err = PTR_ERR(filtered_suite);
goto free_parsed_filters;
}
if (!filtered_suite)
break;
}
}
if (!filtered_suite)
continue;
*copy++ = filtered_suite;
}
filtered.start = copy_start;
filtered.end = copy;
free_parsed_filters:
if (filter_count)
kfree(parsed_filters);
free_parsed_glob:
if (filter_glob) {
kfree(parsed_glob.suite_glob);
kfree(parsed_glob.test_glob);
}
free_copy:
if (*err)
kfree(copy);
return filtered;
}
void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
{
size_t num_suites = suite_set->end - suite_set->start;
if (builtin || num_suites) {
pr_info("KTAP version 1\n");
pr_info("1..%zu\n", num_suites);
}
__kunit_test_suites_init(suite_set->start, num_suites);
}
void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
{
struct kunit_suite * const *suites;
struct kunit_case *test_case;
/* Hack: print a KTAP header so kunit.py can find the start of KUnit output. */
pr_info("KTAP version 1\n");
for (suites = suite_set->start; suites < suite_set->end; suites++) {
/* Print suite name and suite attributes */
pr_info("%s\n", (*suites)->name);
if (include_attr)
kunit_print_attr((void *)(*suites), false, 0);
/* Print test case name and attributes in suite */
kunit_suite_for_each_test_case((*suites), test_case) {
pr_info("%s.%s\n", (*suites)->name, test_case->name);
if (include_attr)
kunit_print_attr((void *)test_case, true, 0);
}
}
}
#if IS_BUILTIN(CONFIG_KUNIT)
static char *kunit_shutdown;
core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
static void kunit_handle_shutdown(void)
{
if (!kunit_shutdown)
return;
if (!strcmp(kunit_shutdown, "poweroff"))
kernel_power_off();
else if (!strcmp(kunit_shutdown, "halt"))
kernel_halt();
else if (!strcmp(kunit_shutdown, "reboot"))
kernel_restart(NULL);
}
int kunit_run_all_tests(void)
{
struct kunit_suite_set suite_set = {
__kunit_suites_start, __kunit_suites_end,
};
int err = 0;
if (!kunit_enabled()) {
pr_info("kunit: disabled\n");
goto out;
}
if (filter_glob_param || filter_param) {
suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
filter_param, filter_action_param, &err);
if (err) {
pr_err("kunit executor: error filtering suites: %d\n", err);
goto out;
}
}
if (!action_param)
kunit_exec_run_tests(&suite_set, true);
else if (strcmp(action_param, "list") == 0)
kunit_exec_list_tests(&suite_set, false);
else if (strcmp(action_param, "list_attr") == 0)
kunit_exec_list_tests(&suite_set, true);
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
if (filter_glob_param || filter_param) { /* a copy was made of each suite */
kunit_free_suite_set(suite_set);
}
out:
kunit_handle_shutdown();
return err;
}
#if IS_BUILTIN(CONFIG_KUNIT_TEST)
#include "executor_test.c"
#endif
#endif /* IS_BUILTIN(CONFIG_KUNIT) */
| linux-master | lib/kunit/executor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit resource API for test managed resources (allocations, etc.).
*
* Copyright (C) 2022, Google LLC.
* Author: Daniel Latypov <[email protected]>
*/
#include <kunit/resource.h>
#include <kunit/test.h>
#include <linux/kref.h>
/*
* Used for static resources and when a kunit_resource * has been created by
* kunit_alloc_resource(). When an init function is supplied, @data is passed
* into the init function; otherwise, we simply set the resource data field to
* the data value passed in. Doesn't initialize res->should_kfree.
*/
int __kunit_add_resource(struct kunit *test,
kunit_resource_init_t init,
kunit_resource_free_t free,
struct kunit_resource *res,
void *data)
{
int ret = 0;
unsigned long flags;
res->free = free;
kref_init(&res->refcount);
if (init) {
ret = init(res, data);
if (ret)
return ret;
} else {
res->data = data;
}
spin_lock_irqsave(&test->lock, flags);
list_add_tail(&res->node, &test->resources);
/* refcount for list is established by kref_init() */
spin_unlock_irqrestore(&test->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(__kunit_add_resource);
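/*
 * Usage sketch (illustrative only): for a caller-owned (e.g. static)
 * resource, the kunit_add_resource() wrapper in include/kunit/resource.h
 * registers it without handing ownership of the struct to KUnit.
 * my_free_fn and my_data are hypothetical:
 *
 *	static struct kunit_resource my_res;
 *
 *	kunit_add_resource(test, NULL, my_free_fn, &my_res, my_data);
 */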
void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
{
unsigned long flags;
bool was_linked;
spin_lock_irqsave(&test->lock, flags);
was_linked = !list_empty(&res->node);
list_del_init(&res->node);
spin_unlock_irqrestore(&test->lock, flags);
if (was_linked)
kunit_put_resource(res);
}
EXPORT_SYMBOL_GPL(kunit_remove_resource);
int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
void *match_data)
{
struct kunit_resource *res = kunit_find_resource(test, match,
match_data);
if (!res)
return -ENOENT;
kunit_remove_resource(test, res);
/* We have a reference also via _find(); drop it. */
kunit_put_resource(res);
return 0;
}
EXPORT_SYMBOL_GPL(kunit_destroy_resource);
struct kunit_action_ctx {
struct kunit_resource res;
kunit_action_t *func;
void *ctx;
};
static void __kunit_action_free(struct kunit_resource *res)
{
struct kunit_action_ctx *action_ctx = container_of(res, struct kunit_action_ctx, res);
action_ctx->func(action_ctx->ctx);
}
int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx)
{
struct kunit_action_ctx *action_ctx;
KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to action a NULL function!");
action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL);
if (!action_ctx)
return -ENOMEM;
action_ctx->func = action;
action_ctx->ctx = ctx;
action_ctx->res.should_kfree = true;
/* As init is NULL, this cannot fail. */
__kunit_add_resource(test, NULL, __kunit_action_free, &action_ctx->res, action_ctx);
return 0;
}
EXPORT_SYMBOL_GPL(kunit_add_action);
int kunit_add_action_or_reset(struct kunit *test, void (*action)(void *),
void *ctx)
{
int res = kunit_add_action(test, action, ctx);
if (res)
action(ctx);
return res;
}
EXPORT_SYMBOL_GPL(kunit_add_action_or_reset);
static bool __kunit_action_match(struct kunit *test,
struct kunit_resource *res, void *match_data)
{
struct kunit_action_ctx *match_ctx = (struct kunit_action_ctx *)match_data;
struct kunit_action_ctx *res_ctx = container_of(res, struct kunit_action_ctx, res);
/* Make sure this is a free function. */
if (res->free != __kunit_action_free)
return false;
/* Both the function and context data should match. */
return (match_ctx->func == res_ctx->func) && (match_ctx->ctx == res_ctx->ctx);
}
void kunit_remove_action(struct kunit *test,
kunit_action_t *action,
void *ctx)
{
struct kunit_action_ctx match_ctx;
struct kunit_resource *res;
match_ctx.func = action;
match_ctx.ctx = ctx;
res = kunit_find_resource(test, __kunit_action_match, &match_ctx);
if (res) {
/* Remove the free function so we don't run the action. */
res->free = NULL;
kunit_remove_resource(test, res);
kunit_put_resource(res);
}
}
EXPORT_SYMBOL_GPL(kunit_remove_action);
void kunit_release_action(struct kunit *test,
kunit_action_t *action,
void *ctx)
{
struct kunit_action_ctx match_ctx;
struct kunit_resource *res;
match_ctx.func = action;
match_ctx.ctx = ctx;
res = kunit_find_resource(test, __kunit_action_match, &match_ctx);
if (res) {
kunit_remove_resource(test, res);
/* We have to put() this here, else free won't be called. */
kunit_put_resource(res);
}
}
EXPORT_SYMBOL_GPL(kunit_release_action);
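/*
 * Usage sketch (illustrative only): the deferred action API above is the
 * simplest way to register test-managed cleanup from inside a test:
 *
 *	void *buf = kmalloc(16, GFP_KERNEL);
 *
 *	KUNIT_ASSERT_NOT_NULL(test, buf);
 *	KUNIT_ASSERT_EQ(test, 0,
 *			kunit_add_action_or_reset(test, (kunit_action_t *)kfree, buf));
 *
 * After this, buf is freed automatically when the test exits.
 */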
| linux-master | lib/kunit/resource.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit test for the KUnit executor.
*
* Copyright (C) 2021, Google LLC.
* Author: Daniel Latypov <[email protected]>
*/
#include <kunit/test.h>
#include <kunit/attributes.h>
static void kfree_at_end(struct kunit *test, const void *to_free);
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
const char *suite_name,
struct kunit_case *test_cases);
static void dummy_test(struct kunit *test) {}
static struct kunit_case dummy_test_cases[] = {
/* .run_case is not important, just needs to be non-NULL */
{ .name = "test1", .run_case = dummy_test },
{ .name = "test2", .run_case = dummy_test },
{},
};
static void parse_filter_test(struct kunit *test)
{
struct kunit_glob_filter filter = {NULL, NULL};
kunit_parse_glob_filter(&filter, "suite");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_FALSE(test, filter.test_glob);
kfree(filter.suite_glob);
kfree(filter.test_glob);
kunit_parse_glob_filter(&filter, "suite.test");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
kfree(filter.suite_glob);
kfree(filter.test_glob);
}
static void filter_suites_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
struct kunit_suite_set suite_set = {
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2, NULL */
got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
/* Validate we just have suite2 */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
/* Contains one element (end is 1 past end) */
KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
}
static void filter_suites_test_glob_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
struct kunit_suite_set suite_set = {
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */
got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
/* Validate we just have suite2 */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
/* Now validate we just have test2 */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->test_cases[0].name, "test2");
KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
}
static void filter_suites_to_empty_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
struct kunit_suite_set suite_set = {
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
"should be empty to indicate no match");
}
static void parse_filter_attr_test(struct kunit *test)
{
int j, filter_count;
struct kunit_attr_filter *parsed_filters;
char filters[] = "speed>slow, module!=example", *filter = filters;
int err = 0;
filter_count = kunit_get_filter_count(filters);
KUNIT_EXPECT_EQ(test, filter_count, 2);
parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
GFP_KERNEL);
for (j = 0; j < filter_count; j++) {
parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
}
KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
KUNIT_EXPECT_STREQ(test, parsed_filters[0].input, ">slow");
KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[1]), "module");
KUNIT_EXPECT_STREQ(test, parsed_filters[1].input, "!=example");
}
static struct kunit_case dummy_attr_test_cases[] = {
/* .run_case is not important, just needs to be non-NULL */
{ .name = "slow", .run_case = dummy_test, .module_name = "dummy",
.attr.speed = KUNIT_SPEED_SLOW },
{ .name = "normal", .run_case = dummy_test, .module_name = "dummy" },
{},
};
static void filter_attr_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
struct kunit_suite_set suite_set = {
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
subsuite[1] = alloc_fake_suite(test, "slow_suite", dummy_attr_test_cases);
subsuite[1]->attr.speed = KUNIT_SPEED_SLOW; // Set suite attribute
/*
* Want: normal_suite(slow, normal), slow_suite(slow, normal),
* NULL -> normal_suite(normal), NULL
*
* The normal test in slow_suite is filtered out because the speed
* attribute is unset and thus, the filtering is based on the parent attribute
* of slow.
*/
got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
/* Validate we just have normal_suite */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
KUNIT_EXPECT_STREQ(test, got.start[0]->name, "normal_suite");
KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
/* Now validate we just have normal test case */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "normal");
KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
}
static void filter_attr_empty_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
struct kunit_suite_set suite_set = {
.start = subsuite, .end = &subsuite[2],
};
struct kunit_suite_set got;
char filter[] = "module!=dummy";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
"should be empty to indicate no match");
}
static void filter_attr_skip_test(struct kunit *test)
{
struct kunit_suite *subsuite[2] = {NULL};
struct kunit_suite_set suite_set = {
.start = subsuite, .end = &subsuite[1],
};
struct kunit_suite_set got;
char filter[] = "speed>slow";
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
/* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
/* Validate we have both the slow and normal test */
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
KUNIT_ASSERT_EQ(test, kunit_suite_num_test_cases(got.start[0]), 2);
KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "slow");
KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[1].name, "normal");
/* Now ensure slow is skipped and normal is not */
KUNIT_EXPECT_EQ(test, got.start[0]->test_cases[0].status, KUNIT_SKIPPED);
KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].status);
}
static struct kunit_case executor_test_cases[] = {
KUNIT_CASE(parse_filter_test),
KUNIT_CASE(filter_suites_test),
KUNIT_CASE(filter_suites_test_glob_test),
KUNIT_CASE(filter_suites_to_empty_test),
KUNIT_CASE(parse_filter_attr_test),
KUNIT_CASE(filter_attr_test),
KUNIT_CASE(filter_attr_empty_test),
KUNIT_CASE(filter_attr_skip_test),
{}
};
static struct kunit_suite executor_test_suite = {
.name = "kunit_executor_test",
.test_cases = executor_test_cases,
};
kunit_test_suites(&executor_test_suite);
/* Test helpers */
/* Use the resource API to register a call to kfree(to_free).
* Since we never actually use the resource, it's safe to use on const data.
*/
static void kfree_at_end(struct kunit *test, const void *to_free)
{
/* kfree() handles NULL already, but avoid allocating a no-op cleanup. */
if (IS_ERR_OR_NULL(to_free))
return;
kunit_add_action(test,
(kunit_action_t *)kfree,
(void *)to_free);
}
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
const char *suite_name,
struct kunit_case *test_cases)
{
struct kunit_suite *suite;
/* We normally never expect to allocate suites, hence the non-const cast. */
suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
suite->test_cases = test_cases;
return suite;
}
| linux-master | lib/kunit/executor_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, Oracle and/or its affiliates.
* Author: Alan Maguire <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <kunit/test.h>
#include "string-stream.h"
#include "debugfs.h"
#define KUNIT_DEBUGFS_ROOT "kunit"
#define KUNIT_DEBUGFS_RESULTS "results"
/*
* Create a debugfs representation of test suites:
*
* Path Semantics
* /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
* testsuite
*
*/
static struct dentry *debugfs_rootdir;
void kunit_debugfs_cleanup(void)
{
debugfs_remove_recursive(debugfs_rootdir);
}
void kunit_debugfs_init(void)
{
if (!debugfs_rootdir)
debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
}
static void debugfs_print_result(struct seq_file *seq,
struct kunit_suite *suite,
struct kunit_case *test_case)
{
if (!test_case || !test_case->log)
return;
seq_printf(seq, "%s", test_case->log);
}
/*
* /sys/kernel/debug/kunit/<testsuite>/results shows all results for testsuite.
*/
static int debugfs_print_results(struct seq_file *seq, void *v)
{
struct kunit_suite *suite = (struct kunit_suite *)seq->private;
enum kunit_status success = kunit_suite_has_succeeded(suite);
struct kunit_case *test_case;
if (!suite)
return 0;
/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
seq_puts(seq, "KTAP version 1\n");
seq_puts(seq, "1..1\n");
/* Print suite header because it is not stored in the test logs. */
seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
kunit_suite_for_each_test_case(suite, test_case)
debugfs_print_result(seq, suite, test_case);
if (suite->log)
seq_printf(seq, "%s", suite->log);
seq_printf(seq, "%s %d %s\n",
kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
static int debugfs_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static int debugfs_results_open(struct inode *inode, struct file *file)
{
struct kunit_suite *suite;
suite = (struct kunit_suite *)inode->i_private;
return single_open(file, debugfs_print_results, suite);
}
static const struct file_operations debugfs_results_fops = {
.open = debugfs_results_open,
.read = seq_read,
.llseek = seq_lseek,
.release = debugfs_release,
};
void kunit_debugfs_create_suite(struct kunit_suite *suite)
{
struct kunit_case *test_case;
/* Allocate logs before creating debugfs representation. */
suite->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
kunit_suite_for_each_test_case(suite, test_case)
test_case->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
suite->debugfs,
suite, &debugfs_results_fops);
}
void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
{
struct kunit_case *test_case;
debugfs_remove_recursive(suite->debugfs);
kfree(suite->log);
kunit_suite_for_each_test_case(suite, test_case)
kfree(test_case->log);
}
| linux-master | lib/kunit/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* An API to allow a function, that may fail, to be executed, and recover in a
* controlled manner.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/test.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include "try-catch-impl.h"
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
{
try_catch->try_result = -EFAULT;
kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
static int kunit_generic_run_threadfn_adapter(void *data)
{
struct kunit_try_catch *try_catch = data;
try_catch->try(try_catch->context);
kthread_complete_and_exit(try_catch->try_completion, 0);
}
static unsigned long kunit_test_timeout(void)
{
/*
* TODO([email protected]): We should probably have some type of
* variable timeout here. The only question is what that timeout value
* should be.
*
* The intention has always been, at some point, to be able to label
* tests with some type of size bucket (unit/small, integration/medium,
* large/system/end-to-end, etc), where each size bucket would get a
* default timeout value kind of like what Bazel does:
* https://docs.bazel.build/versions/master/be/common-definitions.html#test.size
* There is still some debate to be had on exactly how we do this. (For
* one, we probably want to have some sort of test runner level
* timeout.)
*
* For more background on this topic, see:
* https://mike-bland.com/2011/11/01/small-medium-large.html
*
* If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
* the task will be killed and an oops generated.
*/
return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
}
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
{
DECLARE_COMPLETION_ONSTACK(try_completion);
struct kunit *test = try_catch->test;
struct task_struct *task_struct;
int exit_code, time_remaining;
try_catch->context = context;
try_catch->try_completion = &try_completion;
try_catch->try_result = 0;
task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
try_catch,
"kunit_try_catch_thread");
if (IS_ERR(task_struct)) {
try_catch->catch(try_catch->context);
return;
}
time_remaining = wait_for_completion_timeout(&try_completion,
kunit_test_timeout());
if (time_remaining == 0) {
kunit_err(test, "try timed out\n");
try_catch->try_result = -ETIMEDOUT;
kthread_stop(task_struct);
}
exit_code = try_catch->try_result;
if (!exit_code)
return;
if (exit_code == -EFAULT)
try_catch->try_result = 0;
else if (exit_code == -EINTR)
kunit_err(test, "wake_up_process() was never called\n");
else if (exit_code)
kunit_err(test, "Unknown error: %d\n", exit_code);
try_catch->catch(try_catch->context);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_run);
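/*
 * Usage sketch (illustrative only): KUnit core pairs this with
 * kunit_try_catch_init() from try-catch-impl.h. A minimal sketch, with
 * my_try_fn/my_catch_fn as hypothetical handlers of type void (*)(void *):
 *
 *	struct kunit_try_catch try_catch;
 *
 *	kunit_try_catch_init(&try_catch, test, my_try_fn, my_catch_fn);
 *	kunit_try_catch_run(&try_catch, context);
 *
 * my_catch_fn runs only if the try thread failed, i.e. it threw, timed out,
 * or could not be started.
 */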
| linux-master | lib/kunit/try-catch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* C++ stream style string builder used in KUnit for building messages.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/test.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "string-stream.h"
static struct string_stream_fragment *alloc_string_stream_fragment(
struct kunit *test, int len, gfp_t gfp)
{
struct string_stream_fragment *frag;
frag = kunit_kzalloc(test, sizeof(*frag), gfp);
if (!frag)
return ERR_PTR(-ENOMEM);
frag->fragment = kunit_kmalloc(test, len, gfp);
if (!frag->fragment) {
kunit_kfree(test, frag);
return ERR_PTR(-ENOMEM);
}
return frag;
}
static void string_stream_fragment_destroy(struct kunit *test,
struct string_stream_fragment *frag)
{
list_del(&frag->node);
kunit_kfree(test, frag->fragment);
kunit_kfree(test, frag);
}
int string_stream_vadd(struct string_stream *stream,
const char *fmt,
va_list args)
{
struct string_stream_fragment *frag_container;
int len;
va_list args_for_counting;
/* Make a copy because `vsnprintf` could change it */
va_copy(args_for_counting, args);
/* Need space for null byte. */
len = vsnprintf(NULL, 0, fmt, args_for_counting) + 1;
va_end(args_for_counting);
frag_container = alloc_string_stream_fragment(stream->test,
len,
stream->gfp);
if (IS_ERR(frag_container))
return PTR_ERR(frag_container);
len = vsnprintf(frag_container->fragment, len, fmt, args);
spin_lock(&stream->lock);
stream->length += len;
list_add_tail(&frag_container->node, &stream->fragments);
spin_unlock(&stream->lock);
return 0;
}
int string_stream_add(struct string_stream *stream, const char *fmt, ...)
{
va_list args;
int result;
va_start(args, fmt);
result = string_stream_vadd(stream, fmt, args);
va_end(args);
return result;
}
static void string_stream_clear(struct string_stream *stream)
{
struct string_stream_fragment *frag_container, *frag_container_safe;
spin_lock(&stream->lock);
list_for_each_entry_safe(frag_container,
frag_container_safe,
&stream->fragments,
node) {
string_stream_fragment_destroy(stream->test, frag_container);
}
stream->length = 0;
spin_unlock(&stream->lock);
}
char *string_stream_get_string(struct string_stream *stream)
{
struct string_stream_fragment *frag_container;
size_t buf_len = stream->length + 1; /* +1 for null byte. */
char *buf;
buf = kunit_kzalloc(stream->test, buf_len, stream->gfp);
if (!buf)
return NULL;
spin_lock(&stream->lock);
list_for_each_entry(frag_container, &stream->fragments, node)
strlcat(buf, frag_container->fragment, buf_len);
spin_unlock(&stream->lock);
return buf;
}
int string_stream_append(struct string_stream *stream,
struct string_stream *other)
{
const char *other_content;
other_content = string_stream_get_string(other);
if (!other_content)
return -ENOMEM;
return string_stream_add(stream, other_content);
}
bool string_stream_is_empty(struct string_stream *stream)
{
return list_empty(&stream->fragments);
}
struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
{
struct string_stream *stream;
stream = kunit_kzalloc(test, sizeof(*stream), gfp);
if (!stream)
return ERR_PTR(-ENOMEM);
stream->gfp = gfp;
stream->test = test;
INIT_LIST_HEAD(&stream->fragments);
spin_lock_init(&stream->lock);
return stream;
}
void string_stream_destroy(struct string_stream *stream)
{
string_stream_clear(stream);
}
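/*
 * Usage sketch (illustrative only): within a test context the stream is
 * used to build a message incrementally and then read it back as a single
 * string:
 *
 *	struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
 *
 *	string_stream_add(stream, "expected %d, ", 1);
 *	string_stream_add(stream, "got %d", 2);
 *	kunit_err(test, "%s\n", string_stream_get_string(stream));
 *	string_stream_destroy(stream);
 */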
| linux-master | lib/kunit/string-stream.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Assertion and expectation serialization API.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/assert.h>
#include <kunit/test.h>
#include "string-stream.h"
void kunit_assert_prologue(const struct kunit_loc *loc,
enum kunit_assert_type type,
struct string_stream *stream)
{
const char *expect_or_assert = NULL;
switch (type) {
case KUNIT_EXPECTATION:
expect_or_assert = "EXPECTATION";
break;
case KUNIT_ASSERTION:
expect_or_assert = "ASSERTION";
break;
}
string_stream_add(stream, "%s FAILED at %s:%d\n",
expect_or_assert, loc->file, loc->line);
}
EXPORT_SYMBOL_GPL(kunit_assert_prologue);
static void kunit_assert_print_msg(const struct va_format *message,
struct string_stream *stream)
{
if (message->fmt)
string_stream_add(stream, "\n%pV", message);
}
void kunit_fail_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
string_stream_add(stream, "%pV", message);
}
EXPORT_SYMBOL_GPL(kunit_fail_assert_format);
void kunit_unary_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
struct kunit_unary_assert *unary_assert;
unary_assert = container_of(assert, struct kunit_unary_assert, assert);
if (unary_assert->expected_true)
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
unary_assert->condition);
else
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
unary_assert->condition);
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
struct kunit_ptr_not_err_assert *ptr_assert;
ptr_assert = container_of(assert, struct kunit_ptr_not_err_assert,
assert);
if (!ptr_assert->value) {
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
ptr_assert->text);
} else if (IS_ERR(ptr_assert->value)) {
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s is not error, but is: %ld\n",
ptr_assert->text,
PTR_ERR(ptr_assert->value));
}
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
static bool is_literal(struct kunit *test, const char *text, long long value,
gfp_t gfp)
{
char *buffer;
int len;
bool ret;
len = snprintf(NULL, 0, "%lld", value);
if (strlen(text) != len)
return false;
buffer = kunit_kmalloc(test, len+1, gfp);
if (!buffer)
return false;
snprintf(buffer, len+1, "%lld", value);
ret = strncmp(buffer, text, len) == 0;
kunit_kfree(test, buffer);
return ret;
}
void kunit_binary_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
struct kunit_binary_assert *binary_assert;
binary_assert = container_of(assert, struct kunit_binary_assert,
assert);
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
binary_assert->text->left_text,
binary_assert->text->operation,
binary_assert->text->right_text);
if (!is_literal(stream->test, binary_assert->text->left_text,
binary_assert->left_value, stream->gfp))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n",
binary_assert->text->left_text,
binary_assert->left_value,
binary_assert->left_value);
if (!is_literal(stream->test, binary_assert->text->right_text,
binary_assert->right_value, stream->gfp))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)",
binary_assert->text->right_text,
binary_assert->right_value,
binary_assert->right_value);
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
struct kunit_binary_ptr_assert *binary_assert;
binary_assert = container_of(assert, struct kunit_binary_ptr_assert,
assert);
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
binary_assert->text->left_text,
binary_assert->text->operation,
binary_assert->text->right_text);
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
binary_assert->text->left_text,
binary_assert->left_value);
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
binary_assert->text->right_text,
binary_assert->right_value);
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
/* Checks if KUNIT_EXPECT_STREQ() args were string literals.
* Note: `text` will have ""s whereas `value` will not.
*/
static bool is_str_literal(const char *text, const char *value)
{
int len;
len = strlen(text);
if (len < 2)
return false;
if (text[0] != '\"' || text[len - 1] != '\"')
return false;
return strncmp(text + 1, value, len - 2) == 0;
}
void kunit_binary_str_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
struct kunit_binary_str_assert *binary_assert;
binary_assert = container_of(assert, struct kunit_binary_str_assert,
assert);
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
binary_assert->text->left_text,
binary_assert->text->operation,
binary_assert->text->right_text);
if (!is_str_literal(binary_assert->text->left_text, binary_assert->left_value))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"\n",
binary_assert->text->left_text,
binary_assert->left_value);
if (!is_str_literal(binary_assert->text->right_text, binary_assert->right_value))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"",
binary_assert->text->right_text,
binary_assert->right_value);
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
/* Adds a hexdump of a buffer to a string_stream comparing it with
* a second buffer. The different bytes are marked with <>.
*/
static void kunit_assert_hexdump(struct string_stream *stream,
const void *buf,
const void *compared_buf,
const size_t len)
{
size_t i;
const u8 *buf1 = buf;
const u8 *buf2 = compared_buf;
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT);
for (i = 0; i < len; ++i) {
if (!(i % 16) && i)
string_stream_add(stream, "\n" KUNIT_SUBSUBTEST_INDENT);
if (buf1[i] != buf2[i])
string_stream_add(stream, "<%02x>", buf1[i]);
else
string_stream_add(stream, " %02x ", buf1[i]);
}
}
void kunit_mem_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream)
{
struct kunit_mem_assert *mem_assert;
mem_assert = container_of(assert, struct kunit_mem_assert,
assert);
if (!mem_assert->left_value) {
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
mem_assert->text->left_text);
} else if (!mem_assert->right_value) {
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
mem_assert->text->right_text);
} else {
string_stream_add(stream,
KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
mem_assert->text->left_text,
mem_assert->text->operation,
mem_assert->text->right_text);
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
mem_assert->text->left_text);
kunit_assert_hexdump(stream, mem_assert->left_value,
mem_assert->right_value, mem_assert->size);
string_stream_add(stream, "\n");
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
mem_assert->text->right_text);
kunit_assert_hexdump(stream, mem_assert->right_value,
mem_assert->left_value, mem_assert->size);
kunit_assert_print_msg(message, stream);
}
}
EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
| linux-master | lib/kunit/assert.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Example KUnit test to show how to use KUnit.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/test.h>
#include <kunit/static_stub.h>
/*
* This is the most fundamental element of KUnit, the test case. A test case
* makes a set of EXPECTATIONs and ASSERTIONs about the behavior of some code; if
* any expectations or assertions are not met, the test fails; otherwise, the
* test passes.
*
* In KUnit, a test case is just a function with the signature
* `void (*)(struct kunit *)`. `struct kunit` is a context object that stores
* information about the current test.
*/
static void example_simple_test(struct kunit *test)
{
/*
* This is an EXPECTATION; it is how KUnit tests things. When you want
* to test a piece of code, you set some expectations about what the
* code should do. KUnit then runs the test and verifies that the code's
* behavior matched what was expected.
*/
KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}
/*
* This is run once before each test case, see the comment on
* example_test_suite for more information.
*/
static int example_test_init(struct kunit *test)
{
kunit_info(test, "initializing\n");
return 0;
}
/*
* This is run once after each test case, see the comment on
* example_test_suite for more information.
*/
static void example_test_exit(struct kunit *test)
{
kunit_info(test, "cleaning up\n");
}
/*
* This is run once before all test cases in the suite.
* See the comment on example_test_suite for more information.
*/
static int example_test_init_suite(struct kunit_suite *suite)
{
kunit_info(suite, "initializing suite\n");
return 0;
}
/*
* This is run once after all test cases in the suite.
* See the comment on example_test_suite for more information.
*/
static void example_test_exit_suite(struct kunit_suite *suite)
{
kunit_info(suite, "exiting suite\n");
}
/*
* This test should always be skipped.
*/
static void example_skip_test(struct kunit *test)
{
/* This line should run */
kunit_info(test, "You should not see a line below.");
/* Skip (and abort) the test */
kunit_skip(test, "this test should be skipped");
/* This line should not execute */
KUNIT_FAIL(test, "You should not see this line.");
}
/*
* This test should always be marked skipped.
*/
static void example_mark_skipped_test(struct kunit *test)
{
/* This line should run */
kunit_info(test, "You should see a line below.");
/* Skip (but do not abort) the test */
kunit_mark_skipped(test, "this test should be skipped");
/* This line should run */
kunit_info(test, "You should see this line.");
}
/*
* This test shows off all the types of KUNIT_EXPECT macros.
*/
static void example_all_expect_macros_test(struct kunit *test)
{
const u32 array1[] = { 0x0F, 0xFF };
const u32 array2[] = { 0x1F, 0xFF };
/* Boolean assertions */
KUNIT_EXPECT_TRUE(test, true);
KUNIT_EXPECT_FALSE(test, false);
/* Integer assertions */
KUNIT_EXPECT_EQ(test, 1, 1); /* check == */
KUNIT_EXPECT_GE(test, 1, 1); /* check >= */
KUNIT_EXPECT_LE(test, 1, 1); /* check <= */
KUNIT_EXPECT_NE(test, 1, 0); /* check != */
KUNIT_EXPECT_GT(test, 1, 0); /* check > */
KUNIT_EXPECT_LT(test, 0, 1); /* check < */
/* Pointer assertions */
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test);
KUNIT_EXPECT_PTR_EQ(test, NULL, NULL);
KUNIT_EXPECT_PTR_NE(test, test, NULL);
KUNIT_EXPECT_NULL(test, NULL);
KUNIT_EXPECT_NOT_NULL(test, test);
/* String assertions */
KUNIT_EXPECT_STREQ(test, "hi", "hi");
KUNIT_EXPECT_STRNEQ(test, "hi", "bye");
/* Memory block assertions */
KUNIT_EXPECT_MEMEQ(test, array1, array1, sizeof(array1));
KUNIT_EXPECT_MEMNEQ(test, array1, array2, sizeof(array1));
/*
* There are also ASSERT variants of all of the above that abort test
* execution if they fail. Useful for memory allocations, etc.
*/
KUNIT_ASSERT_GT(test, sizeof(char), 0);
/*
* There are also _MSG variants of all of the above that let you include
* additional text on failure.
*/
KUNIT_EXPECT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
KUNIT_ASSERT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
}
/* This is a function we'll replace with static stubs. */
static int add_one(int i)
{
/* This will trigger the stub if active. */
KUNIT_STATIC_STUB_REDIRECT(add_one, i);
return i + 1;
}
/* This is used as a replacement for the above function. */
static int subtract_one(int i)
{
/* We don't need to trigger the stub from the replacement. */
return i - 1;
}
/*
* This test shows the use of static stubs.
*/
static void example_static_stub_test(struct kunit *test)
{
/* By default, function is not stubbed. */
KUNIT_EXPECT_EQ(test, add_one(1), 2);
/* Replace add_one() with subtract_one(). */
kunit_activate_static_stub(test, add_one, subtract_one);
/* add_one() is now replaced. */
KUNIT_EXPECT_EQ(test, add_one(1), 0);
/* Return add_one() to normal. */
kunit_deactivate_static_stub(test, add_one);
KUNIT_EXPECT_EQ(test, add_one(1), 2);
}
static const struct example_param {
int value;
} example_params_array[] = {
{ .value = 2, },
{ .value = 1, },
{ .value = 0, },
};
static void example_param_get_desc(const struct example_param *p, char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "example value %d", p->value);
}
KUNIT_ARRAY_PARAM(example, example_params_array, example_param_get_desc);
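/*
* KUNIT_ARRAY_PARAM() generates a parameter generator named
* example_gen_params() that walks example_params_array and calls
* example_param_get_desc() to fill in each parameter's description;
* that generated symbol is what KUNIT_CASE_PARAM() refers to in the
* test case list further below.
*/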
/*
* This test shows the use of params.
*/
static void example_params_test(struct kunit *test)
{
const struct example_param *param = test->param_value;
/* By design, param pointer will not be NULL */
KUNIT_ASSERT_NOT_NULL(test, param);
/* Test can be skipped on unsupported param values */
if (!param->value)
kunit_skip(test, "unsupported param value");
/* You can use param values for parameterized testing */
KUNIT_EXPECT_EQ(test, param->value % param->value, 0);
}
/*
* This test should always pass. Can be used to practice filtering attributes.
*/
static void example_slow_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}
/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
*/
static struct kunit_case example_test_cases[] = {
/*
* This is a helper to create a test case object from a test case
* function; its inner workings are not important for understanding how to
* use KUnit; just know that this is how you associate test cases with a
* test suite.
*/
KUNIT_CASE(example_simple_test),
KUNIT_CASE(example_skip_test),
KUNIT_CASE(example_mark_skipped_test),
KUNIT_CASE(example_all_expect_macros_test),
KUNIT_CASE(example_static_stub_test),
KUNIT_CASE_PARAM(example_params_test, example_gen_params),
KUNIT_CASE_SLOW(example_slow_test),
{}
};
/*
* This defines a suite or grouping of tests.
*
* Test cases are defined as belonging to the suite by adding them to
* `kunit_cases`.
*
* Often it is desirable to run some function which will set up things which
* will be used by every test; this is accomplished with an `init` function
* which runs before each test case is invoked. Similarly, an `exit` function
* may be specified which runs after every test case and can be used for
* cleanup. For clarity, running tests in a test suite would behave as follows:
*
* suite.suite_init(suite);
* suite.init(test);
* suite.test_case[0](test);
* suite.exit(test);
* suite.init(test);
* suite.test_case[1](test);
* suite.exit(test);
* suite.suite_exit(suite);
* ...;
*/
static struct kunit_suite example_test_suite = {
.name = "example",
.init = example_test_init,
.exit = example_test_exit,
.suite_init = example_test_init_suite,
.suite_exit = example_test_exit_suite,
.test_cases = example_test_cases,
};
/*
* This registers the above test suite telling KUnit that this is a suite of
* tests that need to be run.
*/
kunit_test_suites(&example_test_suite);
MODULE_LICENSE("GPL v2");
| linux-master | lib/kunit/kunit-example-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit function redirection (static stubbing) API.
*
* Copyright (C) 2022, Google LLC.
* Author: David Gow <[email protected]>
*/
#include <kunit/test.h>
#include <kunit/static_stub.h>
#include "hooks-impl.h"
/* Context for a static stub. This is stored in the resource data. */
struct kunit_static_stub_ctx {
void *real_fn_addr;
void *replacement_addr;
};
static void __kunit_static_stub_resource_free(struct kunit_resource *res)
{
kfree(res->data);
}
/* Matching function for kunit_find_resource(). match_data is real_fn_addr. */
static bool __kunit_static_stub_resource_match(struct kunit *test,
struct kunit_resource *res,
void *match_real_fn_addr)
{
/* This pointer is only valid if res is a static stub resource. */
struct kunit_static_stub_ctx *ctx = res->data;
/* Make sure the resource is a static stub resource. */
if (res->free != &__kunit_static_stub_resource_free)
return false;
return ctx->real_fn_addr == match_real_fn_addr;
}
/* Hook to return the address of the replacement function. */
void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr)
{
struct kunit_resource *res;
struct kunit_static_stub_ctx *ctx;
void *replacement_addr;
res = kunit_find_resource(test,
__kunit_static_stub_resource_match,
real_fn_addr);
if (!res)
return NULL;
ctx = res->data;
replacement_addr = ctx->replacement_addr;
kunit_put_resource(res);
return replacement_addr;
}
void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr)
{
struct kunit_resource *res;
KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
"Tried to deactivate a NULL stub.");
/* Look up the existing stub for this function. */
res = kunit_find_resource(test,
__kunit_static_stub_resource_match,
real_fn_addr);
/* Error out if the stub doesn't exist. */
KUNIT_ASSERT_PTR_NE_MSG(test, res, NULL,
"Tried to deactivate a nonexistent stub.");
/* Free the stub. We 'put' twice, as we got a reference
* from kunit_find_resource()
*/
kunit_remove_resource(test, res);
kunit_put_resource(res);
}
EXPORT_SYMBOL_GPL(kunit_deactivate_static_stub);
/* Helper function for kunit_activate_static_stub(). The macro does
* typechecking, so use it instead.
*/
void __kunit_activate_static_stub(struct kunit *test,
void *real_fn_addr,
void *replacement_addr)
{
struct kunit_static_stub_ctx *ctx;
struct kunit_resource *res;
KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
"Tried to activate a stub for function NULL");
/* If the replacement address is NULL, deactivate the stub. */
if (!replacement_addr) {
kunit_deactivate_static_stub(test, real_fn_addr);
return;
}
/* Look up any existing stubs for this function, and replace them. */
res = kunit_find_resource(test,
__kunit_static_stub_resource_match,
real_fn_addr);
if (res) {
ctx = res->data;
ctx->replacement_addr = replacement_addr;
/* We got an extra reference from find_resource(), so put it. */
kunit_put_resource(res);
} else {
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
ctx->real_fn_addr = real_fn_addr;
ctx->replacement_addr = replacement_addr;
res = kunit_alloc_resource(test, NULL,
&__kunit_static_stub_resource_free,
GFP_KERNEL, ctx);
}
}
EXPORT_SYMBOL_GPL(__kunit_activate_static_stub);
| linux-master | lib/kunit/static_stub.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit 'Hooks' implementation.
*
* This file contains code / structures which should be built-in even when
* KUnit itself is built as a module.
*
* Copyright (C) 2022, Google LLC.
* Author: David Gow <[email protected]>
*/
#include <kunit/test-bug.h>
DEFINE_STATIC_KEY_FALSE(kunit_running);
EXPORT_SYMBOL(kunit_running);
/* Function pointers for hooks. */
struct kunit_hooks_table kunit_hooks;
EXPORT_SYMBOL(kunit_hooks);
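/*
* The static key lets hot paths ask "is any KUnit test running?" via a
* patched branch instead of a memory load. As a rough sketch (the real
* definition lives in <kunit/test-bug.h>), kunit_get_current_test() does:
*
*	if (!static_branch_unlikely(&kunit_running))
*		return NULL;
*	return current->kunit_test;
*
* so instrumented code costs almost nothing when no test is executing.
*/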
| linux-master | lib/kunit/hooks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Base unit test (KUnit) API.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/resource.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <kunit/attributes.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include "debugfs.h"
#include "hooks-impl.h"
#include "string-stream.h"
#include "try-catch-impl.h"
/*
* Hook to fail the current test and print an error message to the log.
*/
void __printf(3, 4) __kunit_fail_current_test_impl(const char *file, int line, const char *fmt, ...)
{
va_list args;
int len;
char *buffer;
if (!current->kunit_test)
return;
kunit_set_failure(current->kunit_test);
/* kunit_err() only accepts literals, so evaluate the args first. */
va_start(args, fmt);
len = vsnprintf(NULL, 0, fmt, args) + 1;
va_end(args);
buffer = kunit_kmalloc(current->kunit_test, len, GFP_KERNEL);
if (!buffer)
return;
va_start(args, fmt);
vsnprintf(buffer, len, fmt, args);
va_end(args);
kunit_err(current->kunit_test, "%s:%d: %s", file, line, buffer);
kunit_kfree(current->kunit_test, buffer);
}
/*
* Enable KUnit tests to run.
*/
#ifdef CONFIG_KUNIT_DEFAULT_ENABLED
static bool enable_param = true;
#else
static bool enable_param;
#endif
module_param_named(enable, enable_param, bool, 0);
MODULE_PARM_DESC(enable, "Enable KUnit tests");
/*
* KUnit statistic mode:
* 0 - disabled
* 1 - only when there is more than one subtest
* 2 - enabled
*/
static int kunit_stats_enabled = 1;
module_param_named(stats_enabled, kunit_stats_enabled, int, 0644);
MODULE_PARM_DESC(stats_enabled,
"Print test stats: never (0), only for multiple subtests (1), or always (2)");
struct kunit_result_stats {
unsigned long passed;
unsigned long skipped;
unsigned long failed;
unsigned long total;
};
static bool kunit_should_print_stats(struct kunit_result_stats stats)
{
if (kunit_stats_enabled == 0)
return false;
if (kunit_stats_enabled == 2)
return true;
return (stats.total > 1);
}
static void kunit_print_test_stats(struct kunit *test,
struct kunit_result_stats stats)
{
if (!kunit_should_print_stats(stats))
return;
kunit_log(KERN_INFO, test,
KUNIT_SUBTEST_INDENT
"# %s: pass:%lu fail:%lu skip:%lu total:%lu",
test->name,
stats.passed,
stats.failed,
stats.skipped,
stats.total);
}
/**
* kunit_log_newline() - Add newline to the end of log if one is not
* already present.
* @log: The log to add the newline to.
*/
static void kunit_log_newline(char *log)
{
int log_len, len_left;
log_len = strlen(log);
len_left = KUNIT_LOG_SIZE - log_len - 1;
if (log_len > 0 && log[log_len - 1] != '\n')
strncat(log, "\n", len_left);
}
/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
*/
void kunit_log_append(char *log, const char *fmt, ...)
{
va_list args;
int len, log_len, len_left;
if (!log)
return;
log_len = strlen(log);
len_left = KUNIT_LOG_SIZE - log_len - 1;
if (len_left <= 0)
return;
/* Evaluate length of line to add to log */
va_start(args, fmt);
len = vsnprintf(NULL, 0, fmt, args) + 1;
va_end(args);
/* Print formatted line to the log */
va_start(args, fmt);
vsnprintf(log + log_len, min(len, len_left), fmt, args);
va_end(args);
/* Add newline to end of log if not already present. */
kunit_log_newline(log);
}
EXPORT_SYMBOL_GPL(kunit_log_append);
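/*
* Callers normally reach this through the kunit_log() macro, which both
* printk()s the line and appends it to the per-test or per-suite log
* buffer. A minimal direct-call sketch (assuming the test has a log):
*
*	kunit_log_append(test->log, "# %s: custom note", test->name);
*
* When the buffer is already full the line is dropped; otherwise it is
* truncated to the remaining space and a trailing newline is ensured.
*/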
size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
{
struct kunit_case *test_case;
size_t len = 0;
kunit_suite_for_each_test_case(suite, test_case)
len++;
return len;
}
EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
/* Currently supported test levels */
enum {
KUNIT_LEVEL_SUITE = 0,
KUNIT_LEVEL_CASE,
KUNIT_LEVEL_CASE_PARAM,
};
static void kunit_print_suite_start(struct kunit_suite *suite)
{
/*
* We do not log the test suite header as doing so would
* mean debugfs display would consist of the test suite
* header prior to individual test results.
* Hence directly printk the suite status, and we will
* separately seq_printf() the suite header for the debugfs
* representation.
*/
pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
suite->name);
kunit_print_attr((void *)suite, false, KUNIT_LEVEL_CASE);
pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
kunit_suite_num_test_cases(suite));
}
static void kunit_print_ok_not_ok(struct kunit *test,
unsigned int test_level,
enum kunit_status status,
size_t test_number,
const char *description,
const char *directive)
{
const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
const char *directive_body = (status == KUNIT_SKIPPED) ? directive : "";
/*
* When test is NULL assume that results are from the suite
* and today suite results are expected at level 0 only.
*/
WARN(!test && test_level, "suite test level can't be %u!\n", test_level);
/*
* We do not log the test suite results as doing so would
* mean debugfs display would consist of an incorrect test
* number. Hence directly printk the suite result, and we will
* separately seq_printf() the suite results for the debugfs
* representation.
*/
if (!test)
pr_info("%s %zd %s%s%s\n",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
directive_body);
else
kunit_log(KERN_INFO, test,
"%*s%s %zd %s%s%s",
KUNIT_INDENT_LEN * test_level, "",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
directive_body);
}
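/*
* For illustration, a passing case at KUNIT_LEVEL_CASE comes out as a KTAP
* result line such as:
*
*	    ok 1 example_simple_test
*
* while a skipped case gets the SKIP directive appended:
*
*	    ok 1 example_skip_test # SKIP this test should be skipped
*
* Suite-level results (test == NULL) are printk()ed directly rather than
* logged, for the reasons given in the comment above.
*/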
enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
enum kunit_status status = KUNIT_SKIPPED;
if (suite->suite_init_err)
return KUNIT_FAILURE;
kunit_suite_for_each_test_case(suite, test_case) {
if (test_case->status == KUNIT_FAILURE)
return KUNIT_FAILURE;
else if (test_case->status == KUNIT_SUCCESS)
status = KUNIT_SUCCESS;
}
return status;
}
EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
static size_t kunit_suite_counter = 1;
static void kunit_print_suite_end(struct kunit_suite *suite)
{
kunit_print_ok_not_ok(NULL, KUNIT_LEVEL_SUITE,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
suite->name,
suite->status_comment);
}
unsigned int kunit_test_case_num(struct kunit_suite *suite,
struct kunit_case *test_case)
{
struct kunit_case *tc;
unsigned int i = 1;
kunit_suite_for_each_test_case(suite, tc) {
if (tc == test_case)
return i;
i++;
}
return 0;
}
EXPORT_SYMBOL_GPL(kunit_test_case_num);
static void kunit_print_string_stream(struct kunit *test,
struct string_stream *stream)
{
struct string_stream_fragment *fragment;
char *buf;
if (string_stream_is_empty(stream))
return;
buf = string_stream_get_string(stream);
if (!buf) {
kunit_err(test,
"Could not allocate buffer, dumping stream:\n");
list_for_each_entry(fragment, &stream->fragments, node) {
kunit_err(test, "%s", fragment->fragment);
}
kunit_err(test, "\n");
} else {
kunit_err(test, "%s", buf);
kunit_kfree(test, buf);
}
}
static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
enum kunit_assert_type type, const struct kunit_assert *assert,
assert_format_t assert_format, const struct va_format *message)
{
struct string_stream *stream;
kunit_set_failure(test);
stream = alloc_string_stream(test, GFP_KERNEL);
if (IS_ERR(stream)) {
WARN(true,
"Could not allocate stream to print failed assertion in %s:%d\n",
loc->file,
loc->line);
return;
}
kunit_assert_prologue(loc, type, stream);
assert_format(assert, message, stream);
kunit_print_string_stream(test, stream);
string_stream_destroy(stream);
}
void __noreturn __kunit_abort(struct kunit *test)
{
kunit_try_catch_throw(&test->try_catch); /* Does not return. */
/*
* Throw could not abort from test.
*
* XXX: we should never reach this line! As kunit_try_catch_throw is
* marked __noreturn.
*/
WARN_ONCE(true, "Throw could not abort from test!\n");
}
EXPORT_SYMBOL_GPL(__kunit_abort);
void __kunit_do_failed_assertion(struct kunit *test,
const struct kunit_loc *loc,
enum kunit_assert_type type,
const struct kunit_assert *assert,
assert_format_t assert_format,
const char *fmt, ...)
{
va_list args;
struct va_format message;
va_start(args, fmt);
message.fmt = fmt;
message.va = &args;
kunit_fail(test, loc, type, assert, assert_format, &message);
va_end(args);
}
EXPORT_SYMBOL_GPL(__kunit_do_failed_assertion);
void kunit_init_test(struct kunit *test, const char *name, char *log)
{
spin_lock_init(&test->lock);
INIT_LIST_HEAD(&test->resources);
test->name = name;
test->log = log;
if (test->log)
test->log[0] = '\0';
test->status = KUNIT_SUCCESS;
test->status_comment[0] = '\0';
}
EXPORT_SYMBOL_GPL(kunit_init_test);
/*
* Initializes and runs test case. Does not clean up or do post validations.
*/
static void kunit_run_case_internal(struct kunit *test,
struct kunit_suite *suite,
struct kunit_case *test_case)
{
if (suite->init) {
int ret;
ret = suite->init(test);
if (ret) {
kunit_err(test, "failed to initialize: %d\n", ret);
kunit_set_failure(test);
return;
}
}
test_case->run_case(test);
}
static void kunit_case_internal_cleanup(struct kunit *test)
{
kunit_cleanup(test);
}
/*
* Performs post validations and cleanup after a test case was run.
* XXX: Should ONLY BE CALLED AFTER kunit_run_case_internal!
*/
static void kunit_run_case_cleanup(struct kunit *test,
struct kunit_suite *suite)
{
if (suite->exit)
suite->exit(test);
kunit_case_internal_cleanup(test);
}
struct kunit_try_catch_context {
struct kunit *test;
struct kunit_suite *suite;
struct kunit_case *test_case;
};
static void kunit_try_run_case(void *data)
{
struct kunit_try_catch_context *ctx = data;
struct kunit *test = ctx->test;
struct kunit_suite *suite = ctx->suite;
struct kunit_case *test_case = ctx->test_case;
current->kunit_test = test;
/*
* kunit_run_case_internal may encounter a fatal error; if it does,
* abort will be called, this thread will exit, and finally the parent
* thread will resume control and handle any necessary clean up.
*/
kunit_run_case_internal(test, suite, test_case);
}
static void kunit_try_run_case_cleanup(void *data)
{
struct kunit_try_catch_context *ctx = data;
struct kunit *test = ctx->test;
struct kunit_suite *suite = ctx->suite;
current->kunit_test = test;
kunit_run_case_cleanup(test, suite);
}
static void kunit_catch_run_case_cleanup(void *data)
{
struct kunit_try_catch_context *ctx = data;
struct kunit *test = ctx->test;
int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
/* It is always a failure if cleanup aborts. */
kunit_set_failure(test);
if (try_exit_code) {
/*
* Cleanup could not finish: it either timed out or hit an internal
* error, so report which one below.
*/
if (try_exit_code == -ETIMEDOUT) {
kunit_err(test, "test case cleanup timed out\n");
/*
* Unknown internal error occurred during test case cleanup.
*/
} else {
kunit_err(test, "internal error occurred during test case cleanup: %d\n",
try_exit_code);
}
return;
}
kunit_err(test, "test aborted during cleanup. continuing without cleaning up\n");
}
static void kunit_catch_run_case(void *data)
{
struct kunit_try_catch_context *ctx = data;
struct kunit *test = ctx->test;
int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
if (try_exit_code) {
kunit_set_failure(test);
/*
* Test case could not finish, we have no idea what state it is
* in, so don't do clean up.
*/
if (try_exit_code == -ETIMEDOUT) {
kunit_err(test, "test case timed out\n");
/*
* Unknown internal error occurred preventing test case from
* running, so there is nothing to clean up.
*/
} else {
kunit_err(test, "internal error occurred preventing test case from running: %d\n",
try_exit_code);
}
return;
}
}
/*
* Performs all logic to run a test case. It also catches most errors that
* occur in a test case and reports them as failures.
*/
static void kunit_run_case_catch_errors(struct kunit_suite *suite,
struct kunit_case *test_case,
struct kunit *test)
{
struct kunit_try_catch_context context;
struct kunit_try_catch *try_catch;
try_catch = &test->try_catch;
kunit_try_catch_init(try_catch,
test,
kunit_try_run_case,
kunit_catch_run_case);
context.test = test;
context.suite = suite;
context.test_case = test_case;
kunit_try_catch_run(try_catch, &context);
/* Now run the cleanup */
kunit_try_catch_init(try_catch,
test,
kunit_try_run_case_cleanup,
kunit_catch_run_case_cleanup);
kunit_try_catch_run(try_catch, &context);
/* Propagate the parameter result to the test case. */
if (test->status == KUNIT_FAILURE)
test_case->status = KUNIT_FAILURE;
else if (test_case->status != KUNIT_FAILURE && test->status == KUNIT_SUCCESS)
test_case->status = KUNIT_SUCCESS;
}
static void kunit_print_suite_stats(struct kunit_suite *suite,
struct kunit_result_stats suite_stats,
struct kunit_result_stats param_stats)
{
if (kunit_should_print_stats(suite_stats)) {
kunit_log(KERN_INFO, suite,
"# %s: pass:%lu fail:%lu skip:%lu total:%lu",
suite->name,
suite_stats.passed,
suite_stats.failed,
suite_stats.skipped,
suite_stats.total);
}
if (kunit_should_print_stats(param_stats)) {
kunit_log(KERN_INFO, suite,
"# Totals: pass:%lu fail:%lu skip:%lu total:%lu",
param_stats.passed,
param_stats.failed,
param_stats.skipped,
param_stats.total);
}
}
static void kunit_update_stats(struct kunit_result_stats *stats,
enum kunit_status status)
{
switch (status) {
case KUNIT_SUCCESS:
stats->passed++;
break;
case KUNIT_SKIPPED:
stats->skipped++;
break;
case KUNIT_FAILURE:
stats->failed++;
break;
}
stats->total++;
}
static void kunit_accumulate_stats(struct kunit_result_stats *total,
struct kunit_result_stats add)
{
total->passed += add.passed;
total->skipped += add.skipped;
total->failed += add.failed;
total->total += add.total;
}
int kunit_run_tests(struct kunit_suite *suite)
{
char param_desc[KUNIT_PARAM_DESC_SIZE];
struct kunit_case *test_case;
struct kunit_result_stats suite_stats = { 0 };
struct kunit_result_stats total_stats = { 0 };
/* Taint the kernel so we know we've run tests. */
add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
if (suite->suite_init) {
suite->suite_init_err = suite->suite_init(suite);
if (suite->suite_init_err) {
kunit_err(suite, KUNIT_SUBTEST_INDENT
"# failed to initialize (%d)", suite->suite_init_err);
goto suite_end;
}
}
kunit_print_suite_start(suite);
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
struct kunit_result_stats param_stats = { 0 };
kunit_init_test(&test, test_case->name, test_case->log);
if (test_case->status == KUNIT_SKIPPED) {
/* Test marked as skip */
test.status = KUNIT_SKIPPED;
kunit_update_stats(¶m_stats, test.status);
} else if (!test_case->generate_params) {
/* Non-parameterised test. */
test_case->status = KUNIT_SKIPPED;
kunit_run_case_catch_errors(suite, test_case, &test);
kunit_update_stats(¶m_stats, test.status);
} else {
/* Get initial param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
test_case->status = KUNIT_SKIPPED;
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"KTAP version 1\n");
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"# Subtest: %s", test_case->name);
while (test.param_value) {
kunit_run_case_catch_errors(suite, test_case, &test);
if (param_desc[0] == '\0') {
snprintf(param_desc, sizeof(param_desc),
"param-%d", test.param_index);
}
kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE_PARAM,
test.status,
test.param_index + 1,
param_desc,
test.status_comment);
/* Get next param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(test.param_value, param_desc);
test.param_index++;
kunit_update_stats(¶m_stats, test.status);
}
}
kunit_print_attr((void *)test_case, true, KUNIT_LEVEL_CASE);
kunit_print_test_stats(&test, param_stats);
kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE, test_case->status,
kunit_test_case_num(suite, test_case),
test_case->name,
test.status_comment);
kunit_update_stats(&suite_stats, test_case->status);
kunit_accumulate_stats(&total_stats, param_stats);
}
if (suite->suite_exit)
suite->suite_exit(suite);
kunit_print_suite_stats(suite, suite_stats, total_stats);
suite_end:
kunit_print_suite_end(suite);
return 0;
}
EXPORT_SYMBOL_GPL(kunit_run_tests);
static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
suite->status_comment[0] = '\0';
suite->suite_init_err = 0;
}
bool kunit_enabled(void)
{
return enable_param;
}
int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
{
unsigned int i;
if (!kunit_enabled() && num_suites > 0) {
pr_info("kunit: disabled\n");
return 0;
}
static_branch_inc(&kunit_running);
for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
kunit_run_tests(suites[i]);
}
static_branch_dec(&kunit_running);
return 0;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
static void kunit_exit_suite(struct kunit_suite *suite)
{
kunit_debugfs_destroy_suite(suite);
}
void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
{
unsigned int i;
if (!kunit_enabled())
return;
for (i = 0; i < num_suites; i++)
kunit_exit_suite(suites[i]);
kunit_suite_counter = 1;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
#ifdef CONFIG_MODULES
static void kunit_module_init(struct module *mod)
{
struct kunit_suite_set suite_set = {
mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
};
const char *action = kunit_action();
int err = 0;
suite_set = kunit_filter_suites(&suite_set,
kunit_filter_glob() ?: "*.*",
kunit_filter(), kunit_filter_action(),
&err);
if (err)
pr_err("kunit module: error filtering suites: %d\n", err);
mod->kunit_suites = (struct kunit_suite **)suite_set.start;
mod->num_kunit_suites = suite_set.end - suite_set.start;
if (!action)
kunit_exec_run_tests(&suite_set, false);
else if (!strcmp(action, "list"))
kunit_exec_list_tests(&suite_set, false);
else if (!strcmp(action, "list_attr"))
kunit_exec_list_tests(&suite_set, true);
else
pr_err("kunit: unknown action '%s'\n", action);
}
static void kunit_module_exit(struct module *mod)
{
struct kunit_suite_set suite_set = {
mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
};
const char *action = kunit_action();
if (!action)
__kunit_test_suites_exit(mod->kunit_suites,
mod->num_kunit_suites);
if (suite_set.start)
kunit_free_suite_set(suite_set);
}
static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
void *data)
{
struct module *mod = data;
switch (val) {
case MODULE_STATE_LIVE:
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
kunit_module_init(mod);
break;
case MODULE_STATE_UNFORMED:
break;
}
return 0;
}
static struct notifier_block kunit_mod_nb = {
.notifier_call = kunit_module_notify,
.priority = 0,
};
#endif
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
void *data;
data = kmalloc_array(n, size, gfp);
if (!data)
return NULL;
if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0)
return NULL;
return data;
}
EXPORT_SYMBOL_GPL(kunit_kmalloc_array);
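/*
* Memory handed out here is freed automatically when the test finishes,
* via the kfree deferred action registered above. A minimal usage sketch:
*
*	buf = kunit_kmalloc(test, len, GFP_KERNEL);
*	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
*
* No matching kunit_kfree() is required unless the test wants to release
* the allocation early.
*/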
void kunit_kfree(struct kunit *test, const void *ptr)
{
if (!ptr)
return;
kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr);
}
EXPORT_SYMBOL_GPL(kunit_kfree);
void kunit_cleanup(struct kunit *test)
{
struct kunit_resource *res;
unsigned long flags;
/*
* test->resources is a stack - each allocation must be freed in the
* reverse order from which it was added since one resource may depend
* on another for its entire lifetime.
* Also, we cannot use the normal list_for_each constructs, even the
* safe ones because *arbitrary* nodes may be deleted when
* kunit_resource_free is called; the list_for_each_safe variants only
* protect against the current node being deleted, not the next.
*/
while (true) {
spin_lock_irqsave(&test->lock, flags);
if (list_empty(&test->resources)) {
spin_unlock_irqrestore(&test->lock, flags);
break;
}
res = list_last_entry(&test->resources,
struct kunit_resource,
node);
/*
* Need to unlock here as a resource may remove another
* resource, and this can't happen if the test->lock
* is held.
*/
spin_unlock_irqrestore(&test->lock, flags);
kunit_remove_resource(test, res);
}
current->kunit_test = NULL;
}
EXPORT_SYMBOL_GPL(kunit_cleanup);
static int __init kunit_init(void)
{
/* Install the KUnit hook functions. */
kunit_install_hooks();
kunit_debugfs_init();
#ifdef CONFIG_MODULES
return register_module_notifier(&kunit_mod_nb);
#else
return 0;
#endif
}
late_initcall(kunit_init);
static void __exit kunit_exit(void)
{
memset(&kunit_hooks, 0, sizeof(kunit_hooks));
#ifdef CONFIG_MODULES
unregister_module_notifier(&kunit_mod_nb);
#endif
kunit_debugfs_cleanup();
}
module_exit(kunit_exit);
MODULE_LICENSE("GPL v2");
| linux-master | lib/kunit/test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit test for core test infrastructure.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include "try-catch-impl.h"
struct kunit_try_catch_test_context {
struct kunit_try_catch *try_catch;
bool function_called;
};
static void kunit_test_successful_try(void *data)
{
struct kunit *test = data;
struct kunit_try_catch_test_context *ctx = test->priv;
ctx->function_called = true;
}
static void kunit_test_no_catch(void *data)
{
struct kunit *test = data;
KUNIT_FAIL(test, "Catch should not be called\n");
}
static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
{
struct kunit_try_catch_test_context *ctx = test->priv;
struct kunit_try_catch *try_catch = ctx->try_catch;
kunit_try_catch_init(try_catch,
test,
kunit_test_successful_try,
kunit_test_no_catch);
kunit_try_catch_run(try_catch, test);
KUNIT_EXPECT_TRUE(test, ctx->function_called);
}
static void kunit_test_unsuccessful_try(void *data)
{
struct kunit *test = data;
struct kunit_try_catch_test_context *ctx = test->priv;
struct kunit_try_catch *try_catch = ctx->try_catch;
kunit_try_catch_throw(try_catch);
KUNIT_FAIL(test, "This line should never be reached\n");
}
static void kunit_test_catch(void *data)
{
struct kunit *test = data;
struct kunit_try_catch_test_context *ctx = test->priv;
ctx->function_called = true;
}
static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
{
struct kunit_try_catch_test_context *ctx = test->priv;
struct kunit_try_catch *try_catch = ctx->try_catch;
kunit_try_catch_init(try_catch,
test,
kunit_test_unsuccessful_try,
kunit_test_catch);
kunit_try_catch_run(try_catch, test);
KUNIT_EXPECT_TRUE(test, ctx->function_called);
}
static int kunit_try_catch_test_init(struct kunit *test)
{
struct kunit_try_catch_test_context *ctx;
ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
test->priv = ctx;
ctx->try_catch = kunit_kmalloc(test,
sizeof(*ctx->try_catch),
GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->try_catch);
return 0;
}
static struct kunit_case kunit_try_catch_test_cases[] = {
KUNIT_CASE(kunit_test_try_catch_successful_try_no_catch),
KUNIT_CASE(kunit_test_try_catch_unsuccessful_try_does_catch),
{}
};
static struct kunit_suite kunit_try_catch_test_suite = {
.name = "kunit-try-catch-test",
.init = kunit_try_catch_test_init,
.test_cases = kunit_try_catch_test_cases,
};
/*
* Context for testing test managed resources
* is_resource_initialized is used to test arbitrary resources
*/
struct kunit_test_resource_context {
struct kunit test;
bool is_resource_initialized;
int allocate_order[2];
int free_order[4];
};
static int fake_resource_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
res->data = &ctx->is_resource_initialized;
ctx->is_resource_initialized = true;
return 0;
}
static void fake_resource_free(struct kunit_resource *res)
{
bool *is_resource_initialized = res->data;
*is_resource_initialized = false;
}
static void kunit_resource_test_init_resources(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
kunit_init_test(&ctx->test, "testing_test_init_test", NULL);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
static void kunit_resource_test_alloc_resource(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
struct kunit_resource *res;
kunit_resource_free_t free = fake_resource_free;
res = kunit_alloc_and_get_resource(&ctx->test,
fake_resource_init,
fake_resource_free,
GFP_KERNEL,
ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
KUNIT_EXPECT_PTR_EQ(test,
&ctx->is_resource_initialized,
(bool *)res->data);
KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
KUNIT_EXPECT_PTR_EQ(test, free, res->free);
kunit_put_resource(res);
}
static inline bool kunit_resource_instance_match(struct kunit *test,
struct kunit_resource *res,
void *match_data)
{
return res->data == match_data;
}
/*
* Note: tests below use kunit_alloc_and_get_resource(), so as a consequence
* they have a reference to the associated resource that they must release
* via kunit_put_resource(). In normal operation, users will only
* have to do this for cases where they use kunit_find_resource(), and the
* kunit_alloc_resource() function will be used (which does not take a
* resource reference).
*/
static void kunit_resource_test_destroy_resource(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
struct kunit_resource *res = kunit_alloc_and_get_resource(
&ctx->test,
fake_resource_init,
fake_resource_free,
GFP_KERNEL,
ctx);
kunit_put_resource(res);
KUNIT_ASSERT_FALSE(test,
kunit_destroy_resource(&ctx->test,
kunit_resource_instance_match,
res->data));
KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
static void kunit_resource_test_remove_resource(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
struct kunit_resource *res = kunit_alloc_and_get_resource(
&ctx->test,
fake_resource_init,
fake_resource_free,
GFP_KERNEL,
ctx);
/* The resource is in the list */
KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources));
/* Remove the resource. The pointer is still valid, but it can't be
* found.
*/
kunit_remove_resource(test, res);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
/* We haven't been freed yet. */
KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
/* Removing the resource multiple times is valid. */
kunit_remove_resource(test, res);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
/* Despite having been removed twice (from only one reference), the
* resource still has not been freed.
*/
KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
/* Free the resource. */
kunit_put_resource(res);
KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
}
static void kunit_resource_test_cleanup_resources(struct kunit *test)
{
int i;
struct kunit_test_resource_context *ctx = test->priv;
struct kunit_resource *resources[5];
for (i = 0; i < ARRAY_SIZE(resources); i++) {
resources[i] = kunit_alloc_and_get_resource(&ctx->test,
fake_resource_init,
fake_resource_free,
GFP_KERNEL,
ctx);
kunit_put_resource(resources[i]);
}
kunit_cleanup(&ctx->test);
KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
}
static void kunit_resource_test_mark_order(int order_array[],
size_t order_size,
int key)
{
int i;
for (i = 0; i < order_size && order_array[i]; i++)
;
order_array[i] = key;
}
#define KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, order_field, key) \
kunit_resource_test_mark_order(ctx->order_field, \
ARRAY_SIZE(ctx->order_field), \
key)
static int fake_resource_2_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
res->data = ctx;
return 0;
}
static void fake_resource_2_free(struct kunit_resource *res)
{
struct kunit_test_resource_context *ctx = res->data;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
}
static int fake_resource_1_init(struct kunit_resource *res, void *context)
{
struct kunit_test_resource_context *ctx = context;
struct kunit_resource *res2;
res2 = kunit_alloc_and_get_resource(&ctx->test,
fake_resource_2_init,
fake_resource_2_free,
GFP_KERNEL,
ctx);
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
res->data = ctx;
kunit_put_resource(res2);
return 0;
}
static void fake_resource_1_free(struct kunit_resource *res)
{
struct kunit_test_resource_context *ctx = res->data;
KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
}
/*
* TODO([email protected]): replace the arrays that keep track of the
* order of allocation and freeing with strict mocks using the IN_SEQUENCE macro
* to assert allocation and freeing order when the feature becomes available.
*/
static void kunit_resource_test_proper_free_ordering(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
struct kunit_resource *res;
/* fake_resource_1 allocates a fake_resource_2 in its init. */
res = kunit_alloc_and_get_resource(&ctx->test,
fake_resource_1_init,
fake_resource_1_free,
GFP_KERNEL,
ctx);
/*
* Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
* before returning to fake_resource_1_init, it should be the first to
* put its key in the allocate_order array.
*/
KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
kunit_put_resource(res);
kunit_cleanup(&ctx->test);
/*
* Because fake_resource_2 finishes allocation before fake_resource_1,
* fake_resource_1 should be freed first since it could depend on
* fake_resource_2.
*/
KUNIT_EXPECT_EQ(test, ctx->free_order[0], 1);
KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
}
static void kunit_resource_test_static(struct kunit *test)
{
struct kunit_test_resource_context ctx;
struct kunit_resource res;
KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx),
0);
KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx);
kunit_cleanup(test);
KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
}
static void kunit_resource_test_named(struct kunit *test)
{
struct kunit_resource res1, res2, *found = NULL;
struct kunit_test_resource_context ctx;
KUNIT_EXPECT_EQ(test,
kunit_add_named_resource(test, NULL, NULL, &res1,
"resource_1", &ctx),
0);
KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx);
KUNIT_EXPECT_EQ(test,
kunit_add_named_resource(test, NULL, NULL, &res1,
"resource_1", &ctx),
-EEXIST);
KUNIT_EXPECT_EQ(test,
kunit_add_named_resource(test, NULL, NULL, &res2,
"resource_2", &ctx),
0);
found = kunit_find_named_resource(test, "resource_1");
KUNIT_EXPECT_PTR_EQ(test, found, &res1);
if (found)
kunit_put_resource(&res1);
KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"),
0);
kunit_cleanup(test);
KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
}
static void increment_int(void *ctx)
{
int *i = (int *)ctx;
(*i)++;
}
static void kunit_resource_test_action(struct kunit *test)
{
int num_actions = 0;
kunit_add_action(test, increment_int, &num_actions);
KUNIT_EXPECT_EQ(test, num_actions, 0);
kunit_cleanup(test);
KUNIT_EXPECT_EQ(test, num_actions, 1);
/* Once we've cleaned up, the action queue is empty. */
kunit_cleanup(test);
KUNIT_EXPECT_EQ(test, num_actions, 1);
/* Check the same function can be deferred multiple times. */
kunit_add_action(test, increment_int, &num_actions);
kunit_add_action(test, increment_int, &num_actions);
kunit_cleanup(test);
KUNIT_EXPECT_EQ(test, num_actions, 3);
}
static void kunit_resource_test_remove_action(struct kunit *test)
{
int num_actions = 0;
kunit_add_action(test, increment_int, &num_actions);
KUNIT_EXPECT_EQ(test, num_actions, 0);
kunit_remove_action(test, increment_int, &num_actions);
kunit_cleanup(test);
KUNIT_EXPECT_EQ(test, num_actions, 0);
}
static void kunit_resource_test_release_action(struct kunit *test)
{
int num_actions = 0;
kunit_add_action(test, increment_int, &num_actions);
KUNIT_EXPECT_EQ(test, num_actions, 0);
/* Runs immediately on trigger. */
kunit_release_action(test, increment_int, &num_actions);
KUNIT_EXPECT_EQ(test, num_actions, 1);
/* Doesn't run again on test exit. */
kunit_cleanup(test);
KUNIT_EXPECT_EQ(test, num_actions, 1);
}
static void action_order_1(void *ctx)
{
struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx;
KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 1);
kunit_log(KERN_INFO, current->kunit_test, "action_order_1");
}
static void action_order_2(void *ctx)
{
struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx;
KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 2);
kunit_log(KERN_INFO, current->kunit_test, "action_order_2");
}
static void kunit_resource_test_action_ordering(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
kunit_add_action(test, action_order_1, ctx);
kunit_add_action(test, action_order_2, ctx);
kunit_add_action(test, action_order_1, ctx);
kunit_add_action(test, action_order_2, ctx);
kunit_remove_action(test, action_order_1, ctx);
kunit_release_action(test, action_order_2, ctx);
kunit_cleanup(test);
/* [2 is triggered] [2], [(1 is cancelled)] [1] */
KUNIT_EXPECT_EQ(test, ctx->free_order[0], 2);
KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
KUNIT_EXPECT_EQ(test, ctx->free_order[2], 1);
}
static int kunit_resource_test_init(struct kunit *test)
{
struct kunit_test_resource_context *ctx =
kzalloc(sizeof(*ctx), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
test->priv = ctx;
kunit_init_test(&ctx->test, "test_test_context", NULL);
return 0;
}
static void kunit_resource_test_exit(struct kunit *test)
{
struct kunit_test_resource_context *ctx = test->priv;
kunit_cleanup(&ctx->test);
kfree(ctx);
}
static struct kunit_case kunit_resource_test_cases[] = {
KUNIT_CASE(kunit_resource_test_init_resources),
KUNIT_CASE(kunit_resource_test_alloc_resource),
KUNIT_CASE(kunit_resource_test_destroy_resource),
KUNIT_CASE(kunit_resource_test_remove_resource),
KUNIT_CASE(kunit_resource_test_cleanup_resources),
KUNIT_CASE(kunit_resource_test_proper_free_ordering),
KUNIT_CASE(kunit_resource_test_static),
KUNIT_CASE(kunit_resource_test_named),
KUNIT_CASE(kunit_resource_test_action),
KUNIT_CASE(kunit_resource_test_remove_action),
KUNIT_CASE(kunit_resource_test_release_action),
KUNIT_CASE(kunit_resource_test_action_ordering),
{}
};
static struct kunit_suite kunit_resource_test_suite = {
.name = "kunit-resource-test",
.init = kunit_resource_test_init,
.exit = kunit_resource_test_exit,
.test_cases = kunit_resource_test_cases,
};
static void kunit_log_test(struct kunit *test)
{
struct kunit_suite suite;
suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
kunit_log(KERN_INFO, test, "put this in log.");
kunit_log(KERN_INFO, test, "this too.");
kunit_log(KERN_INFO, &suite, "add to suite log.");
kunit_log(KERN_INFO, &suite, "along with this.");
#ifdef CONFIG_KUNIT_DEBUGFS
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(test->log, "put this in log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(test->log, "this too."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(suite.log, "add to suite log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
strstr(suite.log, "along with this."));
#else
KUNIT_EXPECT_NULL(test, test->log);
#endif
}
static void kunit_log_newline_test(struct kunit *test)
{
kunit_info(test, "Add newline\n");
if (test->log) {
KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(test->log, "Add newline\n"),
"Missing log line, full log:\n%s", test->log);
KUNIT_EXPECT_NULL(test, strstr(test->log, "Add newline\n\n"));
} else {
kunit_skip(test, "only useful when debugfs is enabled");
}
}
static struct kunit_case kunit_log_test_cases[] = {
KUNIT_CASE(kunit_log_test),
KUNIT_CASE(kunit_log_newline_test),
{}
};
static struct kunit_suite kunit_log_test_suite = {
.name = "kunit-log-test",
.test_cases = kunit_log_test_cases,
};
static void kunit_status_set_failure_test(struct kunit *test)
{
struct kunit fake;
kunit_init_test(&fake, "fake test", NULL);
KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
kunit_set_failure(&fake);
KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
}
static void kunit_status_mark_skipped_test(struct kunit *test)
{
struct kunit fake;
kunit_init_test(&fake, "fake test", NULL);
/* Before: Should be SUCCESS with no comment. */
KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
/* Mark the test as skipped. */
kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
/* After: Should be SKIPPED with our comment. */
KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
}
static struct kunit_case kunit_status_test_cases[] = {
KUNIT_CASE(kunit_status_set_failure_test),
KUNIT_CASE(kunit_status_mark_skipped_test),
{}
};
static struct kunit_suite kunit_status_test_suite = {
.name = "kunit_status",
.test_cases = kunit_status_test_cases,
};
static void kunit_current_test(struct kunit *test)
{
/* Check results of both current->kunit_test and
* kunit_get_current_test() are equivalent to current test.
*/
KUNIT_EXPECT_PTR_EQ(test, test, current->kunit_test);
KUNIT_EXPECT_PTR_EQ(test, test, kunit_get_current_test());
}
static void kunit_current_fail_test(struct kunit *test)
{
struct kunit fake;
kunit_init_test(&fake, "fake test", NULL);
KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
/* Set current->kunit_test to fake test. */
current->kunit_test = &fake;
kunit_fail_current_test("This should make `fake` test fail.");
KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
kunit_cleanup(&fake);
/* Reset current->kunit_test to current test. */
current->kunit_test = test;
}
static struct kunit_case kunit_current_test_cases[] = {
KUNIT_CASE(kunit_current_test),
KUNIT_CASE(kunit_current_fail_test),
{}
};
static struct kunit_suite kunit_current_test_suite = {
.name = "kunit_current",
.test_cases = kunit_current_test_cases,
};
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
&kunit_current_test_suite);
MODULE_LICENSE("GPL v2");
| linux-master | lib/kunit/kunit-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit test for struct string_stream.
*
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <[email protected]>
*/
#include <kunit/test.h>
#include <linux/slab.h>
#include "string-stream.h"
static void string_stream_test_empty_on_creation(struct kunit *test)
{
struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
}
static void string_stream_test_not_empty_after_add(struct kunit *test)
{
struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
string_stream_add(stream, "Foo");
KUNIT_EXPECT_FALSE(test, string_stream_is_empty(stream));
}
static void string_stream_test_get_string(struct kunit *test)
{
struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
char *output;
string_stream_add(stream, "Foo");
string_stream_add(stream, " %s", "bar");
output = string_stream_get_string(stream);
KUNIT_ASSERT_STREQ(test, output, "Foo bar");
}
static struct kunit_case string_stream_test_cases[] = {
KUNIT_CASE(string_stream_test_empty_on_creation),
KUNIT_CASE(string_stream_test_not_empty_after_add),
KUNIT_CASE(string_stream_test_get_string),
{}
};
static struct kunit_suite string_stream_test_suite = {
.name = "string-stream-test",
.test_cases = string_stream_test_cases
};
kunit_test_suites(&string_stream_test_suite);
| linux-master | lib/kunit/string-stream-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit API to save and access test attributes
*
* Copyright (C) 2023, Google LLC.
* Author: Rae Moar <[email protected]>
*/
#include <kunit/test.h>
#include <kunit/attributes.h>
/* Options for printing attributes:
* PRINT_ALWAYS - attribute is printed for every test case and suite if set
* PRINT_SUITE - attribute is printed for every suite if set but not for test cases
* PRINT_NEVER - attribute is never printed
*/
enum print_ops {
PRINT_ALWAYS,
PRINT_SUITE,
PRINT_NEVER,
};
/**
* struct kunit_attr - represents a test attribute and holds flexible
* helper functions to interact with attribute.
*
* @name: name of test attribute, eg. speed
* @get_attr: function to return attribute value given a test
* @to_string: function to return string representation of given
* attribute value
* @filter: function to indicate whether a given attribute value passes a
* filter
* @attr_default: default attribute value used during filtering
* @print: value of enum print_ops to indicate when to print attribute
*/
struct kunit_attr {
const char *name;
void *(*get_attr)(void *test_or_suite, bool is_test);
const char *(*to_string)(void *attr, bool *to_free);
int (*filter)(void *attr, const char *input, int *err);
void *attr_default;
enum print_ops print;
};
/* String Lists for enum Attributes */
static const char * const speed_str_list[] = {"unset", "very_slow", "slow", "normal"};
/* To String Methods */
static const char *attr_enum_to_string(void *attr, const char * const str_list[], bool *to_free)
{
long val = (long)attr;
*to_free = false;
if (!val)
return NULL;
return str_list[val];
}
static const char *attr_speed_to_string(void *attr, bool *to_free)
{
return attr_enum_to_string(attr, speed_str_list, to_free);
}
static const char *attr_string_to_string(void *attr, bool *to_free)
{
*to_free = false;
return (char *) attr;
}
/* Filter Methods */
static const char op_list[] = "<>!=";
/*
* Returns whether the inputted integer value matches the filter given
* by the operation string and inputted integer.
*/
static int int_filter(long val, const char *op, int input, int *err)
{
if (!strncmp(op, "<=", 2))
return (val <= input);
else if (!strncmp(op, ">=", 2))
return (val >= input);
else if (!strncmp(op, "!=", 2))
return (val != input);
else if (!strncmp(op, ">", 1))
return (val > input);
else if (!strncmp(op, "<", 1))
return (val < input);
else if (!strncmp(op, "=", 1))
return (val == input);
*err = -EINVAL;
pr_err("kunit executor: invalid filter operation: %s\n", op);
return false;
}
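/*
* For example (illustrative): the filter string "speed>slow" resolves
* "slow" to its index in speed_str_list (2), so this helper is invoked as
* int_filter(val, ">slow", 2, &err) and is true only for attribute values
* greater than "slow", i.e. "normal".
*/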
/*
* Returns whether the inputted enum value "attr" matches the filter given
* by the input string. Note: the str_list includes the corresponding string
* list to the enum values.
*/
static int attr_enum_filter(void *attr, const char *input, int *err,
const char * const str_list[], int max)
{
int i, j, input_int = -1;
long test_val = (long)attr;
const char *input_val = NULL;
for (i = 0; input[i]; i++) {
if (!strchr(op_list, input[i])) {
input_val = input + i;
break;
}
}
if (!input_val) {
*err = -EINVAL;
pr_err("kunit executor: filter value not found: %s\n", input);
return false;
}
for (j = 0; j <= max; j++) {
if (!strcmp(input_val, str_list[j]))
input_int = j;
}
if (input_int < 0) {
*err = -EINVAL;
pr_err("kunit executor: invalid filter input: %s\n", input);
return false;
}
return int_filter(test_val, input, input_int, err);
}
static int attr_speed_filter(void *attr, const char *input, int *err)
{
return attr_enum_filter(attr, input, err, speed_str_list, KUNIT_SPEED_MAX);
}
/*
* Returns whether the inputted string value (attr) matches the filter given
* by the input string.
*/
static int attr_string_filter(void *attr, const char *input, int *err)
{
char *str = attr;
if (!strncmp(input, "<", 1)) {
*err = -EINVAL;
pr_err("kunit executor: invalid filter input: %s\n", input);
return false;
} else if (!strncmp(input, ">", 1)) {
*err = -EINVAL;
pr_err("kunit executor: invalid filter input: %s\n", input);
return false;
} else if (!strncmp(input, "!=", 2)) {
return (strcmp(input + 2, str) != 0);
} else if (!strncmp(input, "=", 1)) {
return (strcmp(input + 1, str) == 0);
}
*err = -EINVAL;
pr_err("kunit executor: invalid filter operation: %s\n", input);
return false;
}
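/*
* For example (illustrative): attr_string_filter("example", "=example", &err)
* is true and "!=example" is false, while relational operators such as "<"
* are rejected with -EINVAL because string attributes are only compared for
* (in)equality.
*/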
/* Get Attribute Methods */
static void *attr_speed_get(void *test_or_suite, bool is_test)
{
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit_case *test = is_test ? test_or_suite : NULL;
if (test)
return ((void *) test->attr.speed);
else
return ((void *) suite->attr.speed);
}
static void *attr_module_get(void *test_or_suite, bool is_test)
{
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit_case *test = is_test ? test_or_suite : NULL;
// Suites get their module attribute from their first test_case
if (test)
return ((void *) test->module_name);
else if (kunit_suite_num_test_cases(suite) > 0)
return ((void *) suite->test_cases[0].module_name);
else
return (void *) "";
}
/* List of all Test Attributes */
static struct kunit_attr kunit_attr_list[] = {
{
.name = "speed",
.get_attr = attr_speed_get,
.to_string = attr_speed_to_string,
.filter = attr_speed_filter,
.attr_default = (void *)KUNIT_SPEED_NORMAL,
.print = PRINT_ALWAYS,
},
{
.name = "module",
.get_attr = attr_module_get,
.to_string = attr_string_to_string,
.filter = attr_string_filter,
.attr_default = (void *)"",
.print = PRINT_SUITE,
}
};
/* Helper Functions to Access Attributes */
const char *kunit_attr_filter_name(struct kunit_attr_filter filter)
{
return filter.attr->name;
}
void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level)
{
int i;
bool to_free = false;
void *attr;
const char *attr_name, *attr_str;
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit_case *test = is_test ? test_or_suite : NULL;
for (i = 0; i < ARRAY_SIZE(kunit_attr_list); i++) {
if (kunit_attr_list[i].print == PRINT_NEVER ||
(test && kunit_attr_list[i].print == PRINT_SUITE))
continue;
attr = kunit_attr_list[i].get_attr(test_or_suite, is_test);
if (attr) {
attr_name = kunit_attr_list[i].name;
attr_str = kunit_attr_list[i].to_string(attr, &to_free);
if (test) {
kunit_log(KERN_INFO, test, "%*s# %s.%s: %s",
KUNIT_INDENT_LEN * test_level, "", test->name,
attr_name, attr_str);
} else {
kunit_log(KERN_INFO, suite, "%*s# %s: %s",
KUNIT_INDENT_LEN * test_level, "", attr_name, attr_str);
}
/* Free to_string of attribute if needed */
if (to_free)
kfree(attr_str);
}
}
}
/* Helper Functions to Filter Attributes */
int kunit_get_filter_count(char *input)
{
int i, comma_index = 0, count = 0;
for (i = 0; input[i]; i++) {
if (input[i] == ',') {
if ((i - comma_index) > 1)
count++;
comma_index = i;
}
}
if ((i - comma_index) > 0)
count++;
return count;
}
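/*
* For example (illustrative): kunit_get_filter_count("speed>slow,module!=foo")
* returns 2, one count per comma-separated filter expression in the input.
*/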
struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err)
{
struct kunit_attr_filter filter = {};
int i, j, comma_index = 0, new_start_index = 0;
int op_index = -1, attr_index = -1;
char op;
char *input = *filters;
/* Parse input until operation */
for (i = 0; input[i]; i++) {
if (op_index < 0 && strchr(op_list, input[i])) {
op_index = i;
} else if (!comma_index && input[i] == ',') {
comma_index = i;
} else if (comma_index && input[i] != ' ') {
new_start_index = i;
break;
}
}
if (op_index <= 0) {
*err = -EINVAL;
pr_err("kunit executor: filter operation not found: %s\n", input);
return filter;
}
/* Temporarily set operator to \0 character. */
op = input[op_index];
input[op_index] = '\0';
/* Find associated kunit_attr object */
for (j = 0; j < ARRAY_SIZE(kunit_attr_list); j++) {
if (!strcmp(input, kunit_attr_list[j].name)) {
attr_index = j;
break;
}
}
input[op_index] = op;
if (attr_index < 0) {
*err = -EINVAL;
pr_err("kunit executor: attribute not found: %s\n", input);
} else {
filter.attr = &kunit_attr_list[attr_index];
}
if (comma_index > 0) {
input[comma_index] = '\0';
filter.input = input + op_index;
input = input + new_start_index;
} else {
filter.input = input + op_index;
input = NULL;
}
*filters = input;
return filter;
}
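/*
* Walking through an example (illustrative): with *filters pointing at
* "speed>slow, module=foo", the parser locates the '>' operator, temporarily
* NUL-terminates "speed" to look up the matching kunit_attr_list[] entry,
* and returns a filter with .attr set to the "speed" attribute and .input
* set to ">slow"; *filters is advanced past the comma to "module=foo" for
* the next call.
*/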
struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
struct kunit_attr_filter filter, char *action, int *err)
{
int n = 0;
struct kunit_case *filtered, *test_case;
struct kunit_suite *copy;
void *suite_val, *test_val;
bool suite_result, test_result, default_result, result;
/* Allocate memory for new copy of suite and list of test cases */
copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
if (!copy)
return ERR_PTR(-ENOMEM);
kunit_suite_for_each_test_case(suite, test_case) { n++; }
filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
if (!filtered) {
kfree(copy);
return ERR_PTR(-ENOMEM);
}
n = 0;
/* Save filtering result on default value */
default_result = filter.attr->filter(filter.attr->attr_default, filter.input, err);
if (*err)
goto err;
/* Save suite attribute value and filtering result on that value */
suite_val = filter.attr->get_attr((void *)suite, false);
suite_result = filter.attr->filter(suite_val, filter.input, err);
if (*err)
goto err;
	/* For each test case, save the test case if it passes filtering. */
kunit_suite_for_each_test_case(suite, test_case) {
		test_val = filter.attr->get_attr((void *)test_case, true);
		test_result = filter.attr->filter(test_val, filter.input, err);
if (*err)
goto err;
		/*
		 * If the test case's attribute value is set, filter on that value.
		 * Otherwise filter on the suite's value if set; failing that,
		 * filter on the default value.
		 */
result = false;
if (test_val) {
if (test_result)
result = true;
} else if (suite_val) {
if (suite_result)
result = true;
} else if (default_result) {
result = true;
}
if (result) {
filtered[n++] = *test_case;
} else if (action && strcmp(action, "skip") == 0) {
test_case->status = KUNIT_SKIPPED;
filtered[n++] = *test_case;
}
}
err:
if (n == 0 || *err) {
kfree(copy);
kfree(filtered);
return NULL;
}
copy->test_cases = filtered;
return copy;
}
| linux-master | lib/kunit/attributes.c |
// SPDX-License-Identifier: Zlib
/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
#include <linux/export.h>
#include <linux/module.h>
#include "dfltcc_util.h"
#include "dfltcc.h"
char *oesc_msg(
char *buf,
int oesc
)
{
if (oesc == 0x00)
return NULL; /* Successful completion */
else {
#ifdef STATIC
return NULL; /* Ignore for pre-boot decompressor */
#else
sprintf(buf, "Operation-Ending-Supplemental Code is 0x%.2X", oesc);
return buf;
#endif
}
}
void dfltcc_reset_state(struct dfltcc_state *dfltcc_state) {
/* Initialize available functions */
if (is_dfltcc_enabled()) {
dfltcc(DFLTCC_QAF, &dfltcc_state->param, NULL, NULL, NULL, NULL, NULL);
memmove(&dfltcc_state->af, &dfltcc_state->param, sizeof(dfltcc_state->af));
} else
memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
/* Initialize parameter block */
memset(&dfltcc_state->param, 0, sizeof(dfltcc_state->param));
dfltcc_state->param.nt = 1;
dfltcc_state->param.ribm = DFLTCC_RIBM;
}
MODULE_LICENSE("GPL");
| linux-master | lib/zlib_dfltcc/dfltcc.c |
// SPDX-License-Identifier: Zlib
#include "../zlib_deflate/defutil.h"
#include "dfltcc_util.h"
#include "dfltcc_deflate.h"
#include <asm/setup.h>
#include <linux/export.h>
#include <linux/zutil.h>
#define GET_DFLTCC_DEFLATE_STATE(state) ((struct dfltcc_deflate_state *)GET_DFLTCC_STATE(state))
/*
* Compress.
*/
int dfltcc_can_deflate(
z_streamp strm
)
{
deflate_state *state = (deflate_state *)strm->state;
struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
/* Check for kernel dfltcc command line parameter */
if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
zlib_dfltcc_support == ZLIB_DFLTCC_INFLATE_ONLY)
return 0;
/* Unsupported compression settings */
if (!dfltcc_are_params_ok(state->level, state->w_bits, state->strategy,
dfltcc_state->level_mask))
return 0;
/* Unsupported hardware */
if (!is_bit_set(dfltcc_state->common.af.fns, DFLTCC_GDHT) ||
!is_bit_set(dfltcc_state->common.af.fns, DFLTCC_CMPR) ||
!is_bit_set(dfltcc_state->common.af.fmts, DFLTCC_FMT0))
return 0;
return 1;
}
EXPORT_SYMBOL(dfltcc_can_deflate);
void dfltcc_reset_deflate_state(z_streamp strm) {
deflate_state *state = (deflate_state *)strm->state;
struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
dfltcc_reset_state(&dfltcc_state->common);
/* Initialize tuning parameters */
if (zlib_dfltcc_support == ZLIB_DFLTCC_FULL_DEBUG)
dfltcc_state->level_mask = DFLTCC_LEVEL_MASK_DEBUG;
else
dfltcc_state->level_mask = DFLTCC_LEVEL_MASK;
dfltcc_state->block_size = DFLTCC_BLOCK_SIZE;
dfltcc_state->block_threshold = DFLTCC_FIRST_FHT_BLOCK_SIZE;
dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
}
EXPORT_SYMBOL(dfltcc_reset_deflate_state);
static void dfltcc_gdht(
z_streamp strm
)
{
deflate_state *state = (deflate_state *)strm->state;
struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
size_t avail_in = strm->avail_in;
dfltcc(DFLTCC_GDHT,
param, NULL, NULL,
&strm->next_in, &avail_in, NULL);
}
static dfltcc_cc dfltcc_cmpr(
z_streamp strm
)
{
deflate_state *state = (deflate_state *)strm->state;
struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
size_t avail_in = strm->avail_in;
size_t avail_out = strm->avail_out;
dfltcc_cc cc;
cc = dfltcc(DFLTCC_CMPR | HBT_CIRCULAR,
param, &strm->next_out, &avail_out,
&strm->next_in, &avail_in, state->window);
strm->total_in += (strm->avail_in - avail_in);
strm->total_out += (strm->avail_out - avail_out);
strm->avail_in = avail_in;
strm->avail_out = avail_out;
return cc;
}
static void send_eobs(
z_streamp strm,
const struct dfltcc_param_v0 *param
)
{
deflate_state *state = (deflate_state *)strm->state;
zlib_tr_send_bits(
state,
bi_reverse(param->eobs >> (15 - param->eobl), param->eobl),
param->eobl);
flush_pending(strm);
if (state->pending != 0) {
/* The remaining data is located in pending_out[0:pending]. If someone
* calls put_byte() - this might happen in deflate() - the byte will be
* placed into pending_buf[pending], which is incorrect. Move the
* remaining data to the beginning of pending_buf so that put_byte() is
* usable again.
*/
memmove(state->pending_buf, state->pending_out, state->pending);
state->pending_out = state->pending_buf;
}
#ifdef ZLIB_DEBUG
state->compressed_len += param->eobl;
#endif
}
int dfltcc_deflate(
z_streamp strm,
int flush,
block_state *result
)
{
deflate_state *state = (deflate_state *)strm->state;
struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
struct dfltcc_param_v0 *param = &dfltcc_state->common.param;
uInt masked_avail_in;
dfltcc_cc cc;
int need_empty_block;
int soft_bcc;
int no_flush;
if (!dfltcc_can_deflate(strm)) {
/* Clear history. */
if (flush == Z_FULL_FLUSH)
param->hl = 0;
return 0;
}
again:
masked_avail_in = 0;
soft_bcc = 0;
no_flush = flush == Z_NO_FLUSH;
/* No input data. Return, except when Continuation Flag is set, which means
* that DFLTCC has buffered some output in the parameter block and needs to
* be called again in order to flush it.
*/
if (strm->avail_in == 0 && !param->cf) {
/* A block is still open, and the hardware does not support closing
* blocks without adding data. Thus, close it manually.
*/
if (!no_flush && param->bcf) {
send_eobs(strm, param);
param->bcf = 0;
}
/* Let one of deflate_* functions write a trailing empty block. */
if (flush == Z_FINISH)
return 0;
/* Clear history. */
if (flush == Z_FULL_FLUSH)
param->hl = 0;
/* Trigger block post-processing if necessary. */
*result = no_flush ? need_more : block_done;
return 1;
}
	/* If there is an open non-BFINAL block that we are not going to close
	 * just yet, we have already compressed more than DFLTCC_BLOCK_SIZE
	 * bytes, and at least DFLTCC_DHT_MIN_SAMPLE_SIZE bytes of input are
	 * available, open a new block with a new DHT in order to adapt to a
	 * possibly changed input data distribution.
	 */
if (param->bcf && no_flush &&
strm->total_in > dfltcc_state->block_threshold &&
strm->avail_in >= dfltcc_state->dht_threshold) {
if (param->cf) {
/* We need to flush the DFLTCC buffer before writing the
* End-of-block Symbol. Mask the input data and proceed as usual.
*/
masked_avail_in += strm->avail_in;
strm->avail_in = 0;
no_flush = 0;
} else {
/* DFLTCC buffer is empty, so we can manually write the
* End-of-block Symbol right away.
*/
send_eobs(strm, param);
param->bcf = 0;
dfltcc_state->block_threshold =
strm->total_in + dfltcc_state->block_size;
}
}
/* No space for compressed data. If we proceed, dfltcc_cmpr() will return
* DFLTCC_CC_OP1_TOO_SHORT without buffering header bits, but we will still
* set BCF=1, which is wrong. Avoid complications and return early.
*/
if (strm->avail_out == 0) {
*result = need_more;
return 1;
}
/* The caller gave us too much data. Pass only one block worth of
* uncompressed data to DFLTCC and mask the rest, so that on the next
* iteration we start a new block.
*/
if (no_flush && strm->avail_in > dfltcc_state->block_size) {
masked_avail_in += (strm->avail_in - dfltcc_state->block_size);
strm->avail_in = dfltcc_state->block_size;
}
/* When we have an open non-BFINAL deflate block and caller indicates that
* the stream is ending, we need to close an open deflate block and open a
* BFINAL one.
*/
need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf;
/* Translate stream to parameter block */
param->cvt = CVT_ADLER32;
if (!no_flush)
		/* We need to close a block. Always do this in software - when there is
		 * no input data, the hardware will not honor BCC. */
soft_bcc = 1;
if (flush == Z_FINISH && !param->bcf)
/* We are about to open a BFINAL block, set Block Header Final bit
* until the stream ends.
*/
param->bhf = 1;
/* DFLTCC-CMPR will write to next_out, so make sure that buffers with
* higher precedence are empty.
*/
Assert(state->pending == 0, "There must be no pending bytes");
Assert(state->bi_valid < 8, "There must be less than 8 pending bits");
param->sbb = (unsigned int)state->bi_valid;
if (param->sbb > 0)
*strm->next_out = (Byte)state->bi_buf;
/* Honor history and check value */
param->nt = 0;
param->cv = strm->adler;
/* When opening a block, choose a Huffman-Table Type */
if (!param->bcf) {
if (strm->total_in == 0 && dfltcc_state->block_threshold > 0) {
param->htt = HTT_FIXED;
		} else {
param->htt = HTT_DYNAMIC;
dfltcc_gdht(strm);
}
}
/* Deflate */
do {
cc = dfltcc_cmpr(strm);
if (strm->avail_in < 4096 && masked_avail_in > 0)
/* We are about to call DFLTCC with a small input buffer, which is
* inefficient. Since there is masked data, there will be at least
* one more DFLTCC call, so skip the current one and make the next
* one handle more data.
*/
break;
} while (cc == DFLTCC_CC_AGAIN);
/* Translate parameter block to stream */
strm->msg = oesc_msg(dfltcc_state->common.msg, param->oesc);
state->bi_valid = param->sbb;
if (state->bi_valid == 0)
state->bi_buf = 0; /* Avoid accessing next_out */
else
state->bi_buf = *strm->next_out & ((1 << state->bi_valid) - 1);
strm->adler = param->cv;
/* Unmask the input data */
strm->avail_in += masked_avail_in;
masked_avail_in = 0;
/* If we encounter an error, it means there is a bug in DFLTCC call */
Assert(cc != DFLTCC_CC_OP2_CORRUPT || param->oesc == 0, "BUG");
/* Update Block-Continuation Flag. It will be used to check whether to call
* GDHT the next time.
*/
if (cc == DFLTCC_CC_OK) {
if (soft_bcc) {
send_eobs(strm, param);
param->bcf = 0;
dfltcc_state->block_threshold =
strm->total_in + dfltcc_state->block_size;
} else
param->bcf = 1;
if (flush == Z_FINISH) {
if (need_empty_block)
/* Make the current deflate() call also close the stream */
return 0;
else {
bi_windup(state);
*result = finish_done;
}
} else {
if (flush == Z_FULL_FLUSH)
param->hl = 0; /* Clear history */
*result = flush == Z_NO_FLUSH ? need_more : block_done;
}
} else {
param->bcf = 1;
*result = need_more;
}
if (strm->avail_in != 0 && strm->avail_out != 0)
goto again; /* deflate() must use all input or all output */
return 1;
}
EXPORT_SYMBOL(dfltcc_deflate);
| linux-master | lib/zlib_dfltcc/dfltcc_deflate.c |
// SPDX-License-Identifier: Zlib
#include "../zlib_inflate/inflate.h"
#include "dfltcc_util.h"
#include "dfltcc_inflate.h"
#include <asm/setup.h>
#include <linux/export.h>
#include <linux/zutil.h>
/*
* Expand.
*/
int dfltcc_can_inflate(
z_streamp strm
)
{
struct inflate_state *state = (struct inflate_state *)strm->state;
struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
/* Check for kernel dfltcc command line parameter */
if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
zlib_dfltcc_support == ZLIB_DFLTCC_DEFLATE_ONLY)
return 0;
/* Unsupported hardware */
return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
}
EXPORT_SYMBOL(dfltcc_can_inflate);
void dfltcc_reset_inflate_state(z_streamp strm) {
struct inflate_state *state = (struct inflate_state *)strm->state;
struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
dfltcc_reset_state(dfltcc_state);
}
EXPORT_SYMBOL(dfltcc_reset_inflate_state);
static int dfltcc_was_inflate_used(
z_streamp strm
)
{
struct inflate_state *state = (struct inflate_state *)strm->state;
struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
return !param->nt;
}
static int dfltcc_inflate_disable(
z_streamp strm
)
{
struct inflate_state *state = (struct inflate_state *)strm->state;
struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
if (!dfltcc_can_inflate(strm))
return 0;
if (dfltcc_was_inflate_used(strm))
/* DFLTCC has already decompressed some data. Since there is not
* enough information to resume decompression in software, the call
* must fail.
*/
return 1;
/* DFLTCC was not used yet - decompress in software */
memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
return 0;
}
static dfltcc_cc dfltcc_xpnd(
z_streamp strm
)
{
struct inflate_state *state = (struct inflate_state *)strm->state;
struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
size_t avail_in = strm->avail_in;
size_t avail_out = strm->avail_out;
dfltcc_cc cc;
cc = dfltcc(DFLTCC_XPND | HBT_CIRCULAR,
param, &strm->next_out, &avail_out,
&strm->next_in, &avail_in, state->window);
strm->avail_in = avail_in;
strm->avail_out = avail_out;
return cc;
}
dfltcc_inflate_action dfltcc_inflate(
z_streamp strm,
int flush,
int *ret
)
{
struct inflate_state *state = (struct inflate_state *)strm->state;
struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
struct dfltcc_param_v0 *param = &dfltcc_state->param;
dfltcc_cc cc;
if (flush == Z_BLOCK || flush == Z_PACKET_FLUSH) {
		/* DFLTCC supports neither stopping on block boundaries (the Z_BLOCK
		 * flush option) nor the Z_PACKET_FLUSH option (used exclusively by
		 * the PPP driver).
		 */
if (dfltcc_inflate_disable(strm)) {
*ret = Z_STREAM_ERROR;
return DFLTCC_INFLATE_BREAK;
} else
return DFLTCC_INFLATE_SOFTWARE;
}
if (state->last) {
if (state->bits != 0) {
strm->next_in++;
strm->avail_in--;
state->bits = 0;
}
state->mode = CHECK;
return DFLTCC_INFLATE_CONTINUE;
}
if (strm->avail_in == 0 && !param->cf)
return DFLTCC_INFLATE_BREAK;
if (!state->window || state->wsize == 0) {
state->mode = MEM;
return DFLTCC_INFLATE_CONTINUE;
}
/* Translate stream to parameter block */
param->cvt = CVT_ADLER32;
param->sbb = state->bits;
if (param->hl)
param->nt = 0; /* Honor history for the first block */
param->cv = state->check;
/* Inflate */
do {
cc = dfltcc_xpnd(strm);
} while (cc == DFLTCC_CC_AGAIN);
/* Translate parameter block to stream */
strm->msg = oesc_msg(dfltcc_state->msg, param->oesc);
state->last = cc == DFLTCC_CC_OK;
state->bits = param->sbb;
state->check = param->cv;
if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
/* Report an error if stream is corrupted */
state->mode = BAD;
return DFLTCC_INFLATE_CONTINUE;
}
state->mode = TYPEDO;
/* Break if operands are exhausted, otherwise continue looping */
return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
}
EXPORT_SYMBOL(dfltcc_inflate);
| linux-master | lib/zlib_dfltcc/dfltcc_inflate.c |
/*
* LZ4 - Fast LZ compression algorithm
* Copyright (C) 2011 - 2016, Yann Collet.
* BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* You can contact the author at :
* - LZ4 homepage : http://www.lz4.org
* - LZ4 source repository : https://github.com/lz4/lz4
*
* Changed for kernel usage by:
* Sven Schmidt <[email protected]>
*/
/*-************************************
* Dependencies
**************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
/*-*****************************
* Decompression functions
*******************************/
#define DEBUGLOG(l, ...) {} /* disabled */
#ifndef assert
#define assert(condition) ((void)0)
#endif
/*
* LZ4_decompress_generic() :
* This generic decompression function covers all use cases.
* It shall be instantiated several times, using different sets of directives.
 * Note that it is important for performance that this function really gets inlined,
* in order to remove useless branches during compilation optimization.
*/
static FORCE_INLINE int LZ4_decompress_generic(
const char * const src,
char * const dst,
int srcSize,
/*
* If endOnInput == endOnInputSize,
* this value is `dstCapacity`
*/
int outputSize,
/* endOnOutputSize, endOnInputSize */
endCondition_directive endOnInput,
/* full, partial */
earlyEnd_directive partialDecoding,
/* noDict, withPrefix64k, usingExtDict */
dict_directive dict,
/* always <= dst, == dst when no prefix */
const BYTE * const lowPrefix,
/* only if dict == usingExtDict */
const BYTE * const dictStart,
/* note : = 0 if noDict */
const size_t dictSize
)
{
const BYTE *ip = (const BYTE *) src;
const BYTE * const iend = ip + srcSize;
BYTE *op = (BYTE *) dst;
BYTE * const oend = op + outputSize;
BYTE *cpy;
const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
const int safeDecode = (endOnInput == endOnInputSize);
const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
/* Set up the "end" pointers for the shortcut. */
const BYTE *const shortiend = iend -
(endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
const BYTE *const shortoend = oend -
(endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
srcSize, outputSize);
/* Special cases */
assert(lowPrefix <= op);
assert(src != NULL);
/* Empty output buffer */
if ((endOnInput) && (unlikely(outputSize == 0)))
return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
if ((!endOnInput) && (unlikely(outputSize == 0)))
return (*ip == 0 ? 1 : -1);
if ((endOnInput) && unlikely(srcSize == 0))
return -1;
/* Main Loop : decode sequences */
while (1) {
size_t length;
const BYTE *match;
size_t offset;
/* get literal length */
unsigned int const token = *ip++;
length = token>>ML_BITS;
/* ip < iend before the increment */
assert(!endOnInput || ip <= iend);
/*
* A two-stage shortcut for the most common case:
* 1) If the literal length is 0..14, and there is enough
* space, enter the shortcut and copy 16 bytes on behalf
* of the literals (in the fast mode, only 8 bytes can be
* safely copied this way).
* 2) Further if the match length is 4..18, copy 18 bytes
* in a similar manner; but we ensure that there's enough
* space in the output for those 18 bytes earlier, upon
* entering the shortcut (in other words, there is a
* combined check for both stages).
*
* The & in the likely() below is intentionally not && so that
* some compilers can produce better parallelized runtime code
*/
if ((endOnInput ? length != RUN_MASK : length <= 8)
/*
* strictly "less than" on input, to re-enter
* the loop with at least one byte
*/
&& likely((endOnInput ? ip < shortiend : 1) &
(op <= shortoend))) {
/* Copy the literals */
LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;
/*
* The second stage:
* prepare for match copying, decode full info.
* If it doesn't work out, the info won't be wasted.
*/
length = token & ML_MASK; /* match length */
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
assert(match <= op); /* check overflow */
/* Do not deal with overlapping matches. */
if ((length != ML_MASK) &&
(offset >= 8) &&
(dict == withPrefix64k || match >= lowPrefix)) {
/* Copy the match. */
LZ4_memcpy(op + 0, match + 0, 8);
LZ4_memcpy(op + 8, match + 8, 8);
LZ4_memcpy(op + 16, match + 16, 2);
op += length + MINMATCH;
/* Both stages worked, load the next token. */
continue;
}
/*
* The second stage didn't work out, but the info
* is ready. Propel it right to the point of match
* copying.
*/
goto _copy_match;
}
/* decode literal length */
if (length == RUN_MASK) {
unsigned int s;
if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
/* overflow detection */
goto _output_error;
}
do {
s = *ip++;
length += s;
} while (likely(endOnInput
? ip < iend - RUN_MASK
: 1) & (s == 255));
if ((safeDecode)
&& unlikely((uptrval)(op) +
length < (uptrval)(op))) {
/* overflow detection */
goto _output_error;
}
if ((safeDecode)
&& unlikely((uptrval)(ip) +
length < (uptrval)(ip))) {
/* overflow detection */
goto _output_error;
}
}
/* copy literals */
cpy = op + length;
LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
if (((endOnInput) && ((cpy > oend - MFLIMIT)
|| (ip + length > iend - (2 + 1 + LASTLITERALS))))
|| ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
if (partialDecoding) {
if (cpy > oend) {
/*
* Partial decoding :
* stop in the middle of literal segment
*/
cpy = oend;
length = oend - op;
}
if ((endOnInput)
&& (ip + length > iend)) {
/*
* Error :
* read attempt beyond
* end of input buffer
*/
goto _output_error;
}
} else {
if ((!endOnInput)
&& (cpy != oend)) {
/*
* Error :
* block decoding must
* stop exactly there
*/
goto _output_error;
}
if ((endOnInput)
&& ((ip + length != iend)
|| (cpy > oend))) {
/*
* Error :
* input must be consumed
*/
goto _output_error;
}
}
/*
* supports overlapping memory regions; only matters
* for in-place decompression scenarios
*/
LZ4_memmove(op, ip, length);
ip += length;
op += length;
/* Necessarily EOF when !partialDecoding.
* When partialDecoding, it is EOF if we've either
* filled the output buffer or
			 * can't proceed with reading an offset for the following match.
*/
if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
break;
} else {
/* may overwrite up to WILDCOPYLENGTH beyond cpy */
LZ4_wildCopy(op, ip, cpy);
ip += length;
op = cpy;
}
/* get offset */
offset = LZ4_readLE16(ip);
ip += 2;
match = op - offset;
/* get matchlength */
length = token & ML_MASK;
_copy_match:
if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
/* Error : offset outside buffers */
goto _output_error;
}
/* costs ~1%; silence an msan warning when offset == 0 */
/*
* note : when partialDecoding, there is no guarantee that
* at least 4 bytes remain available in output buffer
*/
if (!partialDecoding) {
assert(oend > op);
assert(oend - op >= 4);
LZ4_write32(op, (U32)offset);
}
if (length == ML_MASK) {
unsigned int s;
do {
s = *ip++;
if ((endOnInput) && (ip > iend - LASTLITERALS))
goto _output_error;
length += s;
} while (s == 255);
if ((safeDecode)
&& unlikely(
(uptrval)(op) + length < (uptrval)op)) {
/* overflow detection */
goto _output_error;
}
}
length += MINMATCH;
/* match starting within external dictionary */
if ((dict == usingExtDict) && (match < lowPrefix)) {
if (unlikely(op + length > oend - LASTLITERALS)) {
/* doesn't respect parsing restriction */
if (!partialDecoding)
goto _output_error;
length = min(length, (size_t)(oend - op));
}
if (length <= (size_t)(lowPrefix - match)) {
/*
* match fits entirely within external
* dictionary : just copy
*/
memmove(op, dictEnd - (lowPrefix - match),
length);
op += length;
} else {
/*
* match stretches into both external
* dictionary and current block
*/
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;
LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
BYTE * const endOfMatch = op + restSize;
const BYTE *copyFrom = lowPrefix;
while (op < endOfMatch)
*op++ = *copyFrom++;
} else {
LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
}
}
continue;
}
/* copy match within block */
cpy = op + length;
/*
* partialDecoding :
* may not respect endBlock parsing restrictions
*/
assert(op <= oend);
if (partialDecoding &&
(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
size_t const mlen = min(length, (size_t)(oend - op));
const BYTE * const matchEnd = match + mlen;
BYTE * const copyEnd = op + mlen;
if (matchEnd > op) {
/* overlap copy */
while (op < copyEnd)
*op++ = *match++;
} else {
LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
if (op == oend)
break;
continue;
}
if (unlikely(offset < 8)) {
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += inc32table[offset];
LZ4_memcpy(op + 4, match, 4);
match -= dec64table[offset];
} else {
LZ4_copy8(op, match);
match += 8;
}
op += 8;
if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
if (cpy > oend - LASTLITERALS) {
/*
* Error : last LASTLITERALS bytes
* must be literals (uncompressed)
*/
goto _output_error;
}
if (op < oCopyLimit) {
LZ4_wildCopy(op, match, oCopyLimit);
match += oCopyLimit - op;
op = oCopyLimit;
}
while (op < cpy)
*op++ = *match++;
} else {
LZ4_copy8(op, match);
if (length > 16)
LZ4_wildCopy(op + 8, match + 8, cpy);
}
op = cpy; /* wildcopy correction */
}
/* end of decoding */
if (endOnInput) {
/* Nb of output bytes decoded */
return (int) (((char *)op) - dst);
} else {
/* Nb of input bytes read */
return (int) (((const char *)ip) - src);
}
/* Overflow error detected */
_output_error:
return (int) (-(((const char *)ip) - src)) - 1;
}
int LZ4_decompress_safe(const char *source, char *dest,
int compressedSize, int maxDecompressedSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxDecompressedSize,
endOnInputSize, decode_full_block,
noDict, (BYTE *)dest, NULL, 0);
}
int LZ4_decompress_safe_partial(const char *src, char *dst,
int compressedSize, int targetOutputSize, int dstCapacity)
{
dstCapacity = min(targetOutputSize, dstCapacity);
return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
endOnInputSize, partial_decode,
noDict, (BYTE *)dst, NULL, 0);
}
int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
{
return LZ4_decompress_generic(source, dest, 0, originalSize,
endOnOutputSize, decode_full_block,
withPrefix64k,
(BYTE *)dest - 64 * KB, NULL, 0);
}
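/*
 * Illustrative usage sketch (not part of the upstream file): a minimal
 * one-shot caller of LZ4_decompress_safe(). The buffer names and parameters
 * are hypothetical; only the function defined above is relied upon.
 */
static int __maybe_unused lz4_decompress_example(const char *comp, int comp_len,
						 char *out, int out_capacity)
{
	/*
	 * Returns the number of bytes written to 'out', or a negative value
	 * if the compressed data is malformed or does not fit in 'out'.
	 */
	return LZ4_decompress_safe(comp, out, comp_len, out_capacity);
}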
/* ===== Instantiate a few more decoding cases, used more than once. ===== */
static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
int compressedSize, int maxOutputSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
endOnInputSize, decode_full_block,
withPrefix64k,
(BYTE *)dest - 64 * KB, NULL, 0);
}
static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
int compressedSize,
int maxOutputSize,
size_t prefixSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
endOnInputSize, decode_full_block,
noDict,
(BYTE *)dest - prefixSize, NULL, 0);
}
static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
int compressedSize, int maxOutputSize,
const void *dictStart, size_t dictSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
endOnInputSize, decode_full_block,
usingExtDict, (BYTE *)dest,
(const BYTE *)dictStart, dictSize);
}
static int LZ4_decompress_fast_extDict(const char *source, char *dest,
int originalSize,
const void *dictStart, size_t dictSize)
{
return LZ4_decompress_generic(source, dest,
0, originalSize,
endOnOutputSize, decode_full_block,
usingExtDict, (BYTE *)dest,
(const BYTE *)dictStart, dictSize);
}
/*
* The "double dictionary" mode, for use with e.g. ring buffers: the first part
* of the dictionary is passed as prefix, and the second via dictStart + dictSize.
* These routines are used only once, in LZ4_decompress_*_continue().
*/
static FORCE_INLINE
int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
int compressedSize, int maxOutputSize,
size_t prefixSize,
const void *dictStart, size_t dictSize)
{
return LZ4_decompress_generic(source, dest,
compressedSize, maxOutputSize,
endOnInputSize, decode_full_block,
usingExtDict, (BYTE *)dest - prefixSize,
(const BYTE *)dictStart, dictSize);
}
static FORCE_INLINE
int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
int originalSize, size_t prefixSize,
const void *dictStart, size_t dictSize)
{
return LZ4_decompress_generic(source, dest,
0, originalSize,
endOnOutputSize, decode_full_block,
usingExtDict, (BYTE *)dest - prefixSize,
(const BYTE *)dictStart, dictSize);
}
/* ===== streaming decompression functions ===== */
int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
const char *dictionary, int dictSize)
{
LZ4_streamDecode_t_internal *lz4sd =
&LZ4_streamDecode->internal_donotuse;
lz4sd->prefixSize = (size_t) dictSize;
lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
lz4sd->externalDict = NULL;
lz4sd->extDictSize = 0;
return 1;
}
/*
* *_continue() :
* These decoding functions allow decompression of multiple blocks
* in "streaming" mode.
* Previously decoded blocks must still be available at the memory
* position where they were decoded.
* If it's not possible, save the relevant part of
* decoded data into a safe buffer,
* and indicate where it stands using LZ4_setStreamDecode()
*/
int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int compressedSize, int maxOutputSize)
{
LZ4_streamDecode_t_internal *lz4sd =
&LZ4_streamDecode->internal_donotuse;
int result;
if (lz4sd->prefixSize == 0) {
/* The first call, no dictionary yet. */
assert(lz4sd->extDictSize == 0);
result = LZ4_decompress_safe(source, dest,
compressedSize, maxOutputSize);
if (result <= 0)
return result;
lz4sd->prefixSize = result;
lz4sd->prefixEnd = (BYTE *)dest + result;
} else if (lz4sd->prefixEnd == (BYTE *)dest) {
/* They're rolling the current segment. */
if (lz4sd->prefixSize >= 64 * KB - 1)
result = LZ4_decompress_safe_withPrefix64k(source, dest,
compressedSize, maxOutputSize);
else if (lz4sd->extDictSize == 0)
result = LZ4_decompress_safe_withSmallPrefix(source,
dest, compressedSize, maxOutputSize,
lz4sd->prefixSize);
else
result = LZ4_decompress_safe_doubleDict(source, dest,
compressedSize, maxOutputSize,
lz4sd->prefixSize,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize += result;
lz4sd->prefixEnd += result;
} else {
/*
* The buffer wraps around, or they're
* switching to another buffer.
*/
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
result = LZ4_decompress_safe_forceExtDict(source, dest,
compressedSize, maxOutputSize,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = result;
lz4sd->prefixEnd = (BYTE *)dest + result;
}
return result;
}
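/*
 * Illustrative usage sketch (not part of the upstream file): decoding a
 * sequence of dependent blocks with LZ4_decompress_safe_continue(). The
 * block layout and buffer names are hypothetical; as noted above, previously
 * decoded data must remain available at the position where it was decoded.
 */
static int __maybe_unused lz4_stream_decompress_example(const char *blocks[],
							 const int block_sizes[],
							 int nr_blocks,
							 char *out, int out_capacity)
{
	LZ4_streamDecode_t ctx;
	char *dst = out;
	int i, ret;

	LZ4_setStreamDecode(&ctx, NULL, 0);
	for (i = 0; i < nr_blocks; i++) {
		ret = LZ4_decompress_safe_continue(&ctx, blocks[i], dst,
						   block_sizes[i],
						   out_capacity - (int)(dst - out));
		if (ret < 0)
			return ret;	/* corrupted block or output too small */
		dst += ret;	/* the next block decodes right after this one */
	}
	return (int)(dst - out);	/* total decompressed size */
}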
int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
const char *source, char *dest, int originalSize)
{
LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
int result;
if (lz4sd->prefixSize == 0) {
assert(lz4sd->extDictSize == 0);
result = LZ4_decompress_fast(source, dest, originalSize);
if (result <= 0)
return result;
lz4sd->prefixSize = originalSize;
lz4sd->prefixEnd = (BYTE *)dest + originalSize;
} else if (lz4sd->prefixEnd == (BYTE *)dest) {
if (lz4sd->prefixSize >= 64 * KB - 1 ||
lz4sd->extDictSize == 0)
result = LZ4_decompress_fast(source, dest,
originalSize);
else
result = LZ4_decompress_fast_doubleDict(source, dest,
originalSize, lz4sd->prefixSize,
lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize += originalSize;
lz4sd->prefixEnd += originalSize;
} else {
lz4sd->extDictSize = lz4sd->prefixSize;
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
result = LZ4_decompress_fast_extDict(source, dest,
originalSize, lz4sd->externalDict, lz4sd->extDictSize);
if (result <= 0)
return result;
lz4sd->prefixSize = originalSize;
lz4sd->prefixEnd = (BYTE *)dest + originalSize;
}
return result;
}
int LZ4_decompress_safe_usingDict(const char *source, char *dest,
int compressedSize, int maxOutputSize,
const char *dictStart, int dictSize)
{
if (dictSize == 0)
return LZ4_decompress_safe(source, dest,
compressedSize, maxOutputSize);
if (dictStart+dictSize == dest) {
if (dictSize >= 64 * KB - 1)
return LZ4_decompress_safe_withPrefix64k(source, dest,
compressedSize, maxOutputSize);
return LZ4_decompress_safe_withSmallPrefix(source, dest,
compressedSize, maxOutputSize, dictSize);
}
return LZ4_decompress_safe_forceExtDict(source, dest,
compressedSize, maxOutputSize, dictStart, dictSize);
}
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
int originalSize,
const char *dictStart, int dictSize)
{
if (dictSize == 0 || dictStart + dictSize == dest)
return LZ4_decompress_fast(source, dest, originalSize);
return LZ4_decompress_fast_extDict(source, dest, originalSize,
dictStart, dictSize);
}
#ifndef STATIC
EXPORT_SYMBOL(LZ4_decompress_safe);
EXPORT_SYMBOL(LZ4_decompress_safe_partial);
EXPORT_SYMBOL(LZ4_decompress_fast);
EXPORT_SYMBOL(LZ4_setStreamDecode);
EXPORT_SYMBOL(LZ4_decompress_safe_continue);
EXPORT_SYMBOL(LZ4_decompress_fast_continue);
EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 decompressor");
#endif
| linux-master | lib/lz4/lz4_decompress.c |
/*
* LZ4 HC - High Compression Mode of LZ4
* Copyright (C) 2011-2015, Yann Collet.
*
* BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* You can contact the author at :
* - LZ4 homepage : http://www.lz4.org
* - LZ4 source repository : https://github.com/lz4/lz4
*
* Changed for kernel usage by:
* Sven Schmidt <[email protected]>
*/
/*-************************************
* Dependencies
**************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
/* *************************************
* Local Constants and types
***************************************/
#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
#define HASH_FUNCTION(i) (((i) * 2654435761U) \
>> ((MINMATCH*8) - LZ4HC_HASH_LOG))
#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
static U32 LZ4HC_hashPtr(const void *ptr)
{
return HASH_FUNCTION(LZ4_read32(ptr));
}
/**************************************
* HC Compression
**************************************/
static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
{
memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
hc4->nextToUpdate = 64 * KB;
hc4->base = start - 64 * KB;
hc4->end = start;
hc4->dictBase = start - 64 * KB;
hc4->dictLimit = 64 * KB;
hc4->lowLimit = 64 * KB;
}
/* Update chains up to ip (excluded) */
static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
const BYTE *ip)
{
U16 * const chainTable = hc4->chainTable;
U32 * const hashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
U32 const target = (U32)(ip - base);
U32 idx = hc4->nextToUpdate;
while (idx < target) {
U32 const h = LZ4HC_hashPtr(base + idx);
size_t delta = idx - hashTable[h];
if (delta > MAX_DISTANCE)
delta = MAX_DISTANCE;
DELTANEXTU16(idx) = (U16)delta;
hashTable[h] = idx;
idx++;
}
hc4->nextToUpdate = target;
}
static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
const BYTE *ip,
const BYTE * const iLimit,
const BYTE **matchpos,
const int maxNbAttempts)
{
U16 * const chainTable = hc4->chainTable;
U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
const BYTE * const dictBase = hc4->dictBase;
const U32 dictLimit = hc4->dictLimit;
const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
? hc4->lowLimit
: (U32)(ip - base) - (64 * KB - 1);
U32 matchIndex;
int nbAttempts = maxNbAttempts;
size_t ml = 0;
/* HC4 match finder */
LZ4HC_Insert(hc4, ip);
matchIndex = HashTable[LZ4HC_hashPtr(ip)];
while ((matchIndex >= lowLimit)
&& (nbAttempts)) {
nbAttempts--;
if (matchIndex >= dictLimit) {
const BYTE * const match = base + matchIndex;
if (*(match + ml) == *(ip + ml)
&& (LZ4_read32(match) == LZ4_read32(ip))) {
size_t const mlt = LZ4_count(ip + MINMATCH,
match + MINMATCH, iLimit) + MINMATCH;
if (mlt > ml) {
ml = mlt;
*matchpos = match;
}
}
} else {
const BYTE * const match = dictBase + matchIndex;
if (LZ4_read32(match) == LZ4_read32(ip)) {
size_t mlt;
const BYTE *vLimit = ip
+ (dictLimit - matchIndex);
if (vLimit > iLimit)
vLimit = iLimit;
mlt = LZ4_count(ip + MINMATCH,
match + MINMATCH, vLimit) + MINMATCH;
if ((ip + mlt == vLimit)
&& (vLimit < iLimit))
mlt += LZ4_count(ip + mlt,
base + dictLimit,
iLimit);
if (mlt > ml) {
/* virtual matchpos */
ml = mlt;
*matchpos = base + matchIndex;
}
}
}
matchIndex -= DELTANEXTU16(matchIndex);
}
return (int)ml;
}
static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
LZ4HC_CCtx_internal *hc4,
const BYTE * const ip,
const BYTE * const iLowLimit,
const BYTE * const iHighLimit,
int longest,
const BYTE **matchpos,
const BYTE **startpos,
const int maxNbAttempts)
{
U16 * const chainTable = hc4->chainTable;
U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
const U32 dictLimit = hc4->dictLimit;
const BYTE * const lowPrefixPtr = base + dictLimit;
const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
? hc4->lowLimit
: (U32)(ip - base) - (64 * KB - 1);
const BYTE * const dictBase = hc4->dictBase;
U32 matchIndex;
int nbAttempts = maxNbAttempts;
int delta = (int)(ip - iLowLimit);
/* First Match */
LZ4HC_Insert(hc4, ip);
matchIndex = HashTable[LZ4HC_hashPtr(ip)];
while ((matchIndex >= lowLimit)
&& (nbAttempts)) {
nbAttempts--;
if (matchIndex >= dictLimit) {
const BYTE *matchPtr = base + matchIndex;
if (*(iLowLimit + longest)
== *(matchPtr - delta + longest)) {
if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
int mlt = MINMATCH + LZ4_count(
ip + MINMATCH,
matchPtr + MINMATCH,
iHighLimit);
int back = 0;
while ((ip + back > iLowLimit)
&& (matchPtr + back > lowPrefixPtr)
&& (ip[back - 1] == matchPtr[back - 1]))
back--;
mlt -= back;
if (mlt > longest) {
longest = (int)mlt;
*matchpos = matchPtr + back;
*startpos = ip + back;
}
}
}
} else {
const BYTE * const matchPtr = dictBase + matchIndex;
if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
size_t mlt;
int back = 0;
const BYTE *vLimit = ip + (dictLimit - matchIndex);
if (vLimit > iHighLimit)
vLimit = iHighLimit;
mlt = LZ4_count(ip + MINMATCH,
matchPtr + MINMATCH, vLimit) + MINMATCH;
if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
mlt += LZ4_count(ip + mlt, base + dictLimit,
iHighLimit);
while ((ip + back > iLowLimit)
&& (matchIndex + back > lowLimit)
&& (ip[back - 1] == matchPtr[back - 1]))
back--;
mlt -= back;
if ((int)mlt > longest) {
longest = (int)mlt;
*matchpos = base + matchIndex + back;
*startpos = ip + back;
}
}
}
matchIndex -= DELTANEXTU16(matchIndex);
}
return longest;
}
static FORCE_INLINE int LZ4HC_encodeSequence(
const BYTE **ip,
BYTE **op,
const BYTE **anchor,
int matchLength,
const BYTE * const match,
limitedOutput_directive limitedOutputBuffer,
BYTE *oend)
{
int length;
BYTE *token;
/* Encode Literal length */
length = (int)(*ip - *anchor);
token = (*op)++;
if ((limitedOutputBuffer)
&& ((*op + (length>>8)
+ length + (2 + 1 + LASTLITERALS)) > oend)) {
/* Check output limit */
return 1;
}
if (length >= (int)RUN_MASK) {
int len;
*token = (RUN_MASK<<ML_BITS);
len = length - RUN_MASK;
for (; len > 254 ; len -= 255)
*(*op)++ = 255;
*(*op)++ = (BYTE)len;
} else
*token = (BYTE)(length<<ML_BITS);
/* Copy Literals */
LZ4_wildCopy(*op, *anchor, (*op) + length);
*op += length;
/* Encode Offset */
LZ4_writeLE16(*op, (U16)(*ip - match));
*op += 2;
/* Encode MatchLength */
length = (int)(matchLength - MINMATCH);
if ((limitedOutputBuffer)
&& (*op + (length>>8)
+ (1 + LASTLITERALS) > oend)) {
/* Check output limit */
return 1;
}
if (length >= (int)ML_MASK) {
*token += ML_MASK;
length -= ML_MASK;
for (; length > 509 ; length -= 510) {
*(*op)++ = 255;
*(*op)++ = 255;
}
if (length > 254) {
length -= 255;
*(*op)++ = 255;
}
*(*op)++ = (BYTE)length;
} else
*token += (BYTE)(length);
/* Prepare next loop */
*ip += matchLength;
*anchor = *ip;
return 0;
}
static int LZ4HC_compress_generic(
LZ4HC_CCtx_internal *const ctx,
const char * const source,
char * const dest,
int const inputSize,
int const maxOutputSize,
int compressionLevel,
limitedOutput_directive limit
)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *anchor = ip;
const BYTE * const iend = ip + inputSize;
const BYTE * const mflimit = iend - MFLIMIT;
const BYTE * const matchlimit = (iend - LASTLITERALS);
BYTE *op = (BYTE *) dest;
BYTE * const oend = op + maxOutputSize;
unsigned int maxNbAttempts;
int ml, ml2, ml3, ml0;
const BYTE *ref = NULL;
const BYTE *start2 = NULL;
const BYTE *ref2 = NULL;
const BYTE *start3 = NULL;
const BYTE *ref3 = NULL;
const BYTE *start0;
const BYTE *ref0;
/* init */
if (compressionLevel > LZ4HC_MAX_CLEVEL)
compressionLevel = LZ4HC_MAX_CLEVEL;
if (compressionLevel < 1)
compressionLevel = LZ4HC_DEFAULT_CLEVEL;
maxNbAttempts = 1 << (compressionLevel - 1);
ctx->end += inputSize;
ip++;
/* Main Loop */
while (ip < mflimit) {
ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
matchlimit, (&ref), maxNbAttempts);
if (!ml) {
ip++;
continue;
}
/* saved, in case we would skip too much */
start0 = ip;
ref0 = ref;
ml0 = ml;
_Search2:
if (ip + ml < mflimit)
ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
ip + ml - 2, ip + 0,
matchlimit, ml, &ref2,
&start2, maxNbAttempts);
else
ml2 = ml;
if (ml2 == ml) {
/* No better match */
if (LZ4HC_encodeSequence(&ip, &op,
&anchor, ml, ref, limit, oend))
return 0;
continue;
}
if (start0 < ip) {
if (start2 < ip + ml0) {
/* empirical */
ip = start0;
ref = ref0;
ml = ml0;
}
}
/* Here, start0 == ip */
if ((start2 - ip) < 3) {
/* First Match too small : removed */
ml = ml2;
ip = start2;
ref = ref2;
goto _Search2;
}
_Search3:
/*
* Currently we have :
* ml2 > ml1, and
* ip1 + 3 <= ip2 (usually < ip1 + ml1)
*/
if ((start2 - ip) < OPTIMAL_ML) {
int correction;
int new_ml = ml;
if (new_ml > OPTIMAL_ML)
new_ml = OPTIMAL_ML;
if (ip + new_ml > start2 + ml2 - MINMATCH)
new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = new_ml - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
ref2 += correction;
ml2 -= correction;
}
}
/*
* Now, we have start2 = ip + new_ml,
* with new_ml = min(ml, OPTIMAL_ML = 18)
*/
if (start2 + ml2 < mflimit)
ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
start2 + ml2 - 3, start2,
matchlimit, ml2, &ref3, &start3,
maxNbAttempts);
else
ml3 = ml2;
if (ml3 == ml2) {
/* No better match : 2 sequences to encode */
/* ip & ref are known; Now for ml */
if (start2 < ip + ml)
ml = (int)(start2 - ip);
/* Now, encode 2 sequences */
if (LZ4HC_encodeSequence(&ip, &op, &anchor,
ml, ref, limit, oend))
return 0;
ip = start2;
if (LZ4HC_encodeSequence(&ip, &op, &anchor,
ml2, ref2, limit, oend))
return 0;
continue;
}
if (start3 < ip + ml + 3) {
/* Not enough space for match 2 : remove it */
if (start3 >= (ip + ml)) {
/* can write Seq1 immediately
* ==> Seq2 is removed,
* so Seq3 becomes Seq1
*/
if (start2 < ip + ml) {
int correction = (int)(ip + ml - start2);
start2 += correction;
ref2 += correction;
ml2 -= correction;
if (ml2 < MINMATCH) {
start2 = start3;
ref2 = ref3;
ml2 = ml3;
}
}
if (LZ4HC_encodeSequence(&ip, &op, &anchor,
ml, ref, limit, oend))
return 0;
ip = start3;
ref = ref3;
ml = ml3;
start0 = start2;
ref0 = ref2;
ml0 = ml2;
goto _Search2;
}
start2 = start3;
ref2 = ref3;
ml2 = ml3;
goto _Search3;
}
/*
* OK, now we have 3 ascending matches;
* let's write at least the first one
* ip & ref are known; Now for ml
*/
if (start2 < ip + ml) {
if ((start2 - ip) < (int)ML_MASK) {
int correction;
if (ml > OPTIMAL_ML)
ml = OPTIMAL_ML;
if (ip + ml > start2 + ml2 - MINMATCH)
ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = ml - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
ref2 += correction;
ml2 -= correction;
}
} else
ml = (int)(start2 - ip);
}
if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
ref, limit, oend))
return 0;
ip = start2;
ref = ref2;
ml = ml2;
start2 = start3;
ref2 = ref3;
ml2 = ml3;
goto _Search3;
}
/* Encode Last Literals */
{
int lastRun = (int)(iend - anchor);
if ((limit)
&& (((char *)op - dest) + lastRun + 1
+ ((lastRun + 255 - RUN_MASK)/255)
> (U32)maxOutputSize)) {
/* Check output limit */
return 0;
}
if (lastRun >= (int)RUN_MASK) {
*op++ = (RUN_MASK<<ML_BITS);
lastRun -= RUN_MASK;
for (; lastRun > 254 ; lastRun -= 255)
*op++ = 255;
*op++ = (BYTE) lastRun;
} else
*op++ = (BYTE)(lastRun<<ML_BITS);
LZ4_memcpy(op, anchor, iend - anchor);
op += iend - anchor;
}
/* End */
return (int) (((char *)op) - dest);
}
static int LZ4_compress_HC_extStateHC(
void *state,
const char *src,
char *dst,
int srcSize,
int maxDstSize,
int compressionLevel)
{
LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
/* Error : state is not aligned
* for pointers (32 or 64 bits)
*/
return 0;
}
LZ4HC_init(ctx, (const BYTE *)src);
if (maxDstSize < LZ4_compressBound(srcSize))
return LZ4HC_compress_generic(ctx, src, dst,
srcSize, maxDstSize, compressionLevel, limitedOutput);
else
return LZ4HC_compress_generic(ctx, src, dst,
srcSize, maxDstSize, compressionLevel, noLimit);
}
int LZ4_compress_HC(const char *src, char *dst, int srcSize,
int maxDstSize, int compressionLevel, void *wrkmem)
{
return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
srcSize, maxDstSize, compressionLevel);
}
EXPORT_SYMBOL(LZ4_compress_HC);
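/*
 * Illustrative usage sketch (not part of the upstream file): one-shot LZ4HC
 * compression. The buffer names are hypothetical; 'wrkmem' is assumed to be
 * LZ4HC_MEM_COMPRESS bytes of scratch memory (declared in <linux/lz4.h>),
 * typically obtained with vmalloc() because it is large.
 */
static int __maybe_unused lz4hc_compress_example(const char *src, int src_len,
						 char *dst, int dst_capacity,
						 void *wrkmem)
{
	/* Returns the compressed size, or 0 if 'dst' is too small. */
	return LZ4_compress_HC(src, dst, src_len, dst_capacity,
			       LZ4HC_DEFAULT_CLEVEL, wrkmem);
}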
/**************************************
* Streaming Functions
**************************************/
void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
{
LZ4_streamHCPtr->internal_donotuse.base = NULL;
LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
}
int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
const char *dictionary,
int dictSize)
{
LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
if (dictSize > 64 * KB) {
dictionary += dictSize - 64 * KB;
dictSize = 64 * KB;
}
LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
if (dictSize >= 4)
LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
ctxPtr->end = (const BYTE *)dictionary + dictSize;
return dictSize;
}
EXPORT_SYMBOL(LZ4_loadDictHC);
/* compression */
static void LZ4HC_setExternalDict(
LZ4HC_CCtx_internal *ctxPtr,
const BYTE *newBlock)
{
if (ctxPtr->end >= ctxPtr->base + 4) {
/* Referencing remaining dictionary content */
LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
}
/*
* Only one memory segment for extDict,
* so any previous extDict is lost at this stage
*/
ctxPtr->lowLimit = ctxPtr->dictLimit;
ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
ctxPtr->dictBase = ctxPtr->base;
ctxPtr->base = newBlock - ctxPtr->dictLimit;
ctxPtr->end = newBlock;
/* match referencing will resume from there */
ctxPtr->nextToUpdate = ctxPtr->dictLimit;
}
static int LZ4_compressHC_continue_generic(
LZ4_streamHC_t *LZ4_streamHCPtr,
const char *source,
char *dest,
int inputSize,
int maxOutputSize,
limitedOutput_directive limit)
{
LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
/* auto - init if forgotten */
if (ctxPtr->base == NULL)
LZ4HC_init(ctxPtr, (const BYTE *) source);
/* Check overflow */
if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
- ctxPtr->dictLimit;
if (dictSize > 64 * KB)
dictSize = 64 * KB;
LZ4_loadDictHC(LZ4_streamHCPtr,
(const char *)(ctxPtr->end) - dictSize, (int)dictSize);
}
/* Check if blocks follow each other */
if ((const BYTE *)source != ctxPtr->end)
LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
/* Check overlapping input/dictionary space */
{
const BYTE *sourceEnd = (const BYTE *) source + inputSize;
const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
if ((sourceEnd > dictBegin)
&& ((const BYTE *)source < dictEnd)) {
if (sourceEnd > dictEnd)
sourceEnd = dictEnd;
ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
ctxPtr->lowLimit = ctxPtr->dictLimit;
}
}
return LZ4HC_compress_generic(ctxPtr, source, dest,
inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
}
int LZ4_compress_HC_continue(
LZ4_streamHC_t *LZ4_streamHCPtr,
const char *source,
char *dest,
int inputSize,
int maxOutputSize)
{
if (maxOutputSize < LZ4_compressBound(inputSize))
return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
source, dest, inputSize, maxOutputSize, limitedOutput);
else
return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
source, dest, inputSize, maxOutputSize, noLimit);
}
EXPORT_SYMBOL(LZ4_compress_HC_continue);
/* dictionary saving */
int LZ4_saveDictHC(
LZ4_streamHC_t *LZ4_streamHCPtr,
char *safeBuffer,
int dictSize)
{
LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
int const prefixSize = (int)(streamPtr->end
- (streamPtr->base + streamPtr->dictLimit));
if (dictSize > 64 * KB)
dictSize = 64 * KB;
if (dictSize < 4)
dictSize = 0;
if (dictSize > prefixSize)
dictSize = prefixSize;
memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
{
U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
streamPtr->end = (const BYTE *)safeBuffer + dictSize;
streamPtr->base = streamPtr->end - endIndex;
streamPtr->dictLimit = endIndex - dictSize;
streamPtr->lowLimit = endIndex - dictSize;
if (streamPtr->nextToUpdate < streamPtr->dictLimit)
streamPtr->nextToUpdate = streamPtr->dictLimit;
}
return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDictHC);
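/*
 * Illustrative usage sketch (not part of the upstream file): compressing a
 * series of blocks with the streaming HC API defined above. Buffer names are
 * hypothetical; each source block must stay valid in memory until the next
 * block has been compressed, since it serves as the dictionary.
 */
static int __maybe_unused lz4hc_stream_compress_example(LZ4_streamHC_t *ctx,
							 const char *blocks[],
							 const int block_lens[],
							 int nr_blocks,
							 char *dst, int dst_capacity)
{
	char *out = dst;
	int i, ret;

	LZ4_resetStreamHC(ctx, LZ4HC_DEFAULT_CLEVEL);
	for (i = 0; i < nr_blocks; i++) {
		ret = LZ4_compress_HC_continue(ctx, blocks[i], out,
					       block_lens[i],
					       dst_capacity - (int)(out - dst));
		if (ret <= 0)
			return 0;	/* remaining output space too small */
		out += ret;
	}
	return (int)(out - dst);	/* total compressed size */
}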
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 HC compressor");
| linux-master | lib/lz4/lz4hc_compress.c |
/*
* LZ4 - Fast LZ compression algorithm
* Copyright (C) 2011 - 2016, Yann Collet.
* BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* You can contact the author at :
* - LZ4 homepage : http://www.lz4.org
* - LZ4 source repository : https://github.com/lz4/lz4
*
* Changed for kernel usage by:
* Sven Schmidt <[email protected]>
*/
/*-************************************
* Dependencies
**************************************/
#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
static const int LZ4_minLength = (MFLIMIT + 1);
static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
/*-******************************
* Compression functions
********************************/
static FORCE_INLINE U32 LZ4_hash4(
U32 sequence,
tableType_t const tableType)
{
if (tableType == byU16)
return ((sequence * 2654435761U)
>> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
else
return ((sequence * 2654435761U)
>> ((MINMATCH * 8) - LZ4_HASHLOG));
}
static FORCE_INLINE U32 LZ4_hash5(
U64 sequence,
tableType_t const tableType)
{
const U32 hashLog = (tableType == byU16)
? LZ4_HASHLOG + 1
: LZ4_HASHLOG;
#if LZ4_LITTLE_ENDIAN
static const U64 prime5bytes = 889523592379ULL;
return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
#else
static const U64 prime8bytes = 11400714785074694791ULL;
return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
#endif
}
static FORCE_INLINE U32 LZ4_hashPosition(
const void *p,
tableType_t const tableType)
{
#if LZ4_ARCH64
if (tableType == byU32)
return LZ4_hash5(LZ4_read_ARCH(p), tableType);
#endif
return LZ4_hash4(LZ4_read32(p), tableType);
}
static void LZ4_putPositionOnHash(
const BYTE *p,
U32 h,
void *tableBase,
tableType_t const tableType,
const BYTE *srcBase)
{
switch (tableType) {
case byPtr:
{
const BYTE **hashTable = (const BYTE **)tableBase;
hashTable[h] = p;
return;
}
case byU32:
{
U32 *hashTable = (U32 *) tableBase;
hashTable[h] = (U32)(p - srcBase);
return;
}
case byU16:
{
U16 *hashTable = (U16 *) tableBase;
hashTable[h] = (U16)(p - srcBase);
return;
}
}
}
static FORCE_INLINE void LZ4_putPosition(
const BYTE *p,
void *tableBase,
tableType_t tableType,
const BYTE *srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
static const BYTE *LZ4_getPositionOnHash(
U32 h,
void *tableBase,
tableType_t tableType,
const BYTE *srcBase)
{
if (tableType == byPtr) {
const BYTE **hashTable = (const BYTE **) tableBase;
return hashTable[h];
}
if (tableType == byU32) {
const U32 * const hashTable = (U32 *) tableBase;
return hashTable[h] + srcBase;
}
{
/* default, to ensure a return */
const U16 * const hashTable = (U16 *) tableBase;
return hashTable[h] + srcBase;
}
}
static FORCE_INLINE const BYTE *LZ4_getPosition(
const BYTE *p,
void *tableBase,
tableType_t tableType,
const BYTE *srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
/*
* LZ4_compress_generic() :
* inlined, to ensure branches are decided at compilation time
*/
static FORCE_INLINE int LZ4_compress_generic(
LZ4_stream_t_internal * const dictPtr,
const char * const source,
char * const dest,
const int inputSize,
const int maxOutputSize,
const limitedOutput_directive outputLimited,
const tableType_t tableType,
const dict_directive dict,
const dictIssue_directive dictIssue,
const U32 acceleration)
{
const BYTE *ip = (const BYTE *) source;
const BYTE *base;
const BYTE *lowLimit;
const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
const BYTE * const dictionary = dictPtr->dictionary;
const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
const size_t dictDelta = dictEnd - (const BYTE *)source;
const BYTE *anchor = (const BYTE *) source;
const BYTE * const iend = ip + inputSize;
const BYTE * const mflimit = iend - MFLIMIT;
const BYTE * const matchlimit = iend - LASTLITERALS;
BYTE *op = (BYTE *) dest;
BYTE * const olimit = op + maxOutputSize;
U32 forwardH;
size_t refDelta = 0;
/* Init conditions */
if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
/* Unsupported inputSize, too large (or negative) */
return 0;
}
switch (dict) {
case noDict:
default:
base = (const BYTE *)source;
lowLimit = (const BYTE *)source;
break;
case withPrefix64k:
base = (const BYTE *)source - dictPtr->currentOffset;
lowLimit = (const BYTE *)source - dictPtr->dictSize;
break;
case usingExtDict:
base = (const BYTE *)source - dictPtr->currentOffset;
lowLimit = (const BYTE *)source;
break;
}
if ((tableType == byU16)
&& (inputSize >= LZ4_64Klimit)) {
/* Size too large (not within 64K limit) */
return 0;
}
if (inputSize < LZ4_minLength) {
/* Input too small, no compression (all literals) */
goto _last_literals;
}
/* First Byte */
LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
ip++;
forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
for ( ; ; ) {
const BYTE *match;
BYTE *token;
/* Find a match */
{
const BYTE *forwardIp = ip;
unsigned int step = 1;
unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
do {
U32 const h = forwardH;
ip = forwardIp;
forwardIp += step;
step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
if (unlikely(forwardIp > mflimit))
goto _last_literals;
match = LZ4_getPositionOnHash(h,
dictPtr->hashTable,
tableType, base);
if (dict == usingExtDict) {
if (match < (const BYTE *)source) {
refDelta = dictDelta;
lowLimit = dictionary;
} else {
refDelta = 0;
lowLimit = (const BYTE *)source;
} }
forwardH = LZ4_hashPosition(forwardIp,
tableType);
LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
tableType, base);
} while (((dictIssue == dictSmall)
? (match < lowRefLimit)
: 0)
|| ((tableType == byU16)
? 0
: (match + MAX_DISTANCE < ip))
|| (LZ4_read32(match + refDelta)
!= LZ4_read32(ip)));
}
/* Catch up */
while (((ip > anchor) & (match + refDelta > lowLimit))
&& (unlikely(ip[-1] == match[refDelta - 1]))) {
ip--;
match--;
}
/* Encode Literals */
{
unsigned const int litLength = (unsigned int)(ip - anchor);
token = op++;
if ((outputLimited) &&
/* Check output buffer overflow */
(unlikely(op + litLength +
(2 + 1 + LASTLITERALS) +
(litLength / 255) > olimit)))
return 0;
if (litLength >= RUN_MASK) {
int len = (int)litLength - RUN_MASK;
*token = (RUN_MASK << ML_BITS);
for (; len >= 255; len -= 255)
*op++ = 255;
*op++ = (BYTE)len;
} else
*token = (BYTE)(litLength << ML_BITS);
/* Copy Literals */
LZ4_wildCopy(op, anchor, op + litLength);
op += litLength;
}
_next_match:
/* Encode Offset */
LZ4_writeLE16(op, (U16)(ip - match));
op += 2;
/* Encode MatchLength */
{
unsigned int matchCode;
if ((dict == usingExtDict)
&& (lowLimit == dictionary)) {
const BYTE *limit;
match += refDelta;
limit = ip + (dictEnd - match);
if (limit > matchlimit)
limit = matchlimit;
matchCode = LZ4_count(ip + MINMATCH,
match + MINMATCH, limit);
ip += MINMATCH + matchCode;
if (ip == limit) {
unsigned const int more = LZ4_count(ip,
(const BYTE *)source,
matchlimit);
matchCode += more;
ip += more;
}
} else {
matchCode = LZ4_count(ip + MINMATCH,
match + MINMATCH, matchlimit);
ip += MINMATCH + matchCode;
}
if (outputLimited &&
/* Check output buffer overflow */
(unlikely(op +
(1 + LASTLITERALS) +
(matchCode >> 8) > olimit)))
return 0;
if (matchCode >= ML_MASK) {
*token += ML_MASK;
matchCode -= ML_MASK;
LZ4_write32(op, 0xFFFFFFFF);
while (matchCode >= 4 * 255) {
op += 4;
LZ4_write32(op, 0xFFFFFFFF);
matchCode -= 4 * 255;
}
op += matchCode / 255;
*op++ = (BYTE)(matchCode % 255);
} else
*token += (BYTE)(matchCode);
}
anchor = ip;
/* Test end of chunk */
if (ip > mflimit)
break;
/* Fill table */
LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
/* Test next position */
match = LZ4_getPosition(ip, dictPtr->hashTable,
tableType, base);
if (dict == usingExtDict) {
if (match < (const BYTE *)source) {
refDelta = dictDelta;
lowLimit = dictionary;
} else {
refDelta = 0;
lowLimit = (const BYTE *)source;
}
}
LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
&& (match + MAX_DISTANCE >= ip)
&& (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
token = op++;
*token = 0;
goto _next_match;
}
/* Prepare next loop */
forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
{
size_t const lastRun = (size_t)(iend - anchor);
if ((outputLimited) &&
/* Check output buffer overflow */
((op - (BYTE *)dest) + lastRun + 1 +
((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
return 0;
if (lastRun >= RUN_MASK) {
size_t accumulator = lastRun - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
for (; accumulator >= 255; accumulator -= 255)
*op++ = 255;
*op++ = (BYTE) accumulator;
} else {
*op++ = (BYTE)(lastRun << ML_BITS);
}
LZ4_memcpy(op, anchor, lastRun);
op += lastRun;
}
/* End */
return (int) (((char *)op) - dest);
}
static int LZ4_compress_fast_extState(
void *state,
const char *source,
char *dest,
int inputSize,
int maxOutputSize,
int acceleration)
{
LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
#if LZ4_ARCH64
const tableType_t tableType = byU32;
#else
const tableType_t tableType = byPtr;
#endif
LZ4_resetStream((LZ4_stream_t *)state);
if (acceleration < 1)
acceleration = LZ4_ACCELERATION_DEFAULT;
if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
if (inputSize < LZ4_64Klimit)
return LZ4_compress_generic(ctx, source,
dest, inputSize, 0,
noLimit, byU16, noDict,
noDictIssue, acceleration);
else
return LZ4_compress_generic(ctx, source,
dest, inputSize, 0,
noLimit, tableType, noDict,
noDictIssue, acceleration);
} else {
if (inputSize < LZ4_64Klimit)
return LZ4_compress_generic(ctx, source,
dest, inputSize,
maxOutputSize, limitedOutput, byU16, noDict,
noDictIssue, acceleration);
else
return LZ4_compress_generic(ctx, source,
dest, inputSize,
maxOutputSize, limitedOutput, tableType, noDict,
noDictIssue, acceleration);
}
}
int LZ4_compress_fast(const char *source, char *dest, int inputSize,
int maxOutputSize, int acceleration, void *wrkmem)
{
return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
maxOutputSize, acceleration);
}
EXPORT_SYMBOL(LZ4_compress_fast);
int LZ4_compress_default(const char *source, char *dest, int inputSize,
int maxOutputSize, void *wrkmem)
{
return LZ4_compress_fast(source, dest, inputSize,
maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
}
EXPORT_SYMBOL(LZ4_compress_default);
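/*
 * Illustrative usage sketch (not part of the original file): one-shot
 * compression with a caller-allocated workspace of LZ4_MEM_COMPRESS bytes.
 * The function and buffer names and the error policy are hypothetical.
 */
#if 0
static int lz4_oneshot_example(const char *src, int src_len,
			       char *dst, int dst_cap, void *wrkmem)
{
	/* Returns the compressed size, or 0 if dst_cap was too small. */
	int out_len = LZ4_compress_default(src, dst, src_len,
					   dst_cap, wrkmem);
	return out_len > 0 ? out_len : -EINVAL;
}
#endif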
/*-******************************
* *_destSize() variant
********************************/
static int LZ4_compress_destSize_generic(
LZ4_stream_t_internal * const ctx,
const char * const src,
char * const dst,
int * const srcSizePtr,
const int targetDstSize,
const tableType_t tableType)
{
const BYTE *ip = (const BYTE *) src;
const BYTE *base = (const BYTE *) src;
const BYTE *lowLimit = (const BYTE *) src;
const BYTE *anchor = ip;
const BYTE * const iend = ip + *srcSizePtr;
const BYTE * const mflimit = iend - MFLIMIT;
const BYTE * const matchlimit = iend - LASTLITERALS;
BYTE *op = (BYTE *) dst;
BYTE * const oend = op + targetDstSize;
BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
BYTE * const oMaxMatch = op + targetDstSize
- (LASTLITERALS + 1 /* token */);
BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
U32 forwardH;
/* Init conditions */
/* Impossible to store anything */
if (targetDstSize < 1)
return 0;
/* Unsupported input size, too large (or negative) */
if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
return 0;
/* Size too large (not within 64K limit) */
if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
return 0;
/* Input too small, no compression (all literals) */
if (*srcSizePtr < LZ4_minLength)
goto _last_literals;
/* First Byte */
*srcSizePtr = 0;
LZ4_putPosition(ip, ctx->hashTable, tableType, base);
ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
for ( ; ; ) {
const BYTE *match;
BYTE *token;
/* Find a match */
{
const BYTE *forwardIp = ip;
unsigned int step = 1;
unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
do {
U32 h = forwardH;
ip = forwardIp;
forwardIp += step;
step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
if (unlikely(forwardIp > mflimit))
goto _last_literals;
match = LZ4_getPositionOnHash(h, ctx->hashTable,
tableType, base);
forwardH = LZ4_hashPosition(forwardIp,
tableType);
LZ4_putPositionOnHash(ip, h,
ctx->hashTable, tableType,
base);
} while (((tableType == byU16)
? 0
: (match + MAX_DISTANCE < ip))
|| (LZ4_read32(match) != LZ4_read32(ip)));
}
/* Catch up */
while ((ip > anchor)
&& (match > lowLimit)
&& (unlikely(ip[-1] == match[-1]))) {
ip--;
match--;
}
/* Encode Literal length */
{
unsigned int litLength = (unsigned int)(ip - anchor);
token = op++;
if (op + ((litLength + 240) / 255)
+ litLength > oMaxLit) {
/* Not enough space for a last match */
op--;
goto _last_literals;
}
if (litLength >= RUN_MASK) {
unsigned int len = litLength - RUN_MASK;
*token = (RUN_MASK<<ML_BITS);
for (; len >= 255; len -= 255)
*op++ = 255;
*op++ = (BYTE)len;
} else
*token = (BYTE)(litLength << ML_BITS);
/* Copy Literals */
LZ4_wildCopy(op, anchor, op + litLength);
op += litLength;
}
_next_match:
/* Encode Offset */
LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
/* Encode MatchLength */
{
size_t matchLength = LZ4_count(ip + MINMATCH,
match + MINMATCH, matchlimit);
if (op + ((matchLength + 240)/255) > oMaxMatch) {
/* Match description too long : reduce it */
matchLength = (15 - 1) + (oMaxMatch - op) * 255;
}
ip += MINMATCH + matchLength;
if (matchLength >= ML_MASK) {
*token += ML_MASK;
matchLength -= ML_MASK;
while (matchLength >= 255) {
matchLength -= 255;
*op++ = 255;
}
*op++ = (BYTE)matchLength;
} else
*token += (BYTE)(matchLength);
}
anchor = ip;
/* Test end of block */
if (ip > mflimit)
break;
if (op > oMaxSeq)
break;
/* Fill table */
LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
/* Test next position */
match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
LZ4_putPosition(ip, ctx->hashTable, tableType, base);
if ((match + MAX_DISTANCE >= ip)
&& (LZ4_read32(match) == LZ4_read32(ip))) {
token = op++; *token = 0;
goto _next_match;
}
/* Prepare next loop */
forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
{
size_t lastRunSize = (size_t)(iend - anchor);
if (op + 1 /* token */
+ ((lastRunSize + 240) / 255) /* litLength */
+ lastRunSize /* literals */ > oend) {
/* adapt lastRunSize to fill 'dst' */
lastRunSize = (oend - op) - 1;
lastRunSize -= (lastRunSize + 240) / 255;
}
ip = anchor + lastRunSize;
if (lastRunSize >= RUN_MASK) {
size_t accumulator = lastRunSize - RUN_MASK;
*op++ = RUN_MASK << ML_BITS;
for (; accumulator >= 255; accumulator -= 255)
*op++ = 255;
*op++ = (BYTE) accumulator;
} else {
*op++ = (BYTE)(lastRunSize<<ML_BITS);
}
LZ4_memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}
/* End */
*srcSizePtr = (int) (((const char *)ip) - src);
return (int) (((char *)op) - dst);
}
static int LZ4_compress_destSize_extState(
LZ4_stream_t *state,
const char *src,
char *dst,
int *srcSizePtr,
int targetDstSize)
{
#if LZ4_ARCH64
const tableType_t tableType = byU32;
#else
const tableType_t tableType = byPtr;
#endif
LZ4_resetStream(state);
if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
/* compression success is guaranteed */
return LZ4_compress_fast_extState(
state, src, dst, *srcSizePtr,
targetDstSize, 1);
} else {
if (*srcSizePtr < LZ4_64Klimit)
return LZ4_compress_destSize_generic(
&state->internal_donotuse,
src, dst, srcSizePtr,
targetDstSize, byU16);
else
return LZ4_compress_destSize_generic(
&state->internal_donotuse,
src, dst, srcSizePtr,
targetDstSize, tableType);
}
}
int LZ4_compress_destSize(
const char *src,
char *dst,
int *srcSizePtr,
int targetDstSize,
void *wrkmem)
{
return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
targetDstSize);
}
EXPORT_SYMBOL(LZ4_compress_destSize);
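/*
 * Illustrative usage sketch (not part of the original file): fill a
 * fixed-size output buffer with as much compressed data as fits. On
 * return, *src_len holds the number of source bytes actually consumed.
 * Names are hypothetical; wrkmem must point to LZ4_MEM_COMPRESS bytes.
 */
#if 0
static int lz4_fill_block_example(const char *src, int *src_len,
				  char *dst, int dst_cap, void *wrkmem)
{
	/* A return value of 0 means nothing could be stored in dst. */
	return LZ4_compress_destSize(src, dst, src_len, dst_cap, wrkmem);
}
#endif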
/*-******************************
* Streaming functions
********************************/
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
{
memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
const char *dictionary, int dictSize)
{
LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
const BYTE *p = (const BYTE *)dictionary;
const BYTE * const dictEnd = p + dictSize;
const BYTE *base;
if ((dict->initCheck)
|| (dict->currentOffset > 1 * GB)) {
/* Uninitialized structure, or reuse overflow */
LZ4_resetStream(LZ4_dict);
}
if (dictSize < (int)HASH_UNIT) {
dict->dictionary = NULL;
dict->dictSize = 0;
return 0;
}
if ((dictEnd - p) > 64 * KB)
p = dictEnd - 64 * KB;
dict->currentOffset += 64 * KB;
base = p - dict->currentOffset;
dict->dictionary = p;
dict->dictSize = (U32)(dictEnd - p);
dict->currentOffset += dict->dictSize;
while (p <= dictEnd - HASH_UNIT) {
LZ4_putPosition(p, dict->hashTable, byU32, base);
p += 3;
}
return dict->dictSize;
}
EXPORT_SYMBOL(LZ4_loadDict);
static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
const BYTE *src)
{
if ((LZ4_dict->currentOffset > 0x80000000) ||
((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
/* address space overflow */
/* rescale hash table */
U32 const delta = LZ4_dict->currentOffset - 64 * KB;
const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
int i;
for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
if (LZ4_dict->hashTable[i] < delta)
LZ4_dict->hashTable[i] = 0;
else
LZ4_dict->hashTable[i] -= delta;
}
LZ4_dict->currentOffset = 64 * KB;
if (LZ4_dict->dictSize > 64 * KB)
LZ4_dict->dictSize = 64 * KB;
LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
}
}
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
{
LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
if ((U32)dictSize > 64 * KB) {
/* useless to define a dictionary > 64 * KB */
dictSize = 64 * KB;
}
if ((U32)dictSize > dict->dictSize)
dictSize = dict->dictSize;
memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
dict->dictionary = (const BYTE *)safeBuffer;
dict->dictSize = (U32)dictSize;
return dictSize;
}
EXPORT_SYMBOL(LZ4_saveDict);
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
char *dest, int inputSize, int maxOutputSize, int acceleration)
{
LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
const BYTE * const dictEnd = streamPtr->dictionary
+ streamPtr->dictSize;
const BYTE *smallest = (const BYTE *) source;
if (streamPtr->initCheck) {
/* Uninitialized structure detected */
return 0;
}
if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
smallest = dictEnd;
LZ4_renormDictT(streamPtr, smallest);
if (acceleration < 1)
acceleration = LZ4_ACCELERATION_DEFAULT;
/* Check overlapping input/dictionary space */
{
const BYTE *sourceEnd = (const BYTE *) source + inputSize;
if ((sourceEnd > streamPtr->dictionary)
&& (sourceEnd < dictEnd)) {
streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
if (streamPtr->dictSize > 64 * KB)
streamPtr->dictSize = 64 * KB;
if (streamPtr->dictSize < 4)
streamPtr->dictSize = 0;
streamPtr->dictionary = dictEnd - streamPtr->dictSize;
}
}
/* prefix mode : source data follows dictionary */
if (dictEnd == (const BYTE *)source) {
int result;
if ((streamPtr->dictSize < 64 * KB) &&
(streamPtr->dictSize < streamPtr->currentOffset)) {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
withPrefix64k, dictSmall, acceleration);
} else {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
withPrefix64k, noDictIssue, acceleration);
}
streamPtr->dictSize += (U32)inputSize;
streamPtr->currentOffset += (U32)inputSize;
return result;
}
/* external dictionary mode */
{
int result;
if ((streamPtr->dictSize < 64 * KB) &&
(streamPtr->dictSize < streamPtr->currentOffset)) {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
usingExtDict, dictSmall, acceleration);
} else {
result = LZ4_compress_generic(
streamPtr, source, dest, inputSize,
maxOutputSize, limitedOutput, byU32,
usingExtDict, noDictIssue, acceleration);
}
streamPtr->dictionary = (const BYTE *)source;
streamPtr->dictSize = (U32)inputSize;
streamPtr->currentOffset += (U32)inputSize;
return result;
}
}
EXPORT_SYMBOL(LZ4_compress_fast_continue);
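/*
 * Illustrative streaming sketch (not part of the original file): compress a
 * sequence of chunks so that later chunks can reference earlier ones as a
 * dictionary. The stream state is caller-provided (LZ4_MEM_COMPRESS bytes)
 * and the chunk buffers must remain valid across calls, since the dictionary
 * points into previously passed source data. All names are hypothetical.
 */
#if 0
static int lz4_stream_example(LZ4_stream_t *stream,
			      const char * const chunks[], const int lens[],
			      int nr_chunks, char *dst, int dst_cap)
{
	int i;
	int used = 0;
	LZ4_resetStream(stream);
	for (i = 0; i < nr_chunks; i++) {
		int r = LZ4_compress_fast_continue(stream, chunks[i],
						   dst + used, lens[i],
						   dst_cap - used,
						   LZ4_ACCELERATION_DEFAULT);
		if (r <= 0)
			return -EINVAL;
		used += r;
	}
	return used;
}
#endif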
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
| linux-master | lib/lz4/lz4_compress.c |
/*
* .xz Stream decoder
*
* Author: Lasse Collin <[email protected]>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#include "xz_private.h"
#include "xz_stream.h"
/* Hash used to validate the Index field */
struct xz_dec_hash {
vli_type unpadded;
vli_type uncompressed;
uint32_t crc32;
};
struct xz_dec {
/* Position in dec_main() */
enum {
SEQ_STREAM_HEADER,
SEQ_BLOCK_START,
SEQ_BLOCK_HEADER,
SEQ_BLOCK_UNCOMPRESS,
SEQ_BLOCK_PADDING,
SEQ_BLOCK_CHECK,
SEQ_INDEX,
SEQ_INDEX_PADDING,
SEQ_INDEX_CRC32,
SEQ_STREAM_FOOTER
} sequence;
/* Position in variable-length integers and Check fields */
uint32_t pos;
/* Variable-length integer decoded by dec_vli() */
vli_type vli;
/* Saved in_pos and out_pos */
size_t in_start;
size_t out_start;
/* CRC32 value in Block or Index */
uint32_t crc32;
/* Type of the integrity check calculated from uncompressed data */
enum xz_check check_type;
/* Operation mode */
enum xz_mode mode;
/*
* True if the next call to xz_dec_run() is allowed to return
* XZ_BUF_ERROR.
*/
bool allow_buf_error;
/* Information stored in Block Header */
struct {
/*
* Value stored in the Compressed Size field, or
* VLI_UNKNOWN if Compressed Size is not present.
*/
vli_type compressed;
/*
* Value stored in the Uncompressed Size field, or
* VLI_UNKNOWN if Uncompressed Size is not present.
*/
vli_type uncompressed;
/* Size of the Block Header field */
uint32_t size;
} block_header;
/* Information collected when decoding Blocks */
struct {
/* Observed compressed size of the current Block */
vli_type compressed;
/* Observed uncompressed size of the current Block */
vli_type uncompressed;
/* Number of Blocks decoded so far */
vli_type count;
/*
* Hash calculated from the Block sizes. This is used to
* validate the Index field.
*/
struct xz_dec_hash hash;
} block;
/* Variables needed when verifying the Index field */
struct {
/* Position in dec_index() */
enum {
SEQ_INDEX_COUNT,
SEQ_INDEX_UNPADDED,
SEQ_INDEX_UNCOMPRESSED
} sequence;
/* Size of the Index in bytes */
vli_type size;
/* Number of Records (matches block.count in valid files) */
vli_type count;
/*
* Hash calculated from the Records (matches block.hash in
* valid files).
*/
struct xz_dec_hash hash;
} index;
/*
* Temporary buffer needed to hold Stream Header, Block Header,
* and Stream Footer. The Block Header is the biggest (1 KiB)
* so we reserve space according to that. buf[] has to be aligned
* to a multiple of four bytes; the size_t variables before it
* should guarantee this.
*/
struct {
size_t pos;
size_t size;
uint8_t buf[1024];
} temp;
struct xz_dec_lzma2 *lzma2;
#ifdef XZ_DEC_BCJ
struct xz_dec_bcj *bcj;
bool bcj_active;
#endif
};
#ifdef XZ_DEC_ANY_CHECK
/* Sizes of the Check field with different Check IDs */
static const uint8_t check_sizes[16] = {
0,
4, 4, 4,
8, 8, 8,
16, 16, 16,
32, 32, 32,
64, 64, 64
};
#endif
/*
* Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
* must have set s->temp.size to indicate how much data we are supposed
* to copy into s->temp.buf. Return true once s->temp.pos has reached
* s->temp.size.
*/
static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
{
size_t copy_size = min_t(size_t,
b->in_size - b->in_pos, s->temp.size - s->temp.pos);
memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
b->in_pos += copy_size;
s->temp.pos += copy_size;
if (s->temp.pos == s->temp.size) {
s->temp.pos = 0;
return true;
}
return false;
}
/* Decode a variable-length integer (little-endian base-128 encoding) */
static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
size_t *in_pos, size_t in_size)
{
uint8_t byte;
if (s->pos == 0)
s->vli = 0;
while (*in_pos < in_size) {
byte = in[*in_pos];
++*in_pos;
s->vli |= (vli_type)(byte & 0x7F) << s->pos;
if ((byte & 0x80) == 0) {
/* Don't allow non-minimal encodings. */
if (byte == 0 && s->pos != 0)
return XZ_DATA_ERROR;
s->pos = 0;
return XZ_STREAM_END;
}
s->pos += 7;
if (s->pos == 7 * VLI_BYTES_MAX)
return XZ_DATA_ERROR;
}
return XZ_OK;
}
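/*
 * Illustrative sketch (not part of the original file): the inverse of
 * dec_vli() above, showing the little-endian base-128 encoding that it
 * parses. For example, 300 encodes as the bytes 0xAC 0x02. The function
 * name is hypothetical and nothing in this file calls it.
 */
#if 0
static size_t enc_vli_example(vli_type val, uint8_t *out)
{
	size_t count = 0;
	while (val >= 0x80) {
		out[count++] = (uint8_t)(val & 0x7F) | 0x80;
		val >>= 7;
	}
	out[count++] = (uint8_t)val;
	return count;
}
#endif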
/*
* Decode the Compressed Data field from a Block. Update and validate
* the observed compressed and uncompressed sizes of the Block so that
* they don't exceed the values possibly stored in the Block Header
* (validation assumes that no integer overflow occurs, since vli_type
* is normally uint64_t). Update the CRC32 if presence of the CRC32
* field was indicated in Stream Header.
*
* Once the decoding is finished, validate that the observed sizes match
* the sizes possibly stored in the Block Header. Update the hash and
* Block count, which are later used to validate the Index field.
*/
static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
{
enum xz_ret ret;
s->in_start = b->in_pos;
s->out_start = b->out_pos;
#ifdef XZ_DEC_BCJ
if (s->bcj_active)
ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
else
#endif
ret = xz_dec_lzma2_run(s->lzma2, b);
s->block.compressed += b->in_pos - s->in_start;
s->block.uncompressed += b->out_pos - s->out_start;
/*
* There is no need to separately check for VLI_UNKNOWN, since
* the observed sizes are always smaller than VLI_UNKNOWN.
*/
if (s->block.compressed > s->block_header.compressed
|| s->block.uncompressed
> s->block_header.uncompressed)
return XZ_DATA_ERROR;
if (s->check_type == XZ_CHECK_CRC32)
s->crc32 = xz_crc32(b->out + s->out_start,
b->out_pos - s->out_start, s->crc32);
if (ret == XZ_STREAM_END) {
if (s->block_header.compressed != VLI_UNKNOWN
&& s->block_header.compressed
!= s->block.compressed)
return XZ_DATA_ERROR;
if (s->block_header.uncompressed != VLI_UNKNOWN
&& s->block_header.uncompressed
!= s->block.uncompressed)
return XZ_DATA_ERROR;
s->block.hash.unpadded += s->block_header.size
+ s->block.compressed;
#ifdef XZ_DEC_ANY_CHECK
s->block.hash.unpadded += check_sizes[s->check_type];
#else
if (s->check_type == XZ_CHECK_CRC32)
s->block.hash.unpadded += 4;
#endif
s->block.hash.uncompressed += s->block.uncompressed;
s->block.hash.crc32 = xz_crc32(
(const uint8_t *)&s->block.hash,
sizeof(s->block.hash), s->block.hash.crc32);
++s->block.count;
}
return ret;
}
/* Update the Index size and the CRC32 value. */
static void index_update(struct xz_dec *s, const struct xz_buf *b)
{
size_t in_used = b->in_pos - s->in_start;
s->index.size += in_used;
s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
}
/*
* Decode the Number of Records, Unpadded Size, and Uncompressed Size
* fields from the Index field. That is, Index Padding and CRC32 are not
* decoded by this function.
*
* This can return XZ_OK (more input needed), XZ_STREAM_END (everything
* successfully decoded), or XZ_DATA_ERROR (input is corrupt).
*/
static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
{
enum xz_ret ret;
do {
ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
if (ret != XZ_STREAM_END) {
index_update(s, b);
return ret;
}
switch (s->index.sequence) {
case SEQ_INDEX_COUNT:
s->index.count = s->vli;
/*
* Validate that the Number of Records field
* indicates the same number of Records as
* there were Blocks in the Stream.
*/
if (s->index.count != s->block.count)
return XZ_DATA_ERROR;
s->index.sequence = SEQ_INDEX_UNPADDED;
break;
case SEQ_INDEX_UNPADDED:
s->index.hash.unpadded += s->vli;
s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
break;
case SEQ_INDEX_UNCOMPRESSED:
s->index.hash.uncompressed += s->vli;
s->index.hash.crc32 = xz_crc32(
(const uint8_t *)&s->index.hash,
sizeof(s->index.hash),
s->index.hash.crc32);
--s->index.count;
s->index.sequence = SEQ_INDEX_UNPADDED;
break;
}
} while (s->index.count > 0);
return XZ_STREAM_END;
}
/*
* Validate that the next four input bytes match the value of s->crc32.
* s->pos must be zero when starting to validate the first byte.
*/
static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b)
{
do {
if (b->in_pos == b->in_size)
return XZ_OK;
if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
return XZ_DATA_ERROR;
s->pos += 8;
} while (s->pos < 32);
s->crc32 = 0;
s->pos = 0;
return XZ_STREAM_END;
}
#ifdef XZ_DEC_ANY_CHECK
/*
* Skip over the Check field when the Check ID is not supported.
* Returns true once the whole Check field has been skipped over.
*/
static bool check_skip(struct xz_dec *s, struct xz_buf *b)
{
while (s->pos < check_sizes[s->check_type]) {
if (b->in_pos == b->in_size)
return false;
++b->in_pos;
++s->pos;
}
s->pos = 0;
return true;
}
#endif
/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
static enum xz_ret dec_stream_header(struct xz_dec *s)
{
if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
return XZ_FORMAT_ERROR;
if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
return XZ_DATA_ERROR;
if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
return XZ_OPTIONS_ERROR;
/*
* Of integrity checks, we support only none (Check ID = 0) and
* CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
* we will accept other check types too, but then the check won't
* be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
*/
if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX)
return XZ_OPTIONS_ERROR;
s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
#ifdef XZ_DEC_ANY_CHECK
if (s->check_type > XZ_CHECK_CRC32)
return XZ_UNSUPPORTED_CHECK;
#else
if (s->check_type > XZ_CHECK_CRC32)
return XZ_OPTIONS_ERROR;
#endif
return XZ_OK;
}
/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
static enum xz_ret dec_stream_footer(struct xz_dec *s)
{
if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
return XZ_DATA_ERROR;
if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
return XZ_DATA_ERROR;
/*
* Validate Backward Size. Note that we never added the size of the
* Index CRC32 field to s->index.size, so the value stored in the
* Backward Size field equals s->index.size / 4 exactly.
*/
if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
return XZ_DATA_ERROR;
if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
return XZ_DATA_ERROR;
/*
* Use XZ_STREAM_END instead of XZ_OK to be more convenient
* for the caller.
*/
return XZ_STREAM_END;
}
/* Decode the Block Header and initialize the filter chain. */
static enum xz_ret dec_block_header(struct xz_dec *s)
{
enum xz_ret ret;
/*
* Validate the CRC32. We know that the temp buffer is at least
* eight bytes so this is safe.
*/
s->temp.size -= 4;
if (xz_crc32(s->temp.buf, s->temp.size, 0)
!= get_le32(s->temp.buf + s->temp.size))
return XZ_DATA_ERROR;
s->temp.pos = 2;
/*
* Catch unsupported Block Flags. We support only one or two filters
* in the chain, so we catch that with the same test.
*/
#ifdef XZ_DEC_BCJ
if (s->temp.buf[1] & 0x3E)
#else
if (s->temp.buf[1] & 0x3F)
#endif
return XZ_OPTIONS_ERROR;
/* Compressed Size */
if (s->temp.buf[1] & 0x40) {
if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
!= XZ_STREAM_END)
return XZ_DATA_ERROR;
s->block_header.compressed = s->vli;
} else {
s->block_header.compressed = VLI_UNKNOWN;
}
/* Uncompressed Size */
if (s->temp.buf[1] & 0x80) {
if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
!= XZ_STREAM_END)
return XZ_DATA_ERROR;
s->block_header.uncompressed = s->vli;
} else {
s->block_header.uncompressed = VLI_UNKNOWN;
}
#ifdef XZ_DEC_BCJ
/* If there are two filters, the first one must be a BCJ filter. */
s->bcj_active = s->temp.buf[1] & 0x01;
if (s->bcj_active) {
if (s->temp.size - s->temp.pos < 2)
return XZ_OPTIONS_ERROR;
ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
if (ret != XZ_OK)
return ret;
/*
* We don't support custom start offset,
* so Size of Properties must be zero.
*/
if (s->temp.buf[s->temp.pos++] != 0x00)
return XZ_OPTIONS_ERROR;
}
#endif
/* Valid Filter Flags always take at least two bytes. */
if (s->temp.size - s->temp.pos < 2)
return XZ_DATA_ERROR;
/* Filter ID = LZMA2 */
if (s->temp.buf[s->temp.pos++] != 0x21)
return XZ_OPTIONS_ERROR;
/* Size of Properties = 1-byte Filter Properties */
if (s->temp.buf[s->temp.pos++] != 0x01)
return XZ_OPTIONS_ERROR;
/* Filter Properties contains LZMA2 dictionary size. */
if (s->temp.size - s->temp.pos < 1)
return XZ_DATA_ERROR;
ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
if (ret != XZ_OK)
return ret;
/* The rest must be Header Padding. */
while (s->temp.pos < s->temp.size)
if (s->temp.buf[s->temp.pos++] != 0x00)
return XZ_OPTIONS_ERROR;
s->temp.pos = 0;
s->block.compressed = 0;
s->block.uncompressed = 0;
return XZ_OK;
}
static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
{
enum xz_ret ret;
/*
* Store the start position for the case when we are in the middle
* of the Index field.
*/
s->in_start = b->in_pos;
while (true) {
switch (s->sequence) {
case SEQ_STREAM_HEADER:
/*
* Stream Header is copied to s->temp, and then
* decoded from there. This way if the caller
* gives us only little input at a time, we can
* still keep the Stream Header decoding code
* simple. A similar approach is used in many places
* in this file.
*/
if (!fill_temp(s, b))
return XZ_OK;
/*
* If dec_stream_header() returns
* XZ_UNSUPPORTED_CHECK, it is still possible
* to continue decoding if working in multi-call
* mode. Thus, update s->sequence before calling
* dec_stream_header().
*/
s->sequence = SEQ_BLOCK_START;
ret = dec_stream_header(s);
if (ret != XZ_OK)
return ret;
fallthrough;
case SEQ_BLOCK_START:
/* We need one byte of input to continue. */
if (b->in_pos == b->in_size)
return XZ_OK;
/* See if this is the beginning of the Index field. */
if (b->in[b->in_pos] == 0) {
s->in_start = b->in_pos++;
s->sequence = SEQ_INDEX;
break;
}
/*
* Calculate the size of the Block Header and
* prepare to decode it.
*/
s->block_header.size
= ((uint32_t)b->in[b->in_pos] + 1) * 4;
s->temp.size = s->block_header.size;
s->temp.pos = 0;
s->sequence = SEQ_BLOCK_HEADER;
fallthrough;
case SEQ_BLOCK_HEADER:
if (!fill_temp(s, b))
return XZ_OK;
ret = dec_block_header(s);
if (ret != XZ_OK)
return ret;
s->sequence = SEQ_BLOCK_UNCOMPRESS;
fallthrough;
case SEQ_BLOCK_UNCOMPRESS:
ret = dec_block(s, b);
if (ret != XZ_STREAM_END)
return ret;
s->sequence = SEQ_BLOCK_PADDING;
fallthrough;
case SEQ_BLOCK_PADDING:
/*
* Size of Compressed Data + Block Padding
* must be a multiple of four. We don't need
* s->block.compressed for anything else
* anymore, so we use it here to test the size
* of the Block Padding field.
*/
while (s->block.compressed & 3) {
if (b->in_pos == b->in_size)
return XZ_OK;
if (b->in[b->in_pos++] != 0)
return XZ_DATA_ERROR;
++s->block.compressed;
}
s->sequence = SEQ_BLOCK_CHECK;
fallthrough;
case SEQ_BLOCK_CHECK:
if (s->check_type == XZ_CHECK_CRC32) {
ret = crc32_validate(s, b);
if (ret != XZ_STREAM_END)
return ret;
}
#ifdef XZ_DEC_ANY_CHECK
else if (!check_skip(s, b)) {
return XZ_OK;
}
#endif
s->sequence = SEQ_BLOCK_START;
break;
case SEQ_INDEX:
ret = dec_index(s, b);
if (ret != XZ_STREAM_END)
return ret;
s->sequence = SEQ_INDEX_PADDING;
fallthrough;
case SEQ_INDEX_PADDING:
while ((s->index.size + (b->in_pos - s->in_start))
& 3) {
if (b->in_pos == b->in_size) {
index_update(s, b);
return XZ_OK;
}
if (b->in[b->in_pos++] != 0)
return XZ_DATA_ERROR;
}
/* Finish the CRC32 value and Index size. */
index_update(s, b);
/* Compare the hashes to validate the Index field. */
if (!memeq(&s->block.hash, &s->index.hash,
sizeof(s->block.hash)))
return XZ_DATA_ERROR;
s->sequence = SEQ_INDEX_CRC32;
fallthrough;
case SEQ_INDEX_CRC32:
ret = crc32_validate(s, b);
if (ret != XZ_STREAM_END)
return ret;
s->temp.size = STREAM_HEADER_SIZE;
s->sequence = SEQ_STREAM_FOOTER;
fallthrough;
case SEQ_STREAM_FOOTER:
if (!fill_temp(s, b))
return XZ_OK;
return dec_stream_footer(s);
}
}
/* Never reached */
}
/*
* xz_dec_run() is a wrapper for dec_main() to handle some special cases in
* multi-call and single-call decoding.
*
* In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
* are not going to make any progress anymore. This is to prevent the caller
* from calling us infinitely when the input file is truncated or otherwise
* corrupt. Since zlib-style API allows that the caller fills the input buffer
* only when the decoder doesn't produce any new output, we have to be careful
* to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
* after the second consecutive call to xz_dec_run() that makes no progress.
*
* In single-call mode, if we couldn't decode everything and no error
* occurred, either the input is truncated or the output buffer is too small.
* Since we know that the last input byte never produces any output, we know
* that if all the input was consumed and decoding wasn't finished, the file
* must be corrupt. Otherwise the output buffer has to be too small or the
* file is corrupt in a way that decoding it produces too big output.
*
* If single-call decoding fails, we reset b->in_pos and b->out_pos back to
* their original values. This is because with some filter chains there won't
* be any valid uncompressed data in the output buffer unless the decoding
* actually succeeds (that's the price to pay of using the output buffer as
* the workspace).
*/
XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
{
size_t in_start;
size_t out_start;
enum xz_ret ret;
if (DEC_IS_SINGLE(s->mode))
xz_dec_reset(s);
in_start = b->in_pos;
out_start = b->out_pos;
ret = dec_main(s, b);
if (DEC_IS_SINGLE(s->mode)) {
if (ret == XZ_OK)
ret = b->in_pos == b->in_size
? XZ_DATA_ERROR : XZ_BUF_ERROR;
if (ret != XZ_STREAM_END) {
b->in_pos = in_start;
b->out_pos = out_start;
}
} else if (ret == XZ_OK && in_start == b->in_pos
&& out_start == b->out_pos) {
if (s->allow_buf_error)
ret = XZ_BUF_ERROR;
s->allow_buf_error = true;
} else {
s->allow_buf_error = false;
}
return ret;
}
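/*
 * Illustrative usage sketch (not part of the original file): single-call
 * decoding of a complete .xz buffer. Buffer names are hypothetical, and
 * xz_crc32_init() is assumed to have been called already when the internal
 * CRC32 implementation is in use.
 */
#if 0
static int xz_single_call_example(const uint8_t *in, size_t in_size,
				  uint8_t *out, size_t out_size)
{
	struct xz_buf b = {
		.in = in, .in_pos = 0, .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0);
	enum xz_ret ret;
	if (s == NULL)
		return -ENOMEM;
	ret = xz_dec_run(s, &b);
	xz_dec_end(s);
	return ret == XZ_STREAM_END ? (int)b.out_pos : -EINVAL;
}
#endif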
XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
return NULL;
s->mode = mode;
#ifdef XZ_DEC_BCJ
s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
if (s->bcj == NULL)
goto error_bcj;
#endif
s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
if (s->lzma2 == NULL)
goto error_lzma2;
xz_dec_reset(s);
return s;
error_lzma2:
#ifdef XZ_DEC_BCJ
xz_dec_bcj_end(s->bcj);
error_bcj:
#endif
kfree(s);
return NULL;
}
XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
{
s->sequence = SEQ_STREAM_HEADER;
s->allow_buf_error = false;
s->pos = 0;
s->crc32 = 0;
memzero(&s->block, sizeof(s->block));
memzero(&s->index, sizeof(s->index));
s->temp.pos = 0;
s->temp.size = STREAM_HEADER_SIZE;
}
XZ_EXTERN void xz_dec_end(struct xz_dec *s)
{
if (s != NULL) {
xz_dec_lzma2_end(s->lzma2);
#ifdef XZ_DEC_BCJ
xz_dec_bcj_end(s->bcj);
#endif
kfree(s);
}
}
| linux-master | lib/xz/xz_dec_stream.c |
/*
* CRC32 using the polynomial from IEEE-802.3
*
* Authors: Lasse Collin <[email protected]>
* Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
/*
* This is not the fastest implementation, but it is pretty compact.
* The fastest versions of xz_crc32() on modern CPUs without hardware
* accelerated CRC instruction are 3-5 times as fast as this version,
* but they are bigger and use more memory for the lookup table.
*/
#include "xz_private.h"
/*
* STATIC_RW_DATA is used in the pre-boot environment on some architectures.
* See <linux/decompress/mm.h> for details.
*/
#ifndef STATIC_RW_DATA
# define STATIC_RW_DATA static
#endif
STATIC_RW_DATA uint32_t xz_crc32_table[256];
XZ_EXTERN void xz_crc32_init(void)
{
const uint32_t poly = CRC32_POLY_LE;
uint32_t i;
uint32_t j;
uint32_t r;
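/*
 * The update below is a branchless form of the usual bit-reflected
 * CRC-32 table construction step:
 * "if (r & 1) r = (r >> 1) ^ poly; else r >>= 1;".
 */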
for (i = 0; i < 256; ++i) {
r = i;
for (j = 0; j < 8; ++j)
r = (r >> 1) ^ (poly & ~((r & 1) - 1));
xz_crc32_table[i] = r;
}
return;
}
XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
{
crc = ~crc;
while (size != 0) {
crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
--size;
}
return ~crc;
}
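/*
 * Illustrative self-test sketch (not part of the original file): the table
 * must be initialized once with xz_crc32_init() before xz_crc32() is used.
 * The expected value is the well-known CRC-32 of "123456789" (0xCBF43926);
 * the function name is hypothetical.
 */
#if 0
static bool xz_crc32_selftest_example(void)
{
	static const uint8_t msg[9] = "123456789";
	xz_crc32_init();
	return xz_crc32(msg, sizeof(msg), 0) == 0xCBF43926;
}
#endif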
| linux-master | lib/xz/xz_crc32.c |
/*
* Branch/Call/Jump (BCJ) filter decoders
*
* Authors: Lasse Collin <[email protected]>
* Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#include "xz_private.h"
/*
* The rest of the file is inside this ifdef. It makes things a little more
* convenient when building without support for any BCJ filters.
*/
#ifdef XZ_DEC_BCJ
struct xz_dec_bcj {
/* Type of the BCJ filter being used */
enum {
BCJ_X86 = 4, /* x86 or x86-64 */
BCJ_POWERPC = 5, /* Big endian only */
BCJ_IA64 = 6, /* Big or little endian */
BCJ_ARM = 7, /* Little endian only */
BCJ_ARMTHUMB = 8, /* Little endian only */
BCJ_SPARC = 9 /* Big or little endian */
} type;
/*
* Return value of the next filter in the chain. We need to preserve
* this information across calls, because we must not call the next
* filter anymore once it has returned XZ_STREAM_END.
*/
enum xz_ret ret;
/* True if we are operating in single-call mode. */
bool single_call;
/*
* Absolute position relative to the beginning of the uncompressed
* data (in a single .xz Block). We care only about the lowest 32
* bits so this doesn't need to be uint64_t even with big files.
*/
uint32_t pos;
/* x86 filter state */
uint32_t x86_prev_mask;
/* Temporary space to hold the variables from struct xz_buf */
uint8_t *out;
size_t out_pos;
size_t out_size;
struct {
/* Amount of already filtered data in the beginning of buf */
size_t filtered;
/* Total amount of data currently stored in buf */
size_t size;
/*
* Buffer to hold a mix of filtered and unfiltered data. This
* needs to be big enough to hold Alignment + 2 * Look-ahead:
*
* Type Alignment Look-ahead
* x86 1 4
* PowerPC 4 0
* IA-64 16 0
* ARM 4 0
* ARM-Thumb 2 2
* SPARC 4 0
*/
uint8_t buf[16];
} temp;
};
#ifdef XZ_DEC_X86
/*
* This is used to test the most significant byte of a memory address
* in an x86 instruction.
*/
static inline int bcj_x86_test_msbyte(uint8_t b)
{
return b == 0x00 || b == 0xFF;
}
static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
static const bool mask_to_allowed_status[8]
= { true, true, true, false, true, false, false, false };
static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
size_t i;
size_t prev_pos = (size_t)-1;
uint32_t prev_mask = s->x86_prev_mask;
uint32_t src;
uint32_t dest;
uint32_t j;
uint8_t b;
if (size <= 4)
return 0;
size -= 4;
for (i = 0; i < size; ++i) {
if ((buf[i] & 0xFE) != 0xE8)
continue;
prev_pos = i - prev_pos;
if (prev_pos > 3) {
prev_mask = 0;
} else {
prev_mask = (prev_mask << (prev_pos - 1)) & 7;
if (prev_mask != 0) {
b = buf[i + 4 - mask_to_bit_num[prev_mask]];
if (!mask_to_allowed_status[prev_mask]
|| bcj_x86_test_msbyte(b)) {
prev_pos = i;
prev_mask = (prev_mask << 1) | 1;
continue;
}
}
}
prev_pos = i;
if (bcj_x86_test_msbyte(buf[i + 4])) {
src = get_unaligned_le32(buf + i + 1);
while (true) {
dest = src - (s->pos + (uint32_t)i + 5);
if (prev_mask == 0)
break;
j = mask_to_bit_num[prev_mask] * 8;
b = (uint8_t)(dest >> (24 - j));
if (!bcj_x86_test_msbyte(b))
break;
src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
}
dest &= 0x01FFFFFF;
dest |= (uint32_t)0 - (dest & 0x01000000);
put_unaligned_le32(dest, buf + i + 1);
i += 4;
} else {
prev_mask = (prev_mask << 1) | 1;
}
}
prev_pos = i - prev_pos;
s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
return i;
}
#endif
#ifdef XZ_DEC_POWERPC
static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
size_t i;
uint32_t instr;
for (i = 0; i + 4 <= size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr & 0xFC000003) == 0x48000001) {
instr &= 0x03FFFFFC;
instr -= s->pos + (uint32_t)i;
instr &= 0x03FFFFFC;
instr |= 0x48000001;
put_unaligned_be32(instr, buf + i);
}
}
return i;
}
#endif
#ifdef XZ_DEC_IA64
static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
static const uint8_t branch_table[32] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 4, 6, 6, 0, 0, 7, 7,
4, 4, 0, 0, 4, 4, 0, 0
};
/*
* The local variables take a little bit stack space, but it's less
* than what LZMA2 decoder takes, so it doesn't make sense to reduce
* stack usage here without doing that for the LZMA2 decoder too.
*/
/* Loop counters */
size_t i;
size_t j;
/* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
uint32_t slot;
/* Bitwise offset of the instruction indicated by slot */
uint32_t bit_pos;
/* bit_pos split into byte and bit parts */
uint32_t byte_pos;
uint32_t bit_res;
/* Address part of an instruction */
uint32_t addr;
/* Mask used to detect which instructions to convert */
uint32_t mask;
/* 41-bit instruction stored somewhere in the lowest 48 bits */
uint64_t instr;
/* Instruction normalized with bit_res for easier manipulation */
uint64_t norm;
for (i = 0; i + 16 <= size; i += 16) {
mask = branch_table[buf[i] & 0x1F];
for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
if (((mask >> slot) & 1) == 0)
continue;
byte_pos = bit_pos >> 3;
bit_res = bit_pos & 7;
instr = 0;
for (j = 0; j < 6; ++j)
instr |= (uint64_t)(buf[i + j + byte_pos])
<< (8 * j);
norm = instr >> bit_res;
if (((norm >> 37) & 0x0F) == 0x05
&& ((norm >> 9) & 0x07) == 0) {
addr = (norm >> 13) & 0x0FFFFF;
addr |= ((uint32_t)(norm >> 36) & 1) << 20;
addr <<= 4;
addr -= s->pos + (uint32_t)i;
addr >>= 4;
norm &= ~((uint64_t)0x8FFFFF << 13);
norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
norm |= (uint64_t)(addr & 0x100000)
<< (36 - 20);
instr &= (1 << bit_res) - 1;
instr |= norm << bit_res;
for (j = 0; j < 6; j++)
buf[i + j + byte_pos]
= (uint8_t)(instr >> (8 * j));
}
}
}
return i;
}
#endif
#ifdef XZ_DEC_ARM
static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
size_t i;
uint32_t addr;
for (i = 0; i + 4 <= size; i += 4) {
if (buf[i + 3] == 0xEB) {
addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
| ((uint32_t)buf[i + 2] << 16);
addr <<= 2;
addr -= s->pos + (uint32_t)i + 8;
addr >>= 2;
buf[i] = (uint8_t)addr;
buf[i + 1] = (uint8_t)(addr >> 8);
buf[i + 2] = (uint8_t)(addr >> 16);
}
}
return i;
}
#endif
#ifdef XZ_DEC_ARMTHUMB
static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
size_t i;
uint32_t addr;
for (i = 0; i + 4 <= size; i += 2) {
if ((buf[i + 1] & 0xF8) == 0xF0
&& (buf[i + 3] & 0xF8) == 0xF8) {
addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
| ((uint32_t)buf[i] << 11)
| (((uint32_t)buf[i + 3] & 0x07) << 8)
| (uint32_t)buf[i + 2];
addr <<= 1;
addr -= s->pos + (uint32_t)i + 4;
addr >>= 1;
buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
buf[i] = (uint8_t)(addr >> 11);
buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
buf[i + 2] = (uint8_t)addr;
i += 2;
}
}
return i;
}
#endif
#ifdef XZ_DEC_SPARC
static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
{
size_t i;
uint32_t instr;
for (i = 0; i + 4 <= size; i += 4) {
instr = get_unaligned_be32(buf + i);
if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
instr <<= 2;
instr -= s->pos + (uint32_t)i;
instr >>= 2;
instr = ((uint32_t)0x40000000 - (instr & 0x400000))
| 0x40000000 | (instr & 0x3FFFFF);
put_unaligned_be32(instr, buf + i);
}
}
return i;
}
#endif
/*
* Apply the selected BCJ filter. Update *pos and s->pos to match the amount
* of data that got filtered.
*
* NOTE: This is implemented as a switch statement to avoid using function
* pointers, which could be problematic in the kernel boot code, which must
* avoid pointers to static data (at least on x86).
*/
static void bcj_apply(struct xz_dec_bcj *s,
uint8_t *buf, size_t *pos, size_t size)
{
size_t filtered;
buf += *pos;
size -= *pos;
switch (s->type) {
#ifdef XZ_DEC_X86
case BCJ_X86:
filtered = bcj_x86(s, buf, size);
break;
#endif
#ifdef XZ_DEC_POWERPC
case BCJ_POWERPC:
filtered = bcj_powerpc(s, buf, size);
break;
#endif
#ifdef XZ_DEC_IA64
case BCJ_IA64:
filtered = bcj_ia64(s, buf, size);
break;
#endif
#ifdef XZ_DEC_ARM
case BCJ_ARM:
filtered = bcj_arm(s, buf, size);
break;
#endif
#ifdef XZ_DEC_ARMTHUMB
case BCJ_ARMTHUMB:
filtered = bcj_armthumb(s, buf, size);
break;
#endif
#ifdef XZ_DEC_SPARC
case BCJ_SPARC:
filtered = bcj_sparc(s, buf, size);
break;
#endif
default:
/* Never reached but silence compiler warnings. */
filtered = 0;
break;
}
*pos += filtered;
s->pos += filtered;
}
/*
* Flush pending filtered data from temp to the output buffer.
* Move the remaining mixture of possibly filtered and unfiltered
* data to the beginning of temp.
*/
static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
{
size_t copy_size;
copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
b->out_pos += copy_size;
s->temp.filtered -= copy_size;
s->temp.size -= copy_size;
memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
}
/*
* The BCJ filter functions are primitive in the sense that they process the
* data in chunks of 1-16 bytes. To hide this issue, this function does
* some buffering.
*/
XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
struct xz_dec_lzma2 *lzma2,
struct xz_buf *b)
{
size_t out_start;
/*
* Flush pending already filtered data to the output buffer. Return
* immediately if we couldn't flush everything, or if the next
* filter in the chain had already returned XZ_STREAM_END.
*/
if (s->temp.filtered > 0) {
bcj_flush(s, b);
if (s->temp.filtered > 0)
return XZ_OK;
if (s->ret == XZ_STREAM_END)
return XZ_STREAM_END;
}
/*
* If we have more output space than what is currently pending in
* temp, copy the unfiltered data from temp to the output buffer
* and try to fill the output buffer by decoding more data from the
* next filter in the chain. Apply the BCJ filter on the new data
* in the output buffer. If everything cannot be filtered, copy it
* to temp and rewind the output buffer position accordingly.
*
* This needs to be always run when temp.size == 0 to handle a special
* case where the output buffer is full and the next filter has no
* more output coming but hasn't returned XZ_STREAM_END yet.
*/
if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
out_start = b->out_pos;
memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
b->out_pos += s->temp.size;
s->ret = xz_dec_lzma2_run(lzma2, b);
if (s->ret != XZ_STREAM_END
&& (s->ret != XZ_OK || s->single_call))
return s->ret;
bcj_apply(s, b->out, &out_start, b->out_pos);
/*
* As an exception, if the next filter returned XZ_STREAM_END,
* we can do that too, since the last few bytes that remain
* unfiltered are meant to remain unfiltered.
*/
if (s->ret == XZ_STREAM_END)
return XZ_STREAM_END;
s->temp.size = b->out_pos - out_start;
b->out_pos -= s->temp.size;
memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
/*
* If there wasn't enough input to the next filter to fill
* the output buffer with unfiltered data, there's no point
* to try decoding more data to temp.
*/
if (b->out_pos + s->temp.size < b->out_size)
return XZ_OK;
}
/*
* We have unfiltered data in temp. If the output buffer isn't full
* yet, try to fill the temp buffer by decoding more data from the
* next filter. Apply the BCJ filter on temp. Then we hopefully can
* fill the actual output buffer by copying filtered data from temp.
* A mix of filtered and unfiltered data may be left in temp; it will
* be taken care of on the next call to this function.
*/
if (b->out_pos < b->out_size) {
/* Make b->out{,_pos,_size} temporarily point to s->temp. */
s->out = b->out;
s->out_pos = b->out_pos;
s->out_size = b->out_size;
b->out = s->temp.buf;
b->out_pos = s->temp.size;
b->out_size = sizeof(s->temp.buf);
s->ret = xz_dec_lzma2_run(lzma2, b);
s->temp.size = b->out_pos;
b->out = s->out;
b->out_pos = s->out_pos;
b->out_size = s->out_size;
if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
return s->ret;
bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
/*
* If the next filter returned XZ_STREAM_END, we mark that
* everything is filtered, since the last unfiltered bytes
* of the stream are meant to be left as is.
*/
if (s->ret == XZ_STREAM_END)
s->temp.filtered = s->temp.size;
bcj_flush(s, b);
if (s->temp.filtered > 0)
return XZ_OK;
}
return s->ret;
}
XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
{
struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s != NULL)
s->single_call = single_call;
return s;
}
XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
{
switch (id) {
#ifdef XZ_DEC_X86
case BCJ_X86:
#endif
#ifdef XZ_DEC_POWERPC
case BCJ_POWERPC:
#endif
#ifdef XZ_DEC_IA64
case BCJ_IA64:
#endif
#ifdef XZ_DEC_ARM
case BCJ_ARM:
#endif
#ifdef XZ_DEC_ARMTHUMB
case BCJ_ARMTHUMB:
#endif
#ifdef XZ_DEC_SPARC
case BCJ_SPARC:
#endif
break;
default:
/* Unsupported Filter ID */
return XZ_OPTIONS_ERROR;
}
s->type = id;
s->ret = XZ_OK;
s->pos = 0;
s->x86_prev_mask = 0;
s->temp.filtered = 0;
s->temp.size = 0;
return XZ_OK;
}
#endif
| linux-master | lib/xz/xz_dec_bcj.c |
/*
* LZMA2 decoder
*
* Authors: Lasse Collin <[email protected]>
* Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#include "xz_private.h"
#include "xz_lzma2.h"
/*
* Range decoder initialization eats the first five bytes of each LZMA chunk.
*/
#define RC_INIT_BYTES 5
/*
* Minimum number of usable input bytes needed to safely decode one LZMA
* symbol. The worst case is that we decode 22 bits using probabilities and
* 26 direct bits. This may consume at most 20 bytes of input. However,
* lzma_main() does an extra normalization before returning, thus we
* need to put 21 here.
*/
#define LZMA_IN_REQUIRED 21
/*
* Dictionary (history buffer)
*
* These are always true:
* start <= pos <= full <= end
* pos <= limit <= end
*
* In multi-call mode, also these are true:
* end == size
* size <= size_max
* allocated <= size
*
* Most of these variables are size_t to support single-call mode,
* in which the dictionary variables address the actual output
* buffer directly.
*/
struct dictionary {
/* Beginning of the history buffer */
uint8_t *buf;
/* Old position in buf (before decoding more data) */
size_t start;
/* Position in buf */
size_t pos;
/*
* How full dictionary is. This is used to detect corrupt input that
* would read beyond the beginning of the uncompressed stream.
*/
size_t full;
/* Write limit; we don't write to buf[limit] or later bytes. */
size_t limit;
/*
* End of the dictionary buffer. In multi-call mode, this is
* the same as the dictionary size. In single-call mode, this
* indicates the size of the output buffer.
*/
size_t end;
/*
* Size of the dictionary as specified in Block Header. This is used
* together with "full" to detect corrupt input that would make us
* read beyond the beginning of the uncompressed stream.
*/
uint32_t size;
/*
* Maximum allowed dictionary size in multi-call mode.
* This is ignored in single-call mode.
*/
uint32_t size_max;
/*
* Amount of memory currently allocated for the dictionary.
* This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
* size_max is always the same as the allocated size.)
*/
uint32_t allocated;
/* Operation mode */
enum xz_mode mode;
};
/* Range decoder */
struct rc_dec {
uint32_t range;
uint32_t code;
/*
* Number of initializing bytes remaining to be read
* by rc_read_init().
*/
uint32_t init_bytes_left;
/*
* Buffer from which we read our input. It can be either
* temp.buf or the caller-provided input buffer.
*/
const uint8_t *in;
size_t in_pos;
size_t in_limit;
};
/* Probabilities for a length decoder. */
struct lzma_len_dec {
/* Probability of match length being at least 10 */
uint16_t choice;
/* Probability of match length being at least 18 */
uint16_t choice2;
/* Probabilities for match lengths 2-9 */
uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
/* Probabilities for match lengths 10-17 */
uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
/* Probabilities for match lengths 18-273 */
uint16_t high[LEN_HIGH_SYMBOLS];
};
struct lzma_dec {
/* Distances of latest four matches */
uint32_t rep0;
uint32_t rep1;
uint32_t rep2;
uint32_t rep3;
/* Types of the most recently seen LZMA symbols */
enum lzma_state state;
/*
* Length of a match. This is updated so that dict_repeat can
* be called again to finish repeating the whole match.
*/
uint32_t len;
/*
* LZMA properties or related bit masks (number of literal
* context bits, a mask derived from the number of literal
* position bits, and a mask derived from the number
* position bits)
*/
uint32_t lc;
uint32_t literal_pos_mask; /* (1 << lp) - 1 */
uint32_t pos_mask; /* (1 << pb) - 1 */
/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
uint16_t is_match[STATES][POS_STATES_MAX];
/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
uint16_t is_rep[STATES];
/*
* If 0, distance of a repeated match is rep0.
* Otherwise check is_rep1.
*/
uint16_t is_rep0[STATES];
/*
* If 0, distance of a repeated match is rep1.
* Otherwise check is_rep2.
*/
uint16_t is_rep1[STATES];
/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
uint16_t is_rep2[STATES];
/*
* If 1, the repeated match has length of one byte. Otherwise
* the length is decoded from rep_len_decoder.
*/
uint16_t is_rep0_long[STATES][POS_STATES_MAX];
/*
* Probability tree for the highest two bits of the match
* distance. There is a separate probability tree for match
* lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
*/
uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
/*
* Probability trees for additional bits for match distance
* when the distance is in the range [4, 127].
*/
uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
/*
* Probability tree for the lowest four bits of a match
* distance that is equal to or greater than 128.
*/
uint16_t dist_align[ALIGN_SIZE];
/* Length of a normal match */
struct lzma_len_dec match_len_dec;
/* Length of a repeated match */
struct lzma_len_dec rep_len_dec;
/* Probabilities of literals */
uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
};
struct lzma2_dec {
/* Position in xz_dec_lzma2_run(). */
enum lzma2_seq {
SEQ_CONTROL,
SEQ_UNCOMPRESSED_1,
SEQ_UNCOMPRESSED_2,
SEQ_COMPRESSED_0,
SEQ_COMPRESSED_1,
SEQ_PROPERTIES,
SEQ_LZMA_PREPARE,
SEQ_LZMA_RUN,
SEQ_COPY
} sequence;
/* Next position after decoding the compressed size of the chunk. */
enum lzma2_seq next_sequence;
/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
uint32_t uncompressed;
/*
* Compressed size of LZMA chunk or compressed/uncompressed
* size of uncompressed chunk (64 KiB at maximum)
*/
uint32_t compressed;
/*
* True if dictionary reset is needed. This is false before
* the first chunk (LZMA or uncompressed).
*/
bool need_dict_reset;
/*
* True if new LZMA properties are needed. This is false
* before the first LZMA chunk.
*/
bool need_props;
#ifdef XZ_DEC_MICROLZMA
bool pedantic_microlzma;
#endif
};
struct xz_dec_lzma2 {
/*
* The order below is important on x86 to reduce code size and
* it shouldn't hurt on other platforms. Everything up to and
* including lzma.pos_mask are in the first 128 bytes on x86-32,
* which allows using smaller instructions to access those
* variables. On x86-64, fewer variables fit into the first 128
* bytes, but this is still the best order without sacrificing
* the readability by splitting the structures.
*/
struct rc_dec rc;
struct dictionary dict;
struct lzma2_dec lzma2;
struct lzma_dec lzma;
/*
* Temporary buffer which holds a small number of input bytes between
* decoder calls. See lzma2_lzma() for details.
*/
struct {
uint32_t size;
uint8_t buf[3 * LZMA_IN_REQUIRED];
} temp;
};
/**************
* Dictionary *
**************/
/*
* Reset the dictionary state. When in single-call mode, set up the beginning
* of the dictionary to point to the actual output buffer.
*/
static void dict_reset(struct dictionary *dict, struct xz_buf *b)
{
if (DEC_IS_SINGLE(dict->mode)) {
dict->buf = b->out + b->out_pos;
dict->end = b->out_size - b->out_pos;
}
dict->start = 0;
dict->pos = 0;
dict->limit = 0;
dict->full = 0;
}
/* Set dictionary write limit */
static void dict_limit(struct dictionary *dict, size_t out_max)
{
if (dict->end - dict->pos <= out_max)
dict->limit = dict->end;
else
dict->limit = dict->pos + out_max;
}
/* Return true if at least one byte can be written into the dictionary. */
static inline bool dict_has_space(const struct dictionary *dict)
{
return dict->pos < dict->limit;
}
/*
* Get a byte from the dictionary at the given distance. The distance is
* assumed to be valid, or as a special case, zero when the dictionary is
* still empty. This special case is needed for single-call decoding to
* avoid writing a '\0' to the end of the destination buffer.
*/
static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
{
size_t offset = dict->pos - dist - 1;
if (dist >= dict->pos)
offset += dict->end;
return dict->full > 0 ? dict->buf[offset] : 0;
}
/*
* Put one byte into the dictionary. It is assumed that there is space for it.
*/
static inline void dict_put(struct dictionary *dict, uint8_t byte)
{
dict->buf[dict->pos++] = byte;
if (dict->full < dict->pos)
dict->full = dict->pos;
}
/*
* Repeat given number of bytes from the given distance. If the distance is
* invalid, false is returned. On success, true is returned and *len is
* updated to indicate how many bytes were left to be repeated.
*/
static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
{
size_t back;
uint32_t left;
if (dist >= dict->full || dist >= dict->size)
return false;
left = min_t(size_t, dict->limit - dict->pos, *len);
*len -= left;
back = dict->pos - dist - 1;
if (dist >= dict->pos)
back += dict->end;
do {
dict->buf[dict->pos++] = dict->buf[back++];
if (back == dict->end)
back = 0;
} while (--left > 0);
if (dict->full < dict->pos)
dict->full = dict->pos;
return true;
}
/* Copy uncompressed data as is from input to dictionary and output buffers. */
static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
uint32_t *left)
{
size_t copy_size;
while (*left > 0 && b->in_pos < b->in_size
&& b->out_pos < b->out_size) {
copy_size = min(b->in_size - b->in_pos,
b->out_size - b->out_pos);
if (copy_size > dict->end - dict->pos)
copy_size = dict->end - dict->pos;
if (copy_size > *left)
copy_size = *left;
*left -= copy_size;
/*
* If doing in-place decompression in single-call mode and the
* uncompressed size of the file is larger than the caller
* thought (i.e. it is invalid input!), the buffers below may
* overlap and cause undefined behavior with memcpy().
* With valid inputs memcpy() would be fine here.
*/
memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
dict->pos += copy_size;
if (dict->full < dict->pos)
dict->full = dict->pos;
if (DEC_IS_MULTI(dict->mode)) {
if (dict->pos == dict->end)
dict->pos = 0;
/*
* Like above but for multi-call mode: use memmove()
* to avoid undefined behavior with invalid input.
*/
memmove(b->out + b->out_pos, b->in + b->in_pos,
copy_size);
}
dict->start = dict->pos;
b->out_pos += copy_size;
b->in_pos += copy_size;
}
}
#ifdef XZ_DEC_MICROLZMA
# define DICT_FLUSH_SUPPORTS_SKIPPING true
#else
# define DICT_FLUSH_SUPPORTS_SKIPPING false
#endif
/*
* Flush pending data from dictionary to b->out. It is assumed that there is
* enough space in b->out. This is guaranteed because caller uses dict_limit()
* before decoding data into the dictionary.
*/
static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
{
size_t copy_size = dict->pos - dict->start;
if (DEC_IS_MULTI(dict->mode)) {
if (dict->pos == dict->end)
dict->pos = 0;
/*
* These buffers cannot overlap even if doing in-place
* decompression because in multi-call mode dict->buf
* has been allocated by us in this file; it's not
* provided by the caller like in single-call mode.
*
* With MicroLZMA, b->out can be NULL to skip bytes that
* the caller doesn't need. This cannot be done with XZ
* because it would break BCJ filters.
*/
if (!DICT_FLUSH_SUPPORTS_SKIPPING || b->out != NULL)
memcpy(b->out + b->out_pos, dict->buf + dict->start,
copy_size);
}
dict->start = dict->pos;
b->out_pos += copy_size;
return copy_size;
}
/*****************
* Range decoder *
*****************/
/* Reset the range decoder. */
static void rc_reset(struct rc_dec *rc)
{
rc->range = (uint32_t)-1;
rc->code = 0;
rc->init_bytes_left = RC_INIT_BYTES;
}
/*
* Read the first five initial bytes into rc->code if they haven't been
* read already. (Yes, the first byte gets completely ignored.)
*/
static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
{
while (rc->init_bytes_left > 0) {
if (b->in_pos == b->in_size)
return false;
rc->code = (rc->code << 8) + b->in[b->in_pos++];
--rc->init_bytes_left;
}
return true;
}
/* Return true if there may not be enough input for the next decoding loop. */
static inline bool rc_limit_exceeded(const struct rc_dec *rc)
{
return rc->in_pos > rc->in_limit;
}
/*
* Return true if it is possible (from point of view of range decoder) that
* we have reached the end of the LZMA chunk.
*/
static inline bool rc_is_finished(const struct rc_dec *rc)
{
return rc->code == 0;
}
/* Read the next input byte if needed. */
static __always_inline void rc_normalize(struct rc_dec *rc)
{
if (rc->range < RC_TOP_VALUE) {
rc->range <<= RC_SHIFT_BITS;
rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
}
}
/*
 * Decode one bit. In some versions, this function has been split into three
* functions so that the compiler is supposed to be able to more easily avoid
* an extra branch. In this particular version of the LZMA decoder, this
* doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
* on x86). Using a non-split version results in nicer looking code too.
*
* NOTE: This must return an int. Do not make it return a bool or the speed
* of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
* and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
*/
static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
{
uint32_t bound;
int bit;
rc_normalize(rc);
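	/* bound is the current range scaled by the probability of a 0 bit. */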
bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
if (rc->code < bound) {
rc->range = bound;
*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
bit = 0;
} else {
rc->range -= bound;
rc->code -= bound;
*prob -= *prob >> RC_MOVE_BITS;
bit = 1;
}
return bit;
}
/* Decode a bittree starting from the most significant bit. */
static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
uint16_t *probs, uint32_t limit)
{
uint32_t symbol = 1;
do {
if (rc_bit(rc, &probs[symbol]))
symbol = (symbol << 1) + 1;
else
symbol <<= 1;
} while (symbol < limit);
return symbol;
}
/* Decode a bittree starting from the least significant bit. */
static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
uint16_t *probs,
uint32_t *dest, uint32_t limit)
{
uint32_t symbol = 1;
uint32_t i = 0;
do {
if (rc_bit(rc, &probs[symbol])) {
symbol = (symbol << 1) + 1;
*dest += 1 << i;
} else {
symbol <<= 1;
}
} while (++i < limit);
}
/* Decode direct bits (fixed fifty-fifty probability) */
static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
{
uint32_t mask;
do {
rc_normalize(rc);
rc->range >>= 1;
rc->code -= rc->range;
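		/*
		 * mask is all ones if the subtraction wrapped around, i.e. the
		 * decoded bit is 0 and rc->range must be added back; otherwise
		 * mask is zero and mask + 1 contributes a 1 bit to *dest.
		 */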
mask = (uint32_t)0 - (rc->code >> 31);
rc->code += rc->range & mask;
*dest = (*dest << 1) + (mask + 1);
} while (--limit > 0);
}
/********
* LZMA *
********/
/* Get pointer to literal coder probability array. */
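/*
 * The coder is selected by the lc highest bits of the previous byte and by
 * the lp lowest bits of the current position (lp is implied by
 * literal_pos_mask).
 */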
static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
{
uint32_t prev_byte = dict_get(&s->dict, 0);
uint32_t low = prev_byte >> (8 - s->lzma.lc);
uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
return s->lzma.literal[low + high];
}
/* Decode a literal (one 8-bit byte) */
static void lzma_literal(struct xz_dec_lzma2 *s)
{
uint16_t *probs;
uint32_t symbol;
uint32_t match_byte;
uint32_t match_bit;
uint32_t offset;
uint32_t i;
probs = lzma_literal_probs(s);
if (lzma_state_is_literal(s->lzma.state)) {
symbol = rc_bittree(&s->rc, probs, 0x100);
} else {
symbol = 1;
match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
offset = 0x100;
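		/*
		 * A literal that follows a match is coded relative to the
		 * byte at distance rep0: while the decoded bits agree with
		 * that byte, a separate set of probabilities is used; after
		 * the first mismatch, offset drops to zero and decoding
		 * continues with the ordinary literal probabilities.
		 */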
do {
match_bit = match_byte & offset;
match_byte <<= 1;
i = offset + match_bit + symbol;
if (rc_bit(&s->rc, &probs[i])) {
symbol = (symbol << 1) + 1;
offset &= match_bit;
} else {
symbol <<= 1;
offset &= ~match_bit;
}
} while (symbol < 0x100);
}
dict_put(&s->dict, (uint8_t)symbol);
lzma_state_literal(&s->lzma.state);
}
/* Decode the length of the match into s->lzma.len. */
static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
uint32_t pos_state)
{
uint16_t *probs;
uint32_t limit;
if (!rc_bit(&s->rc, &l->choice)) {
probs = l->low[pos_state];
limit = LEN_LOW_SYMBOLS;
s->lzma.len = MATCH_LEN_MIN;
} else {
if (!rc_bit(&s->rc, &l->choice2)) {
probs = l->mid[pos_state];
limit = LEN_MID_SYMBOLS;
s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
} else {
probs = l->high;
limit = LEN_HIGH_SYMBOLS;
s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
+ LEN_MID_SYMBOLS;
}
}
s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
}
/* Decode a match. The distance will be stored in s->lzma.rep0. */
static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
uint16_t *probs;
uint32_t dist_slot;
uint32_t limit;
lzma_state_match(&s->lzma.state);
s->lzma.rep3 = s->lzma.rep2;
s->lzma.rep2 = s->lzma.rep1;
s->lzma.rep1 = s->lzma.rep0;
lzma_len(s, &s->lzma.match_len_dec, pos_state);
probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
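	/*
	 * Slots below DIST_MODEL_START encode the distance directly. Larger
	 * slots give the two highest bits of the distance as 2 + (dist_slot & 1)
	 * and the number of additional lower bits as (dist_slot >> 1) - 1,
	 * which are decoded below.
	 */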
if (dist_slot < DIST_MODEL_START) {
s->lzma.rep0 = dist_slot;
} else {
limit = (dist_slot >> 1) - 1;
s->lzma.rep0 = 2 + (dist_slot & 1);
if (dist_slot < DIST_MODEL_END) {
s->lzma.rep0 <<= limit;
probs = s->lzma.dist_special + s->lzma.rep0
- dist_slot - 1;
rc_bittree_reverse(&s->rc, probs,
&s->lzma.rep0, limit);
} else {
rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
s->lzma.rep0 <<= ALIGN_BITS;
rc_bittree_reverse(&s->rc, s->lzma.dist_align,
&s->lzma.rep0, ALIGN_BITS);
}
}
}
/*
* Decode a repeated match. The distance is one of the four most recently
* seen matches. The distance will be stored in s->lzma.rep0.
*/
static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
uint32_t tmp;
if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
s->lzma.state][pos_state])) {
lzma_state_short_rep(&s->lzma.state);
s->lzma.len = 1;
return;
}
} else {
if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
tmp = s->lzma.rep1;
} else {
if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
tmp = s->lzma.rep2;
} else {
tmp = s->lzma.rep3;
s->lzma.rep3 = s->lzma.rep2;
}
s->lzma.rep2 = s->lzma.rep1;
}
s->lzma.rep1 = s->lzma.rep0;
s->lzma.rep0 = tmp;
}
lzma_state_long_rep(&s->lzma.state);
lzma_len(s, &s->lzma.rep_len_dec, pos_state);
}
/* LZMA decoder core */
static bool lzma_main(struct xz_dec_lzma2 *s)
{
uint32_t pos_state;
/*
	 * If the dictionary limit was reached during the previous call, try to
* finish the possibly pending repeat in the dictionary.
*/
if (dict_has_space(&s->dict) && s->lzma.len > 0)
dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
/*
* Decode more LZMA symbols. One iteration may consume up to
* LZMA_IN_REQUIRED - 1 bytes.
*/
while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
pos_state = s->dict.pos & s->lzma.pos_mask;
if (!rc_bit(&s->rc, &s->lzma.is_match[
s->lzma.state][pos_state])) {
lzma_literal(s);
} else {
if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
lzma_rep_match(s, pos_state);
else
lzma_match(s, pos_state);
if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
return false;
}
}
/*
* Having the range decoder always normalized when we are outside
* this function makes it easier to correctly handle end of the chunk.
*/
rc_normalize(&s->rc);
return true;
}
/*
* Reset the LZMA decoder and range decoder state. Dictionary is not reset
* here, because LZMA state may be reset without resetting the dictionary.
*/
static void lzma_reset(struct xz_dec_lzma2 *s)
{
uint16_t *probs;
size_t i;
s->lzma.state = STATE_LIT_LIT;
s->lzma.rep0 = 0;
s->lzma.rep1 = 0;
s->lzma.rep2 = 0;
s->lzma.rep3 = 0;
s->lzma.len = 0;
/*
* All probabilities are initialized to the same value. This hack
* makes the code smaller by avoiding a separate loop for each
* probability array.
*
	 * This could be optimized so that only the part of the literal
	 * probabilities that is actually required would be initialized.
	 * In the common case we would write 12 KiB less.
*/
probs = s->lzma.is_match[0];
for (i = 0; i < PROBS_TOTAL; ++i)
probs[i] = RC_BIT_MODEL_TOTAL / 2;
rc_reset(&s->rc);
}
/*
* Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
* from the decoded lp and pb values. On success, the LZMA decoder state is
* reset and true is returned.
*/
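/*
 * The props byte encodes lc, lp and pb as (pb * 5 + lp) * 9 + lc. For
 * example, the typical default lc=3, lp=0, pb=2 is (2 * 5 + 0) * 9 + 3
 * = 93 = 0x5D.
 */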
static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
{
if (props > (4 * 5 + 4) * 9 + 8)
return false;
s->lzma.pos_mask = 0;
while (props >= 9 * 5) {
props -= 9 * 5;
++s->lzma.pos_mask;
}
s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
s->lzma.literal_pos_mask = 0;
while (props >= 9) {
props -= 9;
++s->lzma.literal_pos_mask;
}
s->lzma.lc = props;
if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
return false;
s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
lzma_reset(s);
return true;
}
/*********
* LZMA2 *
*********/
/*
* The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
* been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
* wrapper function takes care of making the LZMA decoder's assumption safe.
*
* As long as there is plenty of input left to be decoded in the current LZMA
* chunk, we decode directly from the caller-supplied input buffer until
* there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
* s->temp.buf, which (hopefully) gets filled on the next call to this
* function. We decode a few bytes from the temporary buffer so that we can
* continue decoding from the caller-supplied input buffer again.
*/
static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
size_t in_avail;
uint32_t tmp;
in_avail = b->in_size - b->in_pos;
if (s->temp.size > 0 || s->lzma2.compressed == 0) {
tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
if (tmp > s->lzma2.compressed - s->temp.size)
tmp = s->lzma2.compressed - s->temp.size;
if (tmp > in_avail)
tmp = in_avail;
memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
if (s->temp.size + tmp == s->lzma2.compressed) {
memzero(s->temp.buf + s->temp.size + tmp,
sizeof(s->temp.buf)
- s->temp.size - tmp);
s->rc.in_limit = s->temp.size + tmp;
} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
s->temp.size += tmp;
b->in_pos += tmp;
return true;
} else {
s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
}
s->rc.in = s->temp.buf;
s->rc.in_pos = 0;
if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
return false;
s->lzma2.compressed -= s->rc.in_pos;
if (s->rc.in_pos < s->temp.size) {
s->temp.size -= s->rc.in_pos;
memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
s->temp.size);
return true;
}
b->in_pos += s->rc.in_pos - s->temp.size;
s->temp.size = 0;
}
in_avail = b->in_size - b->in_pos;
if (in_avail >= LZMA_IN_REQUIRED) {
s->rc.in = b->in;
s->rc.in_pos = b->in_pos;
if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
s->rc.in_limit = b->in_pos + s->lzma2.compressed;
else
s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
if (!lzma_main(s))
return false;
in_avail = s->rc.in_pos - b->in_pos;
if (in_avail > s->lzma2.compressed)
return false;
s->lzma2.compressed -= in_avail;
b->in_pos = s->rc.in_pos;
}
in_avail = b->in_size - b->in_pos;
if (in_avail < LZMA_IN_REQUIRED) {
if (in_avail > s->lzma2.compressed)
in_avail = s->lzma2.compressed;
memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
s->temp.size = in_avail;
b->in_pos += in_avail;
}
return true;
}
/*
* Take care of the LZMA2 control layer, and forward the job of actual LZMA
* decoding or copying of uncompressed chunks to other functions.
*/
XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
struct xz_buf *b)
{
uint32_t tmp;
while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
switch (s->lzma2.sequence) {
case SEQ_CONTROL:
/*
* LZMA2 control byte
*
* Exact values:
* 0x00 End marker
* 0x01 Dictionary reset followed by
* an uncompressed chunk
* 0x02 Uncompressed chunk (no dictionary reset)
*
* Highest three bits (s->control & 0xE0):
* 0xE0 Dictionary reset, new properties and state
* reset, followed by LZMA compressed chunk
* 0xC0 New properties and state reset, followed
* by LZMA compressed chunk (no dictionary
* reset)
* 0xA0 State reset using old properties,
* followed by LZMA compressed chunk (no
* dictionary reset)
* 0x80 LZMA chunk (no dictionary or state reset)
*
* For LZMA compressed chunks, the lowest five bits
			 * (s->control & 0x1F) are the highest bits of the
* uncompressed size (bits 16-20).
*
* A new LZMA2 stream must begin with a dictionary
* reset. The first LZMA chunk must set new
* properties and reset the LZMA state.
*
* Values that don't match anything described above
* are invalid and we return XZ_DATA_ERROR.
*/
tmp = b->in[b->in_pos++];
if (tmp == 0x00)
return XZ_STREAM_END;
if (tmp >= 0xE0 || tmp == 0x01) {
s->lzma2.need_props = true;
s->lzma2.need_dict_reset = false;
dict_reset(&s->dict, b);
} else if (s->lzma2.need_dict_reset) {
return XZ_DATA_ERROR;
}
if (tmp >= 0x80) {
s->lzma2.uncompressed = (tmp & 0x1F) << 16;
s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
if (tmp >= 0xC0) {
/*
* When there are new properties,
* state reset is done at
* SEQ_PROPERTIES.
*/
s->lzma2.need_props = false;
s->lzma2.next_sequence
= SEQ_PROPERTIES;
} else if (s->lzma2.need_props) {
return XZ_DATA_ERROR;
} else {
s->lzma2.next_sequence
= SEQ_LZMA_PREPARE;
if (tmp >= 0xA0)
lzma_reset(s);
}
} else {
if (tmp > 0x02)
return XZ_DATA_ERROR;
s->lzma2.sequence = SEQ_COMPRESSED_0;
s->lzma2.next_sequence = SEQ_COPY;
}
break;
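		/*
		 * The remaining size fields are big endian and store
		 * size - 1: two more bytes of uncompressed size (for LZMA
		 * chunks) followed by two bytes of compressed size.
		 */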
case SEQ_UNCOMPRESSED_1:
s->lzma2.uncompressed
+= (uint32_t)b->in[b->in_pos++] << 8;
s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
break;
case SEQ_UNCOMPRESSED_2:
s->lzma2.uncompressed
+= (uint32_t)b->in[b->in_pos++] + 1;
s->lzma2.sequence = SEQ_COMPRESSED_0;
break;
case SEQ_COMPRESSED_0:
s->lzma2.compressed
= (uint32_t)b->in[b->in_pos++] << 8;
s->lzma2.sequence = SEQ_COMPRESSED_1;
break;
case SEQ_COMPRESSED_1:
s->lzma2.compressed
+= (uint32_t)b->in[b->in_pos++] + 1;
s->lzma2.sequence = s->lzma2.next_sequence;
break;
case SEQ_PROPERTIES:
if (!lzma_props(s, b->in[b->in_pos++]))
return XZ_DATA_ERROR;
s->lzma2.sequence = SEQ_LZMA_PREPARE;
fallthrough;
case SEQ_LZMA_PREPARE:
if (s->lzma2.compressed < RC_INIT_BYTES)
return XZ_DATA_ERROR;
if (!rc_read_init(&s->rc, b))
return XZ_OK;
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
fallthrough;
case SEQ_LZMA_RUN:
/*
* Set dictionary limit to indicate how much we want
			 * to be decoded at maximum. Decode new data into the
* dictionary. Flush the new data from dictionary to
* b->out. Check if we finished decoding this chunk.
* In case the dictionary got full but we didn't fill
* the output buffer yet, we may run this loop
* multiple times without changing s->lzma2.sequence.
*/
dict_limit(&s->dict, min_t(size_t,
b->out_size - b->out_pos,
s->lzma2.uncompressed));
if (!lzma2_lzma(s, b))
return XZ_DATA_ERROR;
s->lzma2.uncompressed -= dict_flush(&s->dict, b);
if (s->lzma2.uncompressed == 0) {
if (s->lzma2.compressed > 0 || s->lzma.len > 0
|| !rc_is_finished(&s->rc))
return XZ_DATA_ERROR;
rc_reset(&s->rc);
s->lzma2.sequence = SEQ_CONTROL;
} else if (b->out_pos == b->out_size
|| (b->in_pos == b->in_size
&& s->temp.size
< s->lzma2.compressed)) {
return XZ_OK;
}
break;
case SEQ_COPY:
dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
if (s->lzma2.compressed > 0)
return XZ_OK;
s->lzma2.sequence = SEQ_CONTROL;
break;
}
}
return XZ_OK;
}
XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
uint32_t dict_max)
{
struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
return NULL;
s->dict.mode = mode;
s->dict.size_max = dict_max;
if (DEC_IS_PREALLOC(mode)) {
s->dict.buf = vmalloc(dict_max);
if (s->dict.buf == NULL) {
kfree(s);
return NULL;
}
} else if (DEC_IS_DYNALLOC(mode)) {
s->dict.buf = NULL;
s->dict.allocated = 0;
}
return s;
}
XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
{
/* This limits dictionary size to 3 GiB to keep parsing simpler. */
if (props > 39)
return XZ_OPTIONS_ERROR;
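	/*
	 * The dictionary size is (2 or 3) << (props / 2 + 11) bytes: for
	 * example props == 0 gives 4 KiB and props == 39 gives the 3 GiB
	 * maximum.
	 */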
s->dict.size = 2 + (props & 1);
s->dict.size <<= (props >> 1) + 11;
if (DEC_IS_MULTI(s->dict.mode)) {
if (s->dict.size > s->dict.size_max)
return XZ_MEMLIMIT_ERROR;
s->dict.end = s->dict.size;
if (DEC_IS_DYNALLOC(s->dict.mode)) {
if (s->dict.allocated < s->dict.size) {
s->dict.allocated = s->dict.size;
vfree(s->dict.buf);
s->dict.buf = vmalloc(s->dict.size);
if (s->dict.buf == NULL) {
s->dict.allocated = 0;
return XZ_MEM_ERROR;
}
}
}
}
s->lzma2.sequence = SEQ_CONTROL;
s->lzma2.need_dict_reset = true;
s->temp.size = 0;
return XZ_OK;
}
XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
if (DEC_IS_MULTI(s->dict.mode))
vfree(s->dict.buf);
kfree(s);
}
#ifdef XZ_DEC_MICROLZMA
/* This is a wrapper struct to have a nice struct name in the public API. */
struct xz_dec_microlzma {
struct xz_dec_lzma2 s;
};
enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s_ptr,
struct xz_buf *b)
{
struct xz_dec_lzma2 *s = &s_ptr->s;
/*
* sequence is SEQ_PROPERTIES before the first input byte,
* SEQ_LZMA_PREPARE until a total of five bytes have been read,
* and SEQ_LZMA_RUN for the rest of the input stream.
*/
if (s->lzma2.sequence != SEQ_LZMA_RUN) {
if (s->lzma2.sequence == SEQ_PROPERTIES) {
/* One byte is needed for the props. */
if (b->in_pos >= b->in_size)
return XZ_OK;
/*
* Don't increment b->in_pos here. The same byte is
* also passed to rc_read_init() which will ignore it.
*/
if (!lzma_props(s, ~b->in[b->in_pos]))
return XZ_DATA_ERROR;
s->lzma2.sequence = SEQ_LZMA_PREPARE;
}
/*
* xz_dec_microlzma_reset() doesn't validate the compressed
* size so we do it here. We have to limit the maximum size
* to avoid integer overflows in lzma2_lzma(). 3 GiB is a nice
* round number and much more than users of this code should
* ever need.
*/
if (s->lzma2.compressed < RC_INIT_BYTES
|| s->lzma2.compressed > (3U << 30))
return XZ_DATA_ERROR;
if (!rc_read_init(&s->rc, b))
return XZ_OK;
s->lzma2.compressed -= RC_INIT_BYTES;
s->lzma2.sequence = SEQ_LZMA_RUN;
dict_reset(&s->dict, b);
}
/* This is to allow increasing b->out_size between calls. */
if (DEC_IS_SINGLE(s->dict.mode))
s->dict.end = b->out_size - b->out_pos;
while (true) {
dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos,
s->lzma2.uncompressed));
if (!lzma2_lzma(s, b))
return XZ_DATA_ERROR;
s->lzma2.uncompressed -= dict_flush(&s->dict, b);
if (s->lzma2.uncompressed == 0) {
if (s->lzma2.pedantic_microlzma) {
if (s->lzma2.compressed > 0 || s->lzma.len > 0
|| !rc_is_finished(&s->rc))
return XZ_DATA_ERROR;
}
return XZ_STREAM_END;
}
if (b->out_pos == b->out_size)
return XZ_OK;
if (b->in_pos == b->in_size
&& s->temp.size < s->lzma2.compressed)
return XZ_OK;
}
}
struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
uint32_t dict_size)
{
struct xz_dec_microlzma *s;
/* Restrict dict_size to the same range as in the LZMA2 code. */
if (dict_size < 4096 || dict_size > (3U << 30))
return NULL;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL)
return NULL;
s->s.dict.mode = mode;
s->s.dict.size = dict_size;
if (DEC_IS_MULTI(mode)) {
s->s.dict.end = dict_size;
s->s.dict.buf = vmalloc(dict_size);
if (s->s.dict.buf == NULL) {
kfree(s);
return NULL;
}
}
return s;
}
void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
uint32_t uncomp_size, int uncomp_size_is_exact)
{
/*
* comp_size is validated in xz_dec_microlzma_run().
* uncomp_size can safely be anything.
*/
s->s.lzma2.compressed = comp_size;
s->s.lzma2.uncompressed = uncomp_size;
s->s.lzma2.pedantic_microlzma = uncomp_size_is_exact;
s->s.lzma2.sequence = SEQ_PROPERTIES;
s->s.temp.size = 0;
}
void xz_dec_microlzma_end(struct xz_dec_microlzma *s)
{
if (DEC_IS_MULTI(s->s.dict.mode))
vfree(s->s.dict.buf);
kfree(s);
}
#endif
| linux-master | lib/xz/xz_dec_lzma2.c |
/*
* XZ decoder tester
*
* Author: Lasse Collin <[email protected]>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/crc32.h>
#include <linux/xz.h>
/* Maximum supported dictionary size */
#define DICT_MAX (1 << 20)
/* Device name to pass to register_chrdev(). */
#define DEVICE_NAME "xz_dec_test"
/* Dynamically allocated device major number */
static int device_major;
/*
* We reuse the same decoder state, and thus can decode only one
* file at a time.
*/
static bool device_is_open;
/* XZ decoder state */
static struct xz_dec *state;
/*
* Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after
* it has returned XZ_STREAM_END, so we make this static.
*/
static enum xz_ret ret;
/*
* Input and output buffers. The input buffer is used as a temporary safe
 * place for the data coming from userspace.
*/
static uint8_t buffer_in[1024];
static uint8_t buffer_out[1024];
/*
* Structure to pass the input and output buffers to the XZ decoder.
* A few of the fields are never modified so we initialize them here.
*/
static struct xz_buf buffers = {
.in = buffer_in,
.out = buffer_out,
.out_size = sizeof(buffer_out)
};
/*
* CRC32 of uncompressed data. This is used to give the user a simple way
* to check that the decoder produces correct output.
*/
static uint32_t crc;
static int xz_dec_test_open(struct inode *i, struct file *f)
{
if (device_is_open)
return -EBUSY;
device_is_open = true;
xz_dec_reset(state);
ret = XZ_OK;
crc = 0xFFFFFFFF;
buffers.in_pos = 0;
buffers.in_size = 0;
buffers.out_pos = 0;
printk(KERN_INFO DEVICE_NAME ": opened\n");
return 0;
}
static int xz_dec_test_release(struct inode *i, struct file *f)
{
device_is_open = false;
if (ret == XZ_OK)
printk(KERN_INFO DEVICE_NAME ": input was truncated\n");
printk(KERN_INFO DEVICE_NAME ": closed\n");
return 0;
}
/*
* Decode the data given to us from the userspace. CRC32 of the uncompressed
* data is calculated and is printed at the end of successful decoding. The
* uncompressed data isn't stored anywhere for further use.
*
* The .xz file must have exactly one Stream and no Stream Padding. The data
* after the first Stream is considered to be garbage.
*/
static ssize_t xz_dec_test_write(struct file *file, const char __user *buf,
size_t size, loff_t *pos)
{
size_t remaining;
if (ret != XZ_OK) {
if (size > 0)
printk(KERN_INFO DEVICE_NAME ": %zu bytes of "
"garbage at the end of the file\n",
size);
return -ENOSPC;
}
printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n",
size);
remaining = size;
while ((remaining > 0 || buffers.out_pos == buffers.out_size)
&& ret == XZ_OK) {
if (buffers.in_pos == buffers.in_size) {
buffers.in_pos = 0;
buffers.in_size = min(remaining, sizeof(buffer_in));
if (copy_from_user(buffer_in, buf, buffers.in_size))
return -EFAULT;
buf += buffers.in_size;
remaining -= buffers.in_size;
}
buffers.out_pos = 0;
ret = xz_dec_run(state, &buffers);
crc = crc32(crc, buffer_out, buffers.out_pos);
}
switch (ret) {
case XZ_OK:
printk(KERN_INFO DEVICE_NAME ": XZ_OK\n");
return size;
case XZ_STREAM_END:
printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, "
"CRC32 = 0x%08X\n", ~crc);
return size - remaining - (buffers.in_size - buffers.in_pos);
case XZ_MEMLIMIT_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n");
break;
case XZ_FORMAT_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n");
break;
case XZ_OPTIONS_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n");
break;
case XZ_DATA_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n");
break;
case XZ_BUF_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n");
break;
default:
printk(KERN_INFO DEVICE_NAME ": Bug detected!\n");
break;
}
return -EIO;
}
/* Allocate the XZ decoder state and register the character device. */
static int __init xz_dec_test_init(void)
{
static const struct file_operations fileops = {
.owner = THIS_MODULE,
.open = &xz_dec_test_open,
.release = &xz_dec_test_release,
.write = &xz_dec_test_write
};
state = xz_dec_init(XZ_PREALLOC, DICT_MAX);
if (state == NULL)
return -ENOMEM;
device_major = register_chrdev(0, DEVICE_NAME, &fileops);
if (device_major < 0) {
xz_dec_end(state);
return device_major;
}
printk(KERN_INFO DEVICE_NAME ": module loaded\n");
printk(KERN_INFO DEVICE_NAME ": Create a device node with "
"'mknod " DEVICE_NAME " c %d 0' and write .xz files "
"to it.\n", device_major);
return 0;
}
static void __exit xz_dec_test_exit(void)
{
unregister_chrdev(device_major, DEVICE_NAME);
xz_dec_end(state);
printk(KERN_INFO DEVICE_NAME ": module unloaded\n");
}
module_init(xz_dec_test_init);
module_exit(xz_dec_test_exit);
MODULE_DESCRIPTION("XZ decompressor tester");
MODULE_VERSION("1.0");
MODULE_AUTHOR("Lasse Collin <[email protected]>");
/*
* This code is in the public domain, but in Linux it's simplest to just
* say it's GPL and consider the authors as the copyright holders.
*/
MODULE_LICENSE("GPL");
| linux-master | lib/xz/xz_dec_test.c |
/*
* XZ decoder module information
*
* Author: Lasse Collin <[email protected]>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#include <linux/module.h>
#include <linux/xz.h>
EXPORT_SYMBOL(xz_dec_init);
EXPORT_SYMBOL(xz_dec_reset);
EXPORT_SYMBOL(xz_dec_run);
EXPORT_SYMBOL(xz_dec_end);
#ifdef CONFIG_XZ_DEC_MICROLZMA
EXPORT_SYMBOL(xz_dec_microlzma_alloc);
EXPORT_SYMBOL(xz_dec_microlzma_reset);
EXPORT_SYMBOL(xz_dec_microlzma_run);
EXPORT_SYMBOL(xz_dec_microlzma_end);
#endif
MODULE_DESCRIPTION("XZ decompressor");
MODULE_VERSION("1.1");
MODULE_AUTHOR("Lasse Collin <[email protected]> and Igor Pavlov");
/*
* This code is in the public domain, but in Linux it's simplest to just
* say it's GPL and consider the authors as the copyright holders.
*/
MODULE_LICENSE("GPL");
| linux-master | lib/xz/xz_dec_syms.c |
/*
* Copyright (c) 2011 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/cordic.h>
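/*
 * arctan(2^-i) for i = 0..17, in the same fixed-point degree format as
 * CORDIC_FIXED(): for example the first entry, 2949120, is 45.0 * 2^16,
 * i.e. arctan(1).
 */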
static const s32 arctan_table[] = {
2949120,
1740967,
919879,
466945,
234379,
117304,
58666,
29335,
14668,
7334,
3667,
1833,
917,
458,
229,
115,
57,
29
};
/*
 * cordic_calc_iq() - calculates the i/q coordinate for a given angle
*
* theta: angle in degrees for which i/q coordinate is to be calculated
* coord: function output parameter holding the i/q coordinate
*/
struct cordic_iq cordic_calc_iq(s32 theta)
{
struct cordic_iq coord;
s32 angle, valtmp;
unsigned iter;
int signx = 1;
int signtheta;
coord.i = CORDIC_ANGLE_GEN;
coord.q = 0;
angle = 0;
theta = CORDIC_FIXED(theta);
signtheta = (theta < 0) ? -1 : 1;
theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
CORDIC_FIXED(180) * signtheta;
if (CORDIC_FLOAT(theta) > 90) {
theta -= CORDIC_FIXED(180);
signx = -1;
} else if (CORDIC_FLOAT(theta) < -90) {
theta += CORDIC_FIXED(180);
signx = -1;
}
for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
if (theta > angle) {
valtmp = coord.i - (coord.q >> iter);
coord.q += (coord.i >> iter);
angle += arctan_table[iter];
} else {
valtmp = coord.i + (coord.q >> iter);
coord.q -= (coord.i >> iter);
angle -= arctan_table[iter];
}
coord.i = valtmp;
}
coord.i *= signx;
coord.q *= signx;
return coord;
}
EXPORT_SYMBOL(cordic_calc_iq);
MODULE_DESCRIPTION("CORDIC algorithm");
MODULE_AUTHOR("Broadcom Corporation");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | lib/math/cordic.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/gcd.h>
#include <linux/export.h>
/*
* This implements the binary GCD algorithm. (Often attributed to Stein,
* but as Knuth has noted, appears in a first-century Chinese math text.)
*
* This is faster than the division-based algorithm even on x86, which
* has decent hardware division.
*/
#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
/* If __ffs is available, the even/odd algorithm benchmarks slower. */
/**
* gcd - calculate and return the greatest common divisor of 2 unsigned longs
* @a: first value
* @b: second value
*/
unsigned long gcd(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
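	/*
	 * r & -r isolates the lowest set bit of a | b, which is the largest
	 * power of two dividing both a and b; once the odd part of either
	 * working value reaches 1, that power of two is the whole gcd.
	 */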
b >>= __ffs(b);
if (b == 1)
return r & -r;
for (;;) {
a >>= __ffs(a);
if (a == 1)
return r & -r;
if (a == b)
return a << __ffs(r);
if (a < b)
swap(a, b);
a -= b;
}
}
#else
/* If normalization is done by loops, the even/odd algorithm is a win. */
unsigned long gcd(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
/* Isolate lsbit of r */
r &= -r;
while (!(b & r))
b >>= 1;
if (b == r)
return r;
for (;;) {
while (!(a & r))
a >>= 1;
if (a == r)
return r;
if (a == b)
return a;
if (a < b)
swap(a, b);
a -= b;
a >>= 1;
if (a & r)
a += b;
a >>= 1;
}
}
#endif
EXPORT_SYMBOL_GPL(gcd);
| linux-master | lib/math/gcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Maciej W. Rozycki
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <asm/div64.h>
#define TEST_DIV64_N_ITER 1024
static const u64 test_div64_dividends[] = {
0x00000000ab275080,
0x0000000fe73c1959,
0x000000e54c0a74b1,
0x00000d4398ff1ef9,
0x0000a18c2ee1c097,
0x00079fb80b072e4a,
0x0072db27380dd689,
0x0842f488162e2284,
0xf66745411d8ab063,
};
#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends)
#define TEST_DIV64_DIVISOR_0 0x00000009
#define TEST_DIV64_DIVISOR_1 0x0000007c
#define TEST_DIV64_DIVISOR_2 0x00000204
#define TEST_DIV64_DIVISOR_3 0x0000cb5b
#define TEST_DIV64_DIVISOR_4 0x00010000
#define TEST_DIV64_DIVISOR_5 0x0008a880
#define TEST_DIV64_DIVISOR_6 0x003fd3ae
#define TEST_DIV64_DIVISOR_7 0x0b658fac
#define TEST_DIV64_DIVISOR_8 0xdc08b349
static const u32 test_div64_divisors[] = {
TEST_DIV64_DIVISOR_0,
TEST_DIV64_DIVISOR_1,
TEST_DIV64_DIVISOR_2,
TEST_DIV64_DIVISOR_3,
TEST_DIV64_DIVISOR_4,
TEST_DIV64_DIVISOR_5,
TEST_DIV64_DIVISOR_6,
TEST_DIV64_DIVISOR_7,
TEST_DIV64_DIVISOR_8,
};
#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors)
static const struct {
u64 quotient;
u32 remainder;
} test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = {
{
{ 0x0000000013045e47, 0x00000001 },
{ 0x000000000161596c, 0x00000030 },
{ 0x000000000054e9d4, 0x00000130 },
{ 0x000000000000d776, 0x0000278e },
{ 0x000000000000ab27, 0x00005080 },
{ 0x00000000000013c4, 0x0004ce80 },
{ 0x00000000000002ae, 0x001e143c },
{ 0x000000000000000f, 0x0033e56c },
{ 0x0000000000000000, 0xab275080 },
}, {
{ 0x00000001c45c02d1, 0x00000000 },
{ 0x0000000020d5213c, 0x00000049 },
{ 0x0000000007e3d65f, 0x000001dd },
{ 0x0000000000140531, 0x000065ee },
{ 0x00000000000fe73c, 0x00001959 },
{ 0x000000000001d637, 0x0004e5d9 },
{ 0x0000000000003fc9, 0x000713bb },
{ 0x0000000000000165, 0x029abe7d },
{ 0x0000000000000012, 0x6e9f7e37 },
}, {
{ 0x000000197a3a0cf7, 0x00000002 },
{ 0x00000001d9632e5c, 0x00000021 },
{ 0x0000000071c28039, 0x000001cd },
{ 0x000000000120a844, 0x0000b885 },
{ 0x0000000000e54c0a, 0x000074b1 },
{ 0x00000000001a7bb3, 0x00072331 },
{ 0x00000000000397ad, 0x0002c61b },
{ 0x000000000000141e, 0x06ea2e89 },
{ 0x000000000000010a, 0xab002ad7 },
}, {
{ 0x0000017949e37538, 0x00000001 },
{ 0x0000001b62441f37, 0x00000055 },
{ 0x0000000694a3391d, 0x00000085 },
{ 0x0000000010b2a5d2, 0x0000a753 },
{ 0x000000000d4398ff, 0x00001ef9 },
{ 0x0000000001882ec6, 0x0005cbf9 },
{ 0x000000000035333b, 0x0017abdf },
{ 0x00000000000129f1, 0x0ab4520d },
{ 0x0000000000000f6e, 0x8ac0ce9b },
}, {
{ 0x000011f321a74e49, 0x00000006 },
{ 0x0000014d8481d211, 0x0000005b },
{ 0x0000005025cbd92d, 0x000001e3 },
{ 0x00000000cb5e71e3, 0x000043e6 },
{ 0x00000000a18c2ee1, 0x0000c097 },
{ 0x0000000012a88828, 0x00036c97 },
{ 0x000000000287f16f, 0x002c2a25 },
{ 0x00000000000e2cc7, 0x02d581e3 },
{ 0x000000000000bbf4, 0x1ba08c03 },
}, {
{ 0x0000d8db8f72935d, 0x00000005 },
{ 0x00000fbd5aed7a2e, 0x00000002 },
{ 0x000003c84b6ea64a, 0x00000122 },
{ 0x0000000998fa8829, 0x000044b7 },
{ 0x000000079fb80b07, 0x00002e4a },
{ 0x00000000e16b20fa, 0x0002a14a },
{ 0x000000001e940d22, 0x00353b2e },
{ 0x0000000000ab40ac, 0x06fba6ba },
{ 0x000000000008debd, 0x72d98365 },
}, {
{ 0x000cc3045b8fc281, 0x00000000 },
{ 0x0000ed1f48b5c9fc, 0x00000079 },
{ 0x000038fb9c63406a, 0x000000e1 },
{ 0x000000909705b825, 0x00000a62 },
{ 0x00000072db27380d, 0x0000d689 },
{ 0x0000000d43fce827, 0x00082b09 },
{ 0x00000001ccaba11a, 0x0037e8dd },
{ 0x000000000a13f729, 0x0566dffd },
{ 0x000000000085a14b, 0x23d36726 },
}, {
{ 0x00eafeb9c993592b, 0x00000001 },
{ 0x00110e5befa9a991, 0x00000048 },
{ 0x00041947b4a1d36a, 0x000000dc },
{ 0x00000a6679327311, 0x0000c079 },
{ 0x00000842f488162e, 0x00002284 },
{ 0x000000f4459740fc, 0x00084484 },
{ 0x0000002122c47bf9, 0x002ca446 },
{ 0x00000000b9936290, 0x004979c4 },
{ 0x00000000099ca89d, 0x9db446bf },
}, {
{ 0x1b60cece589da1d2, 0x00000001 },
{ 0x01fcb42be1453f5b, 0x0000004f },
{ 0x007a3f2457df0749, 0x0000013f },
{ 0x0001363130e3ec7b, 0x000017aa },
{ 0x0000f66745411d8a, 0x0000b063 },
{ 0x00001c757dfab350, 0x00048863 },
{ 0x000003dc4979c652, 0x00224ea7 },
{ 0x000000159edc3144, 0x06409ab3 },
{ 0x000000011eadfee3, 0xa99c48a8 },
},
};
static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j)
{
return (quotient == test_div64_results[i][j].quotient &&
remainder == test_div64_results[i][j].remainder);
}
/*
* This needs to be a macro, because we don't want to rely on the compiler
* to do constant propagation, and `do_div' may take a different path for
* constants, so we do want to verify that as well.
*/
#define test_div64_one(dividend, divisor, i, j) ({ \
bool result = true; \
u64 quotient; \
u32 remainder; \
\
quotient = dividend; \
remainder = do_div(quotient, divisor); \
if (!test_div64_verify(quotient, remainder, i, j)) { \
pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \
dividend, divisor, quotient, remainder); \
pr_err("ERROR: expected value => %016llx,%08x\n",\
test_div64_results[i][j].quotient, \
test_div64_results[i][j].remainder); \
result = false; \
} \
result; \
})
/*
* Run calculation for the same divisor value expressed as a constant
* and as a variable, so as to verify the implementation for both cases
* should they be handled by different code execution paths.
*/
static bool __init test_div64(void)
{
u64 dividend;
int i, j;
for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) {
dividend = test_div64_dividends[i];
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7))
return false;
if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8))
return false;
for (j = 0; j < SIZE_DIV64_DIVISORS; j++) {
if (!test_div64_one(dividend, test_div64_divisors[j],
i, j))
return false;
}
}
return true;
}
static int __init test_div64_init(void)
{
struct timespec64 ts, ts0, ts1;
int i;
pr_info("Starting 64bit/32bit division and modulo test\n");
ktime_get_ts64(&ts0);
for (i = 0; i < TEST_DIV64_N_ITER; i++)
if (!test_div64())
break;
ktime_get_ts64(&ts1);
ts = timespec64_sub(ts1, ts0);
pr_info("Completed 64bit/32bit division and modulo test, "
"%llu.%09lus elapsed\n", ts.tv_sec, ts.tv_nsec);
return 0;
}
static void __exit test_div64_exit(void)
{
}
module_init(test_div64_init);
module_exit(test_div64_exit);
MODULE_AUTHOR("Maciej W. Rozycki <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("64bit/32bit division and modulo test module");
| linux-master | lib/math/test_div64.c |
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <linux/rational.h>
struct rational_test_param {
unsigned long num, den;
unsigned long max_num, max_den;
unsigned long exp_num, exp_den;
const char *name;
};
static const struct rational_test_param test_parameters[] = {
{ 1230, 10, 100, 20, 100, 1, "Exceeds bounds, semi-convergent term > 1/2 last term" },
	{ 34567, 100, 120, 20, 120, 1, "Exceeds bounds, semi-convergent term < 1/2 last term" },
{ 1, 30, 100, 10, 0, 1, "Closest to zero" },
{ 1, 19, 100, 10, 1, 10, "Closest to smallest non-zero" },
	{ 27, 32, 16, 16, 11, 13, "Use convergent" },
{ 1155, 7735, 255, 255, 33, 221, "Exact answer" },
{ 87, 32, 70, 32, 68, 25, "Semiconvergent, numerator limit" },
{ 14533, 4626, 15000, 2400, 7433, 2366, "Semiconvergent, denominator limit" },
};
static void get_desc(const struct rational_test_param *param, char *desc)
{
strscpy(desc, param->name, KUNIT_PARAM_DESC_SIZE);
}
/* Creates function rational_gen_params */
KUNIT_ARRAY_PARAM(rational, test_parameters, get_desc);
static void rational_test(struct kunit *test)
{
const struct rational_test_param *param = (const struct rational_test_param *)test->param_value;
unsigned long n = 0, d = 0;
rational_best_approximation(param->num, param->den, param->max_num, param->max_den, &n, &d);
KUNIT_EXPECT_EQ(test, n, param->exp_num);
KUNIT_EXPECT_EQ(test, d, param->exp_den);
}
static struct kunit_case rational_test_cases[] = {
KUNIT_CASE_PARAM(rational_test, rational_gen_params),
{}
};
static struct kunit_suite rational_test_suite = {
.name = "rational",
.test_cases = rational_test_cases,
};
kunit_test_suites(&rational_test_suite);
MODULE_LICENSE("GPL v2");
| linux-master | lib/math/rational-test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Davidlohr Bueso <[email protected]>
*
* Based on the shift-and-subtract algorithm for computing integer
* square root from Guy L. Steele.
*/
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/limits.h>
#include <linux/math.h>
/**
* int_sqrt - computes the integer square root
* @x: integer of which to calculate the sqrt
*
* Computes: floor(sqrt(x))
*/
unsigned long int_sqrt(unsigned long x)
{
unsigned long b, m, y = 0;
if (x <= 1)
return x;
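	/*
	 * Shift-and-subtract: each iteration consumes two bits of x and
	 * produces one bit of the result, e.g. int_sqrt(17) == 4.
	 */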
m = 1UL << (__fls(x) & ~1UL);
while (m != 0) {
b = y + m;
y >>= 1;
if (x >= b) {
x -= b;
y += m;
}
m >>= 2;
}
return y;
}
EXPORT_SYMBOL(int_sqrt);
#if BITS_PER_LONG < 64
/**
* int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
* is expected.
* @x: 64bit integer of which to calculate the sqrt
*/
u32 int_sqrt64(u64 x)
{
u64 b, m, y = 0;
if (x <= ULONG_MAX)
return int_sqrt((unsigned long) x);
m = 1ULL << ((fls64(x) - 1) & ~1ULL);
while (m != 0) {
b = y + m;
y >>= 1;
if (x >= b) {
x -= b;
y += m;
}
m >>= 2;
}
return y;
}
EXPORT_SYMBOL(int_sqrt64);
#endif
| linux-master | lib/math/int_sqrt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2003 Bernardo Innocenti <[email protected]>
*
* Based on former do_div() implementation from asm-parisc/div64.h:
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <[email protected]>
*
*
* Generic C version of 64bit/32bit division and modulo, with
* 64bit result and 32bit remainder.
*
* The fast case for (n>>32 == 0) is handled inline by do_div().
*
* Code generated for this function might be very inefficient
* for some CPUs. __div64_32() can be overridden by linking arch-specific
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
* or by defining a preprocessor macro in arch/include/asm/div64.h.
*/
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/log2.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
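	/*
	 * Classic shift-and-subtract long division: double b (and the
	 * matching quotient bit d) until b reaches or exceeds the remainder
	 * (or the top bit), then subtract back down one bit position at a
	 * time.
	 */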
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
u64 quotient;
if (dividend < 0) {
quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
*remainder = -*remainder;
if (divisor > 0)
quotient = -quotient;
} else {
quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
if (divisor < 0)
quotient = -quotient;
}
return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
/*
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: 64bit dividend
* @divisor: 64bit divisor
* @remainder: 64bit remainder
*
 * This implementation is comparable to the algorithm used by div64_u64.
* But this operation, which includes math for calculating the remainder,
* is kept distinct to avoid slowing down the div64_u64 operation on 32bit
* systems.
*/
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
u32 high = divisor >> 32;
u64 quot;
if (high == 0) {
u32 rem32;
quot = div_u64_rem(dividend, divisor, &rem32);
*remainder = rem32;
} else {
int n = fls(high);
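		/*
		 * Scale both operands down by 2^n so the divisor fits in
		 * 32 bits. The quotient estimate obtained this way is at
		 * most one above the true quotient, so it is decremented
		 * and then corrected upwards below if the remainder is
		 * still >= the divisor.
		 */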
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
quot--;
*remainder = dividend - quot * divisor;
if (*remainder >= divisor) {
quot++;
*remainder -= divisor;
}
}
return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif
/*
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
*
* This implementation is a modified version of the algorithm proposed
* by the book 'Hacker's Delight'. The original source and full proof
* can be found here and is available for use without restriction.
*
* 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
*/
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
u32 high = divisor >> 32;
u64 quot;
if (high == 0) {
quot = div_u64(dividend, divisor);
} else {
int n = fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
quot--;
if ((dividend - quot * divisor) >= divisor)
quot++;
}
return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
s64 quot, t;
quot = div64_u64(abs(dividend), abs(divisor));
t = (dividend ^ divisor) >> 63;
return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
#endif /* BITS_PER_LONG == 32 */
/*
* Iterative div/mod for use when dividend is not expected to be much
* bigger than divisor.
*/
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
u64 res = 0, div, rem;
int shift;
/* can a * b overflow ? */
if (ilog2(a) + ilog2(b) > 62) {
/*
* (b * a) / c is equal to
*
* (b / c) * a +
* (b % c) * a / c
*
* if nothing overflows. Can the 1st multiplication
* overflow? Yes, but we do not care: this can only
* happen if the end result can't fit in u64 anyway.
*
* So the code below does
*
* res = (b / c) * a;
* b = b % c;
*/
div = div64_u64_rem(b, c, &rem);
res = div * a;
b = rem;
shift = ilog2(a) + ilog2(b) - 62;
if (shift > 0) {
/* drop precision */
b >>= shift;
c >>= shift;
if (!c)
return res;
}
}
return res + div64_u64(a * b, c);
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
| linux-master | lib/math/div64.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/compiler.h>
#include <linux/gcd.h>
#include <linux/export.h>
#include <linux/lcm.h>
/* Lowest common multiple */
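/*
 * Computed as (a / gcd(a, b)) * b rather than (a * b) / gcd(a, b) to reduce
 * the chance of intermediate overflow; for example lcm(4, 6) == 12.
 */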
unsigned long lcm(unsigned long a, unsigned long b)
{
if (a && b)
return (a / gcd(a, b)) * b;
else
return 0;
}
EXPORT_SYMBOL_GPL(lcm);
unsigned long lcm_not_zero(unsigned long a, unsigned long b)
{
unsigned long l = lcm(a, b);
if (l)
return l;
return (b ? : a);
}
EXPORT_SYMBOL_GPL(lcm_not_zero);
| linux-master | lib/math/lcm.c |
// SPDX-License-Identifier: LGPL-2.1-or-later
/*
* Provides fixed-point logarithm operations.
*
* Copyright (C) 2006 Christoph Pfister ([email protected])
*/
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/int_log.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/bug.h>
static const unsigned short logtable[256] = {
0x0000, 0x0171, 0x02e0, 0x044e, 0x05ba, 0x0725, 0x088e, 0x09f7,
0x0b5d, 0x0cc3, 0x0e27, 0x0f8a, 0x10eb, 0x124b, 0x13aa, 0x1508,
0x1664, 0x17bf, 0x1919, 0x1a71, 0x1bc8, 0x1d1e, 0x1e73, 0x1fc6,
0x2119, 0x226a, 0x23ba, 0x2508, 0x2656, 0x27a2, 0x28ed, 0x2a37,
0x2b80, 0x2cc8, 0x2e0f, 0x2f54, 0x3098, 0x31dc, 0x331e, 0x345f,
0x359f, 0x36de, 0x381b, 0x3958, 0x3a94, 0x3bce, 0x3d08, 0x3e41,
0x3f78, 0x40af, 0x41e4, 0x4319, 0x444c, 0x457f, 0x46b0, 0x47e1,
0x4910, 0x4a3f, 0x4b6c, 0x4c99, 0x4dc5, 0x4eef, 0x5019, 0x5142,
0x526a, 0x5391, 0x54b7, 0x55dc, 0x5700, 0x5824, 0x5946, 0x5a68,
0x5b89, 0x5ca8, 0x5dc7, 0x5ee5, 0x6003, 0x611f, 0x623a, 0x6355,
0x646f, 0x6588, 0x66a0, 0x67b7, 0x68ce, 0x69e4, 0x6af8, 0x6c0c,
0x6d20, 0x6e32, 0x6f44, 0x7055, 0x7165, 0x7274, 0x7383, 0x7490,
0x759d, 0x76aa, 0x77b5, 0x78c0, 0x79ca, 0x7ad3, 0x7bdb, 0x7ce3,
0x7dea, 0x7ef0, 0x7ff6, 0x80fb, 0x81ff, 0x8302, 0x8405, 0x8507,
0x8608, 0x8709, 0x8809, 0x8908, 0x8a06, 0x8b04, 0x8c01, 0x8cfe,
0x8dfa, 0x8ef5, 0x8fef, 0x90e9, 0x91e2, 0x92db, 0x93d2, 0x94ca,
0x95c0, 0x96b6, 0x97ab, 0x98a0, 0x9994, 0x9a87, 0x9b7a, 0x9c6c,
0x9d5e, 0x9e4f, 0x9f3f, 0xa02e, 0xa11e, 0xa20c, 0xa2fa, 0xa3e7,
0xa4d4, 0xa5c0, 0xa6ab, 0xa796, 0xa881, 0xa96a, 0xaa53, 0xab3c,
0xac24, 0xad0c, 0xadf2, 0xaed9, 0xafbe, 0xb0a4, 0xb188, 0xb26c,
0xb350, 0xb433, 0xb515, 0xb5f7, 0xb6d9, 0xb7ba, 0xb89a, 0xb97a,
0xba59, 0xbb38, 0xbc16, 0xbcf4, 0xbdd1, 0xbead, 0xbf8a, 0xc065,
0xc140, 0xc21b, 0xc2f5, 0xc3cf, 0xc4a8, 0xc580, 0xc658, 0xc730,
0xc807, 0xc8de, 0xc9b4, 0xca8a, 0xcb5f, 0xcc34, 0xcd08, 0xcddc,
0xceaf, 0xcf82, 0xd054, 0xd126, 0xd1f7, 0xd2c8, 0xd399, 0xd469,
0xd538, 0xd607, 0xd6d6, 0xd7a4, 0xd872, 0xd93f, 0xda0c, 0xdad9,
0xdba5, 0xdc70, 0xdd3b, 0xde06, 0xded0, 0xdf9a, 0xe063, 0xe12c,
0xe1f5, 0xe2bd, 0xe385, 0xe44c, 0xe513, 0xe5d9, 0xe69f, 0xe765,
0xe82a, 0xe8ef, 0xe9b3, 0xea77, 0xeb3b, 0xebfe, 0xecc1, 0xed83,
0xee45, 0xef06, 0xefc8, 0xf088, 0xf149, 0xf209, 0xf2c8, 0xf387,
0xf446, 0xf505, 0xf5c3, 0xf680, 0xf73e, 0xf7fb, 0xf8b7, 0xf973,
0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47,
};
unsigned int intlog2(u32 value)
{
/**
* returns: log2(value) * 2^24
* wrong result if value = 0 (log2(0) is undefined)
*/
unsigned int msb;
unsigned int logentry;
unsigned int significand;
unsigned int interpolation;
if (unlikely(value == 0)) {
WARN_ON(1);
return 0;
}
/* first detect the msb (count begins at 0) */
msb = fls(value) - 1;
/**
	 * now we use a logtable based on the following method:
*
* log2(2^x * y) * 2^24 = x * 2^24 + log2(y) * 2^24
* where x = msb and therefore 1 <= y < 2
* first y is determined by shifting the value left
* so that msb is bit 31
* 0x00231f56 -> 0x8C7D5800
* the result is y * 2^31 -> "significand"
* then the highest 9 bits are used for a table lookup
* the highest bit is discarded because it's always set
* the highest nine bits in our example are 100011000
* so we would use the entry 0x18
*/
significand = value << (31 - msb);
logentry = (significand >> 23) % ARRAY_SIZE(logtable);
/**
	 * The last step is interpolation, needed because of the limited
	 * precision of the log table. The error is the part of the
	 * significand which isn't used for the lookup. We compute the ratio
	 * between that error and 0x800000 (the value that would reach the
	 * next table entry) and interpolate between the table entry used
	 * and the next one. The biggest possible error is 0x7fffff (in our
	 * example it is 0x7D5800), so the interpolation is
	 * (error / 0x800000) * (logtable_next - logtable_current).
	 * In the implementation the division is moved to the end for better
	 * accuracy; there is also an overflow correction when logtable_next
	 * would be entry 256.
*/
interpolation = ((significand & 0x7fffff) *
((logtable[(logentry + 1) % ARRAY_SIZE(logtable)] -
logtable[logentry]) & 0xffff)) >> 15;
/* now we return the result */
return ((msb << 24) + (logtable[logentry] << 8) + interpolation);
}
EXPORT_SYMBOL(intlog2);
unsigned int intlog10(u32 value)
{
/**
* returns: log10(value) * 2^24
* wrong result if value = 0 (log10(0) is undefined)
*/
u64 log;
if (unlikely(value == 0)) {
WARN_ON(1);
return 0;
}
log = intlog2(value);
/**
* we use the following method:
* log10(x) = log2(x) * log10(2)
*/
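	/* 646456993 is log10(2) * 2^31, rounded to the nearest integer. */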
return (log * 646456993) >> 31;
}
EXPORT_SYMBOL(intlog10);
| linux-master | lib/math/int_log.c |
// SPDX-License-Identifier: GPL-2.0
/*
* rational fractions
*
* Copyright (C) 2009 emlix GmbH, Oskar Schirmer <[email protected]>
* Copyright (C) 2019 Trent Piepho <[email protected]>
*
* helper functions when coping with rational numbers
*/
#include <linux/rational.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/limits.h>
#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
* taking into account restricted register size, e.g. to find
* appropriate values for a pll with 5 bit denominator and
* 8 bit numerator register fields, trying to set up with a
* frequency ratio of 3.1415, one would say:
*
* rational_best_approximation(31415, 10000,
* (1 << 8) - 1, (1 << 5) - 1, &n, &d);
*
* you may look at given_numerator as a fixed point number,
* with the fractional part size described in given_denominator.
*
* for theoretical background, see:
* https://en.wikipedia.org/wiki/Continued_fraction
*/
void rational_best_approximation(
unsigned long given_numerator, unsigned long given_denominator,
unsigned long max_numerator, unsigned long max_denominator,
unsigned long *best_numerator, unsigned long *best_denominator)
{
/* n/d is the starting rational, which is continually
* decreased each iteration using the Euclidean algorithm.
*
* dp is the value of d from the prior iteration.
*
* n2/d2, n1/d1, and n0/d0 are our successively more accurate
* approximations of the rational. They are, respectively,
* the current, previous, and two prior iterations of it.
*
* a is current term of the continued fraction.
*/
unsigned long n, d, n0, d0, n1, d1, n2, d2;
n = given_numerator;
d = given_denominator;
n0 = d1 = 0;
n1 = d0 = 1;
for (;;) {
unsigned long dp, a;
if (d == 0)
break;
/* Find next term in continued fraction, 'a', via
* Euclidean algorithm.
*/
dp = d;
a = n / d;
d = n % d;
n = dp;
/* Calculate the current rational approximation (aka
* convergent), n2/d2, using the term just found and
* the two prior approximations.
*/
n2 = n0 + a * n1;
d2 = d0 + a * d1;
/* If the current convergent exceeds the maxes, then
* return either the previous convergent or the
* largest semi-convergent, the final term of which is
* found below as 't'.
*/
if ((n2 > max_numerator) || (d2 > max_denominator)) {
unsigned long t = ULONG_MAX;
if (d1)
t = (max_denominator - d0) / d1;
if (n1)
t = min(t, (max_numerator - n0) / n1);
/* This tests if the semi-convergent is closer than the previous
* convergent. If d1 is zero there is no previous convergent as this
* is the 1st iteration, so always choose the semi-convergent.
*/
if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
n1 = n0 + t * n1;
d1 = d0 + t * d1;
}
break;
}
n0 = n1;
n1 = n2;
d0 = d1;
d1 = d2;
}
*best_numerator = n1;
*best_denominator = d1;
}
EXPORT_SYMBOL(rational_best_approximation);
MODULE_LICENSE("GPL v2");
| linux-master | lib/math/rational.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
struct primes {
struct rcu_head rcu;
unsigned long last, sz;
unsigned long primes[];
};
#if BITS_PER_LONG == 64
static const struct primes small_primes = {
.last = 61,
.sz = 64,
.primes = {
BIT(2) |
BIT(3) |
BIT(5) |
BIT(7) |
BIT(11) |
BIT(13) |
BIT(17) |
BIT(19) |
BIT(23) |
BIT(29) |
BIT(31) |
BIT(37) |
BIT(41) |
BIT(43) |
BIT(47) |
BIT(53) |
BIT(59) |
BIT(61)
}
};
#elif BITS_PER_LONG == 32
static const struct primes small_primes = {
.last = 31,
.sz = 32,
.primes = {
BIT(2) |
BIT(3) |
BIT(5) |
BIT(7) |
BIT(11) |
BIT(13) |
BIT(17) |
BIT(19) |
BIT(23) |
BIT(29) |
BIT(31)
}
};
#else
#error "unhandled BITS_PER_LONG"
#endif
static DEFINE_MUTEX(lock);
static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
static unsigned long selftest_max;
static bool slow_is_prime_number(unsigned long x)
{
unsigned long y = int_sqrt(x);
while (y > 1) {
if ((x % y) == 0)
break;
y--;
}
return y == 1;
}
static unsigned long slow_next_prime_number(unsigned long x)
{
while (x < ULONG_MAX && !slow_is_prime_number(++x))
;
return x;
}
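/*
 * Clear every multiple of x (other than x itself) from the bitmap p within
 * the half-open range [start, end): for example with x == 3 and [8, 16) the
 * bits for 9, 12 and 15 are cleared. Returns x so that the caller can track
 * the largest prime handled so far.
 */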
static unsigned long clear_multiples(unsigned long x,
unsigned long *p,
unsigned long start,
unsigned long end)
{
unsigned long m;
m = 2 * x;
if (m < start)
m = roundup(start, x);
while (m < end) {
__clear_bit(m, p);
m += x;
}
return x;
}
static bool expand_to_next_prime(unsigned long x)
{
const struct primes *p;
struct primes *new;
unsigned long sz, y;
	/* Bertrand's Postulate (or Chebyshev's theorem) states that if n > 3,
* there is always at least one prime p between n and 2n - 2.
* Equivalently, if n > 1, then there is always at least one prime p
* such that n < p < 2n.
*
* http://mathworld.wolfram.com/BertrandsPostulate.html
* https://en.wikipedia.org/wiki/Bertrand's_postulate
*/
sz = 2 * x;
if (sz < x)
return false;
sz = round_up(sz, BITS_PER_LONG);
new = kmalloc(sizeof(*new) + bitmap_size(sz),
GFP_KERNEL | __GFP_NOWARN);
if (!new)
return false;
mutex_lock(&lock);
p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
if (x < p->last) {
kfree(new);
goto unlock;
}
/* Where memory permits, track the primes using the
* Sieve of Eratosthenes. The sieve is to remove all multiples of known
	 * primes from the set; what remains in the set is therefore prime.
*/
bitmap_fill(new->primes, sz);
bitmap_copy(new->primes, p->primes, p->sz);
for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
new->last = clear_multiples(y, new->primes, p->sz, sz);
new->sz = sz;
BUG_ON(new->last <= x);
rcu_assign_pointer(primes, new);
if (p != &small_primes)
kfree_rcu((struct primes *)p, rcu);
unlock:
mutex_unlock(&lock);
return true;
}
static void free_primes(void)
{
const struct primes *p;
mutex_lock(&lock);
p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
if (p != &small_primes) {
rcu_assign_pointer(primes, &small_primes);
kfree_rcu((struct primes *)p, rcu);
}
mutex_unlock(&lock);
}
/**
* next_prime_number - return the next prime number
 * @x: the starting point for the search
*
* A prime number is an integer greater than 1 that is only divisible by
* itself and 1. The set of prime numbers is computed using the Sieve of
 * Eratosthenes (on finding a prime, all multiples of that prime are removed
* from the set) enabling a fast lookup of the next prime number larger than
* @x. If the sieve fails (memory limitation), the search falls back to using
 * slow trial-division, up to the value of ULONG_MAX (which is reported as the
* final prime as a sentinel).
*
* Returns: the next prime number larger than @x
*/
unsigned long next_prime_number(unsigned long x)
{
const struct primes *p;
rcu_read_lock();
p = rcu_dereference(primes);
while (x >= p->last) {
rcu_read_unlock();
if (!expand_to_next_prime(x))
return slow_next_prime_number(x);
rcu_read_lock();
p = rcu_dereference(primes);
}
x = find_next_bit(p->primes, p->last, x + 1);
rcu_read_unlock();
return x;
}
EXPORT_SYMBOL(next_prime_number);
/**
* is_prime_number - test whether the given number is prime
* @x: the number to test
*
* A prime number is an integer greater than 1 that is only divisible by
* itself and 1. Internally a cache of prime numbers is kept (to speed up
* searching for sequential primes, see next_prime_number()), but if the number
 * falls outside of that cache, its primality is tested using trial-division.
*
* Returns: true if @x is prime, false for composite numbers.
*/
bool is_prime_number(unsigned long x)
{
const struct primes *p;
bool result;
rcu_read_lock();
p = rcu_dereference(primes);
while (x >= p->sz) {
rcu_read_unlock();
if (!expand_to_next_prime(x))
return slow_is_prime_number(x);
rcu_read_lock();
p = rcu_dereference(primes);
}
result = test_bit(x, p->primes);
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL(is_prime_number);
static void dump_primes(void)
{
const struct primes *p;
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
rcu_read_lock();
p = rcu_dereference(primes);
if (buf)
bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
rcu_read_unlock();
kfree(buf);
}
static int selftest(unsigned long max)
{
unsigned long x, last;
if (!max)
return 0;
for (last = 0, x = 2; x < max; x++) {
bool slow = slow_is_prime_number(x);
bool fast = is_prime_number(x);
if (slow != fast) {
pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
x, slow ? "yes" : "no", fast ? "yes" : "no");
goto err;
}
if (!slow)
continue;
if (next_prime_number(last) != x) {
pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
last, x, next_prime_number(last));
goto err;
}
last = x;
}
pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
return 0;
err:
dump_primes();
return -EINVAL;
}
static int __init primes_init(void)
{
return selftest(selftest_max);
}
static void __exit primes_exit(void)
{
free_primes();
}
module_init(primes_init);
module_exit(primes_exit);
module_param_named(selftest, selftest_max, ulong, 0400);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
| linux-master | lib/math/prime_numbers.c |
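/*
 * Illustrative sketch, not part of the kernel tree: the same "sieve, then scan
 * for the next set position" idea that next_prime_number() uses above, but on
 * a fixed-size bool array in hosted C instead of a growable RCU-protected
 * bitmap. The function name and the LIMIT of 128 are made up for this example.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define LIMIT 128

static unsigned long next_prime_after(unsigned long x)
{
	bool composite[LIMIT];
	unsigned long i, m;

	memset(composite, 0, sizeof(composite));
	composite[0] = composite[1] = true;
	for (i = 2; i < LIMIT; i++)
		for (m = 2 * i; m < LIMIT; m += i)	/* clear_multiples() analogue */
			composite[m] = true;
	for (i = x + 1; i < LIMIT; i++)
		if (!composite[i])
			return i;
	return 0;	/* sieve too small; the kernel code grows it instead */
}

int main(void)
{
	printf("%lu %lu %lu\n", next_prime_after(1),
	       next_prime_after(7), next_prime_after(61));	/* 2 11 67 */
	return 0;
}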
// SPDX-License-Identifier: GPL-2.0
/*
* An integer based power function
*
* Derived from drivers/video/backlight/pwm_bl.c
*/
#include <linux/export.h>
#include <linux/math.h>
#include <linux/types.h>
/**
* int_pow - computes the exponentiation of the given base and exponent
* @base: base which will be raised to the given power
* @exp: power to be raised to
*
* Computes: pow(base, exp), i.e. @base raised to the @exp power
*/
u64 int_pow(u64 base, unsigned int exp)
{
u64 result = 1;
while (exp) {
if (exp & 1)
result *= base;
exp >>= 1;
base *= base;
}
return result;
}
EXPORT_SYMBOL_GPL(int_pow);
| linux-master | lib/math/int_pow.c |
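/*
 * Illustrative sketch, not part of the kernel tree: the same square-and-
 * multiply loop as int_pow() above, in hosted C, with a couple of spot
 * checks. Each set bit of the exponent contributes the running square of
 * the base to the result.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ipow(uint64_t base, unsigned int exp)
{
	uint64_t result = 1;

	while (exp) {
		if (exp & 1)		/* this bit contributes base^(2^k) */
			result *= base;
		exp >>= 1;
		base *= base;		/* base^(2^k) -> base^(2^(k+1)) */
	}
	return result;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)ipow(3, 5),		/* 243 */
	       (unsigned long long)ipow(2, 20));	/* 1048576 */
	return 0;
}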
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/reciprocal_div.h>
/*
* For a description of the algorithm please have a look at
* include/linux/reciprocal_div.h
*/
struct reciprocal_value reciprocal_value(u32 d)
{
struct reciprocal_value R;
u64 m;
int l;
l = fls(d - 1);
m = ((1ULL << 32) * ((1ULL << l) - d));
do_div(m, d);
++m;
R.m = (u32)m;
R.sh1 = min(l, 1);
R.sh2 = max(l - 1, 0);
return R;
}
EXPORT_SYMBOL(reciprocal_value);
struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
{
struct reciprocal_value_adv R;
u32 l, post_shift;
u64 mhigh, mlow;
/* ceil(log2(d)) */
l = fls(d - 1);
/* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
* be handled before calling "reciprocal_value_adv", please see the
* comment at include/linux/reciprocal_div.h.
*/
WARN(l == 32,
"ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
d, __func__);
post_shift = l;
mlow = 1ULL << (32 + l);
do_div(mlow, d);
mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
do_div(mhigh, d);
for (; post_shift > 0; post_shift--) {
u64 lo = mlow >> 1, hi = mhigh >> 1;
if (lo >= hi)
break;
mlow = lo;
mhigh = hi;
}
R.m = (u32)mhigh;
R.sh = post_shift;
R.exp = l;
R.is_wide_m = mhigh > U32_MAX;
return R;
}
EXPORT_SYMBOL(reciprocal_value_adv);
| linux-master | lib/math/reciprocal_div.c |
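/*
 * Illustrative sketch, not part of the kernel tree: how the {m, sh1, sh2}
 * triple computed by reciprocal_value() above is consumed. The divide step
 * follows the multiply-high-then-shift scheme described in
 * include/linux/reciprocal_div.h; the hosted-C names here are made up and
 * fls32() is a plain-C stand-in for the kernel's fls().
 */
#include <stdint.h>
#include <stdio.h>

struct recip { uint32_t m; uint8_t sh1, sh2; };

static int fls32(uint32_t x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static struct recip recip_value(uint32_t d)
{
	struct recip R;
	int l = fls32(d - 1);
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	R.m = (uint32_t)m;
	R.sh1 = l < 1 ? l : 1;		/* min(l, 1) */
	R.sh2 = l > 1 ? l - 1 : 0;	/* max(l - 1, 0) */
	return R;
}

static uint32_t recip_divide(uint32_t a, struct recip R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

int main(void)
{
	struct recip R = recip_value(7);

	printf("%u %u\n", recip_divide(100, R), recip_divide(6, R));	/* 14 0 */
	return 0;
}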
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
* Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include <linux/module.h>
#include "common/huf.h"
#include "common/fse.h"
#include "common/zstd_internal.h"
// Export symbols shared by compress and decompress into a common module
#undef ZSTD_isError /* defined within zstd_internal.h */
EXPORT_SYMBOL_GPL(FSE_readNCount);
EXPORT_SYMBOL_GPL(HUF_readStats);
EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
EXPORT_SYMBOL_GPL(ZSTD_isError);
EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
EXPORT_SYMBOL_GPL(ZSTD_customMalloc);
EXPORT_SYMBOL_GPL(ZSTD_customCalloc);
EXPORT_SYMBOL_GPL(ZSTD_customFree);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Common");
| linux-master | lib/zstd/zstd_common_module.c |
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
* Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/zstd.h>
#include "common/zstd_deps.h"
/* Common symbols. zstd_compress must depend on zstd_decompress. */
unsigned int zstd_is_error(size_t code)
{
return ZSTD_isError(code);
}
EXPORT_SYMBOL(zstd_is_error);
zstd_error_code zstd_get_error_code(size_t code)
{
return ZSTD_getErrorCode(code);
}
EXPORT_SYMBOL(zstd_get_error_code);
const char *zstd_get_error_name(size_t code)
{
return ZSTD_getErrorName(code);
}
EXPORT_SYMBOL(zstd_get_error_name);
/* Decompression symbols. */
size_t zstd_dctx_workspace_bound(void)
{
return ZSTD_estimateDCtxSize();
}
EXPORT_SYMBOL(zstd_dctx_workspace_bound);
zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
return NULL;
return ZSTD_initStaticDCtx(workspace, workspace_size);
}
EXPORT_SYMBOL(zstd_init_dctx);
size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size)
{
return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
}
EXPORT_SYMBOL(zstd_decompress_dctx);
size_t zstd_dstream_workspace_bound(size_t max_window_size)
{
return ZSTD_estimateDStreamSize(max_window_size);
}
EXPORT_SYMBOL(zstd_dstream_workspace_bound);
zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
size_t workspace_size)
{
if (workspace == NULL)
return NULL;
(void)max_window_size;
return ZSTD_initStaticDStream(workspace, workspace_size);
}
EXPORT_SYMBOL(zstd_init_dstream);
size_t zstd_reset_dstream(zstd_dstream *dstream)
{
return ZSTD_resetDStream(dstream);
}
EXPORT_SYMBOL(zstd_reset_dstream);
size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
zstd_in_buffer *input)
{
return ZSTD_decompressStream(dstream, output, input);
}
EXPORT_SYMBOL(zstd_decompress_stream);
size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
{
return ZSTD_findFrameCompressedSize(src, src_size);
}
EXPORT_SYMBOL(zstd_find_frame_compressed_size);
size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
size_t src_size)
{
return ZSTD_getFrameHeader(header, src, src_size);
}
EXPORT_SYMBOL(zstd_get_frame_header);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Decompressor");
| linux-master | lib/zstd/zstd_decompress_module.c |
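/*
 * Illustrative sketch, not part of the kernel tree: one-shot decompression
 * through the wrappers exported above. The vmalloc()-per-call workspace and
 * the error mapping are assumptions made for this example; in-kernel users
 * normally preallocate and reuse the workspace.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t example_zstd_decompress(const void *src, size_t src_len,
				       void *dst, size_t dst_len)
{
	size_t const wksp_size = zstd_dctx_workspace_bound();
	void *wksp = vmalloc(wksp_size);
	zstd_dctx *dctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;
	dctx = zstd_init_dctx(wksp, wksp_size);
	if (!dctx) {
		vfree(wksp);
		return -EINVAL;
	}
	ret = zstd_decompress_dctx(dctx, dst, dst_len, src, src_len);
	vfree(wksp);
	return zstd_is_error(ret) ? -EIO : (ssize_t)ret;
}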
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
* Copyright (c) Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/zstd.h>
#include "common/zstd_deps.h"
#include "common/zstd_internal.h"
#define ZSTD_FORWARD_IF_ERR(ret) \
do { \
size_t const __ret = (ret); \
if (ZSTD_isError(__ret)) \
return __ret; \
} while (0)
static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters,
unsigned long long pledged_src_size)
{
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(
cctx, ZSTD_reset_session_and_parameters));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(
cctx, pledged_src_size));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_windowLog, parameters->cParams.windowLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_hashLog, parameters->cParams.hashLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_chainLog, parameters->cParams.chainLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_searchLog, parameters->cParams.searchLog));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_minMatch, parameters->cParams.minMatch));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_targetLength, parameters->cParams.targetLength));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_strategy, parameters->cParams.strategy));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag));
ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag));
return 0;
}
int zstd_min_clevel(void)
{
return ZSTD_minCLevel();
}
EXPORT_SYMBOL(zstd_min_clevel);
int zstd_max_clevel(void)
{
return ZSTD_maxCLevel();
}
EXPORT_SYMBOL(zstd_max_clevel);
size_t zstd_compress_bound(size_t src_size)
{
return ZSTD_compressBound(src_size);
}
EXPORT_SYMBOL(zstd_compress_bound);
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size)
{
return ZSTD_getParams(level, estimated_src_size, 0);
}
EXPORT_SYMBOL(zstd_get_params);
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCCtxSize_usingCParams(*cparams);
}
EXPORT_SYMBOL(zstd_cctx_workspace_bound);
zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
{
if (workspace == NULL)
return NULL;
return ZSTD_initStaticCCtx(workspace, workspace_size);
}
EXPORT_SYMBOL(zstd_init_cctx);
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters)
{
ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size));
return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size);
}
EXPORT_SYMBOL(zstd_compress_cctx);
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
{
return ZSTD_estimateCStreamSize_usingCParams(*cparams);
}
EXPORT_SYMBOL(zstd_cstream_workspace_bound);
zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
{
zstd_cstream *cstream;
if (workspace == NULL)
return NULL;
cstream = ZSTD_initStaticCStream(workspace, workspace_size);
if (cstream == NULL)
return NULL;
/* 0 means unknown in linux zstd API but means 0 in new zstd API */
if (pledged_src_size == 0)
pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
if (ZSTD_isError(zstd_cctx_init(cstream, parameters, pledged_src_size)))
return NULL;
return cstream;
}
EXPORT_SYMBOL(zstd_init_cstream);
size_t zstd_reset_cstream(zstd_cstream *cstream,
unsigned long long pledged_src_size)
{
if (pledged_src_size == 0)
pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only) );
ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_setPledgedSrcSize(cstream, pledged_src_size) );
return 0;
}
EXPORT_SYMBOL(zstd_reset_cstream);
size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
zstd_in_buffer *input)
{
return ZSTD_compressStream(cstream, output, input);
}
EXPORT_SYMBOL(zstd_compress_stream);
size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
{
return ZSTD_flushStream(cstream, output);
}
EXPORT_SYMBOL(zstd_flush_stream);
size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
{
return ZSTD_endStream(cstream, output);
}
EXPORT_SYMBOL(zstd_end_stream);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");
| linux-master | lib/zstd/zstd_compress_module.c |
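/*
 * Illustrative sketch, not part of the kernel tree: one-shot compression
 * through the wrappers exported above, with parameters chosen from the
 * compression level via zstd_get_params(). The vmalloc()-per-call workspace
 * and the error mapping are assumptions made for this example.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>

static ssize_t example_zstd_compress(const void *src, size_t src_len,
				     void *dst, size_t dst_capacity, int level)
{
	zstd_parameters params = zstd_get_params(level, src_len);
	size_t const wksp_size = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = vmalloc(wksp_size);
	zstd_cctx *cctx;
	size_t ret;

	if (!wksp)
		return -ENOMEM;
	cctx = zstd_init_cctx(wksp, wksp_size);
	if (!cctx) {
		vfree(wksp);
		return -EINVAL;
	}
	ret = zstd_compress_cctx(cctx, dst, dst_capacity, src, src_len, &params);
	vfree(wksp);
	return zstd_is_error(ret) ? -EIO : (ssize_t)ret;
}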
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#include "zstd_compress_superblock.h"
#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
#include "hist.h" /* HIST_countFast_wksp */
#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
/* ZSTD_compressSubBlock_literal() :
* Compresses literals section for a sub-block.
* When we have to write the Huffman table we will sometimes choose a header
* size larger than necessary. This is because we have to pick the header size
* before we know the table size + compressed size, so we have a bound on the
* table size. If we guessed incorrectly, we fall back to uncompressed literals.
*
* We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
* in writing the header, otherwise it is set to 0.
*
* hufMetadata->hType has literals block type info.
* If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
* If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
* If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
 * If it is set_repeat, first sub-block's literals section will be Treeless_Literals_Block
* and the following sub-blocks' literals sections will be Treeless_Literals_Block.
* @return : compressed size of literals section of a sub-block
 * Or 0 if it is unable to compress.
* Or error code */
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
const BYTE* literals, size_t litSize,
void* dst, size_t dstSize,
const int bmi2, int writeEntropy, int* entropyWritten)
{
size_t const header = writeEntropy ? 200 : 0;
size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart + lhSize;
U32 const singleStream = lhSize == 3;
symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
size_t cLitSize = 0;
(void)bmi2; /* TODO bmi2... */
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
*entropyWritten = 0;
if (litSize == 0 || hufMetadata->hType == set_basic) {
DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
} else if (hufMetadata->hType == set_rle) {
DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
}
assert(litSize > 0);
assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
if (writeEntropy && hufMetadata->hType == set_compressed) {
ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
op += hufMetadata->hufDesSize;
cLitSize += hufMetadata->hufDesSize;
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
}
/* TODO bmi2 */
{ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
: HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
op += cSize;
cLitSize += cSize;
if (cSize == 0 || ERR_isError(cSize)) {
DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
return 0;
}
/* If we expand and we aren't writing a header then emit uncompressed */
if (!writeEntropy && cLitSize >= litSize) {
DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
}
/* If we are writing headers then allow expansion that doesn't change our header size. */
if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
assert(cLitSize > litSize);
DEBUGLOG(5, "Literals expanded beyond allowed header size");
return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
}
DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
}
/* Build header */
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
{ U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
case 4: /* 2 - 2 - 14 - 14 */
{ U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
MEM_writeLE32(ostart, lhc);
break;
}
case 5: /* 2 - 2 - 18 - 18 */
{ U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
MEM_writeLE32(ostart, lhc);
ostart[4] = (BYTE)(cLitSize >> 10);
break;
}
default: /* not possible : lhSize is {3,4,5} */
assert(0);
}
*entropyWritten = 1;
DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
return op-ostart;
}
static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
const seqDef* const sstart = sequences;
const seqDef* const send = sequences + nbSeq;
const seqDef* sp = sstart;
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
(void)(litLengthSum); /* suppress unused variable warning on some environments */
while (send-sp > 0) {
ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
litLengthSum += seqLen.litLength;
matchLengthSum += seqLen.matchLength;
sp++;
}
assert(litLengthSum <= litSize);
if (!lastSequence) {
assert(litLengthSum == litSize);
}
return matchLengthSum + litSize;
}
/* ZSTD_compressSubBlock_sequences() :
* Compresses sequences section for a sub-block.
* fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
* symbol compression modes for the super-block.
* The first successfully compressed block will have these in its header.
* We set entropyWritten=1 when we succeed in compressing the sequences.
* The following sub-blocks will always have repeat mode.
* @return : compressed size of sequences section of a sub-block
* Or 0 if it is unable to compress
* Or error code. */
static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
const seqDef* sequences, size_t nbSeq,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const int bmi2, int writeEntropy, int* entropyWritten)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
BYTE* seqHead;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
*entropyWritten = 0;
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "");
if (nbSeq < 0x7F)
*op++ = (BYTE)nbSeq;
else if (nbSeq < LONGNBSEQ)
op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
else
op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
if (nbSeq==0) {
return op - ostart;
}
/* seqHead : flags for FSE encoding type */
seqHead = op++;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
if (writeEntropy) {
const U32 LLtype = fseMetadata->llType;
const U32 Offtype = fseMetadata->ofType;
const U32 MLtype = fseMetadata->mlType;
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
op += fseMetadata->fseTablesSize;
} else {
const U32 repeat = set_repeat;
*seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, oend - op,
fseTables->matchlengthCTable, mlCode,
fseTables->offcodeCTable, ofCode,
fseTables->litlengthCTable, llCode,
sequences, nbSeq,
longOffsets, bmi2);
FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
op += bitstreamSize;
/* zstd versions <= 1.3.4 mistakenly report corruption when
* FSE_readNCount() receives a buffer < 4 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1146.
* This can happen when the last set_compressed table present is 2
* bytes and the bitstream is only one byte.
* In this exceedingly rare case, we will simply emit an uncompressed
* block, since it isn't worth optimizing.
*/
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
/* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
assert(fseMetadata->lastCountSize + bitstreamSize == 3);
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
"emitting an uncompressed block.");
return 0;
}
#endif
DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
}
/* zstd versions <= 1.4.0 mistakenly report error when
* sequences section body size is less than 3 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1664.
* This can happen when the previous sequences section block is compressed
* with rle mode and the current block's sequences section is compressed
* with repeat mode where sequences section body size can be 1 byte.
*/
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
if (op-seqHead < 4) {
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
"an uncompressed block when sequences are < 4 bytes");
return 0;
}
#endif
*entropyWritten = 1;
return op - ostart;
}
/* ZSTD_compressSubBlock() :
* Compresses a single sub-block.
* @return : compressed size of the sub-block
* Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
const seqDef* sequences, size_t nbSeq,
const BYTE* literals, size_t litSize,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const int bmi2,
int writeLitEntropy, int writeSeqEntropy,
int* litEntropyWritten, int* seqEntropyWritten,
U32 lastBlock)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart + ZSTD_blockHeaderSize;
DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
{ size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
&entropyMetadata->hufMetadata, literals, litSize,
op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
if (cLitSize == 0) return 0;
op += cLitSize;
}
{ size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
&entropyMetadata->fseMetadata,
sequences, nbSeq,
llCode, mlCode, ofCode,
cctxParams,
op, oend-op,
bmi2, writeSeqEntropy, seqEntropyWritten);
FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
if (cSeqSize == 0) return 0;
op += cSeqSize;
}
/* Write block header */
{ size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(ostart, cBlockHeader24);
}
return op-ostart;
}
static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
const ZSTD_hufCTables_t* huf,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
unsigned* const countWksp = (unsigned*)workspace;
unsigned maxSymbolValue = 255;
size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
if (hufMetadata->hType == set_basic) return litSize;
else if (hufMetadata->hType == set_rle) return 1;
else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
if (ZSTD_isError(largest)) return litSize;
{ size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
return cLitSizeEstimate + literalSectionHeaderSize;
} }
assert(0); /* impossible */
return 0;
}
static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
const BYTE* codeTable, unsigned maxCode,
size_t nbSeq, const FSE_CTable* fseCTable,
const U8* additionalBits,
short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
void* workspace, size_t wkspSize)
{
unsigned* const countWksp = (unsigned*)workspace;
const BYTE* ctp = codeTable;
const BYTE* const ctStart = ctp;
const BYTE* const ctEnd = ctStart + nbSeq;
size_t cSymbolTypeSizeEstimateInBits = 0;
unsigned max = maxCode;
HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
if (type == set_basic) {
/* We selected this encoding type, so it must be valid. */
assert(max <= defaultMax);
cSymbolTypeSizeEstimateInBits = max <= defaultMax
? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
: ERROR(GENERIC);
} else if (type == set_rle) {
cSymbolTypeSizeEstimateInBits = 0;
} else if (type == set_compressed || type == set_repeat) {
cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
}
if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
while (ctp < ctEnd) {
if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
ctp++;
}
return cSymbolTypeSizeEstimateInBits / 8;
}
static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
size_t cSeqSizeEstimate = 0;
if (nbSeq == 0) return sequencesSectionHeaderSize;
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
nbSeq, fseTables->offcodeCTable, NULL,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
nbSeq, fseTables->litlengthCTable, LL_bits,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
nbSeq, fseTables->matchlengthCTable, ML_bits,
ML_defaultNorm, ML_defaultNormLog, MaxML,
workspace, wkspSize);
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize,
int writeLitEntropy, int writeSeqEntropy) {
size_t cSizeEstimate = 0;
cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
&entropy->huf, &entropyMetadata->hufMetadata,
workspace, wkspSize, writeLitEntropy);
cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
workspace, wkspSize, writeSeqEntropy);
return cSizeEstimate + ZSTD_blockHeaderSize;
}
static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
return 1;
if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
return 1;
if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
return 1;
return 0;
}
/* ZSTD_compressSubBlock_multi() :
* Breaks super-block into multiple sub-blocks and compresses them.
* Entropy will be written to the first block.
* The following blocks will use repeat mode to compress.
* All sub-blocks are compressed blocks (no raw or rle blocks).
* @return : compressed size of the super block (which is multiple ZSTD blocks)
* Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
const ZSTD_compressedBlockState_t* prevCBlock,
ZSTD_compressedBlockState_t* nextCBlock,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const int bmi2, U32 lastBlock,
void* workspace, size_t wkspSize)
{
const seqDef* const sstart = seqStorePtr->sequencesStart;
const seqDef* const send = seqStorePtr->sequences;
const seqDef* sp = sstart;
const BYTE* const lstart = seqStorePtr->litStart;
const BYTE* const lend = seqStorePtr->lit;
const BYTE* lp = lstart;
BYTE const* ip = (BYTE const*)src;
BYTE const* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
const BYTE* llCodePtr = seqStorePtr->llCode;
const BYTE* mlCodePtr = seqStorePtr->mlCode;
const BYTE* ofCodePtr = seqStorePtr->ofCode;
size_t targetCBlockSize = cctxParams->targetCBlockSize;
size_t litSize, seqCount;
int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
int writeSeqEntropy = 1;
int lastSequence = 0;
DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
(unsigned)(lend-lp), (unsigned)(send-sstart));
litSize = 0;
seqCount = 0;
do {
size_t cBlockSizeEstimate = 0;
if (sstart == send) {
lastSequence = 1;
} else {
const seqDef* const sequence = sp + seqCount;
lastSequence = sequence == send - 1;
litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
seqCount++;
}
if (lastSequence) {
assert(lp <= lend);
assert(litSize <= (size_t)(lend - lp));
litSize = (size_t)(lend - lp);
}
/* I think there is an optimization opportunity here.
* Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
	 * since it recalculates the estimate from scratch.
* For example, it would recount literal distribution and symbol codes every time.
*/
cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
&nextCBlock->entropy, entropyMetadata,
workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
int litEntropyWritten = 0;
int seqEntropyWritten = 0;
const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
sp, seqCount,
lp, litSize,
llCodePtr, mlCodePtr, ofCodePtr,
cctxParams,
op, oend-op,
bmi2, writeLitEntropy, writeSeqEntropy,
&litEntropyWritten, &seqEntropyWritten,
lastBlock && lastSequence);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
if (cSize > 0 && cSize < decompressedSize) {
DEBUGLOG(5, "Committed the sub-block");
assert(ip + decompressedSize <= iend);
ip += decompressedSize;
sp += seqCount;
lp += litSize;
op += cSize;
llCodePtr += seqCount;
mlCodePtr += seqCount;
ofCodePtr += seqCount;
litSize = 0;
seqCount = 0;
/* Entropy only needs to be written once */
if (litEntropyWritten) {
writeLitEntropy = 0;
}
if (seqEntropyWritten) {
writeSeqEntropy = 0;
}
}
}
} while (!lastSequence);
if (writeLitEntropy) {
DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
}
if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
/* If we haven't written our entropy tables, then we've violated our contract and
* must emit an uncompressed block.
*/
DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
return 0;
}
if (ip < iend) {
size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
assert(cSize != 0);
op += cSize;
/* We have to regenerate the repcodes because we've skipped some sequences */
if (sp < send) {
seqDef const* seq;
repcodes_t rep;
ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
for (seq = sstart; seq < sp; ++seq) {
ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
}
ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
}
}
DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
return op-ostart;
}
size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
void const* src, size_t srcSize,
unsigned lastBlock) {
ZSTD_entropyCTablesMetadata_t entropyMetadata;
FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
&entropyMetadata,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
return ZSTD_compressSubBlock_multi(&zc->seqStore,
zc->blockState.prevCBlock,
zc->blockState.nextCBlock,
&entropyMetadata,
&zc->appliedParams,
dst, dstCapacity,
src, srcSize,
zc->bmi2, lastBlock,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
| linux-master | lib/zstd/compress/zstd_compress_superblock.c |
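/*
 * Illustrative sketch, not part of the kernel tree: the "2 - 2 - 10 - 10"
 * 3-byte literals-section header packed by ZSTD_compressSubBlock_literal()
 * above (block type, size-format/stream bits, regenerated size, compressed
 * size), written out little-endian like MEM_writeLE24(). Hosted C, names
 * made up for this example.
 */
#include <stdint.h>
#include <stdio.h>

static void pack_lh3(uint8_t out[3], unsigned int hType,
		     unsigned int singleStream,
		     unsigned int litSize, unsigned int cLitSize)
{
	/* same field layout as the lhSize == 3 case in the switch above */
	uint32_t lhc = hType + ((!singleStream) << 2)
		     + (litSize << 4) + (cLitSize << 14);

	out[0] = (uint8_t)lhc;
	out[1] = (uint8_t)(lhc >> 8);
	out[2] = (uint8_t)(lhc >> 16);
}

int main(void)
{
	uint8_t h[3];
	uint32_t lhc;

	pack_lh3(h, 2 /* set_compressed */, 1, 700, 300);
	lhc = h[0] | ((uint32_t)h[1] << 8) | ((uint32_t)h[2] << 16);
	printf("type=%u litSize=%u cLitSize=%u\n",
	       lhc & 3, (lhc >> 4) & 0x3FF, lhc >> 14);	/* 2 700 300 */
	return 0;
}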
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_compress_internal.h"
#include "zstd_double_fast.h"
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashLarge = ms->hashTable;
U32 const hBitsL = cParams->hashLog;
U32 const mls = cParams->minMatch;
U32* const hashSmall = ms->chainTable;
U32 const hBitsS = cParams->chainLog;
const BYTE* const base = ms->window.base;
const BYTE* ip = base + ms->nextToUpdate;
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
/* Always insert every fastHashFillStep position into the hash tables.
* Insert the other positions into the large hash table if their entry
* is empty.
*/
for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
U32 i;
for (i = 0; i < fastHashFillStep; ++i) {
size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
if (i == 0)
hashSmall[smHash] = curr + i;
if (i == 0 || hashLarge[lgHash] == 0)
hashLarge[lgHash] = curr + i;
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
} }
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
const U32 hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
const U32 hBitsS = cParams->chainLog;
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
/* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved = 0;
size_t mLength;
U32 offset;
U32 curr;
/* how many positions to search before increasing step size */
const size_t kStepIncr = 1 << kSearchStrength;
/* the position at which to increment the step size if no match is found */
const BYTE* nextStep;
size_t step; /* the current step size */
size_t hl0; /* the long hash at ip */
size_t hl1; /* the long hash at ip1 */
U32 idxl0; /* the long match index for ip */
U32 idxl1; /* the long match index for ip1 */
const BYTE* matchl0; /* the long match for ip */
const BYTE* matchs0; /* the short match for ip */
const BYTE* matchl1; /* the long match for ip1 */
const BYTE* ip = istart; /* the current position */
const BYTE* ip1; /* the next position */
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
/* init */
ip += ((ip - prefixLowest) == 0);
{
U32 const current = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
U32 const maxRep = current - windowLow;
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
/* Outer Loop: one iteration per match found and stored */
while (1) {
step = 1;
nextStep = ip + kStepIncr;
ip1 = ip + step;
if (ip1 > ilimit) {
goto _cleanup;
}
hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
idxl0 = hashLong[hl0];
matchl0 = base + idxl0;
/* Inner Loop: one iteration per search / position */
do {
const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
const U32 idxs0 = hashSmall[hs0];
curr = (U32)(ip-base);
matchs0 = base + idxs0;
hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */
/* check noDict repcode */
if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
goto _match_stored;
}
hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
if (idxl0 > prefixLowestIndex) {
/* check prefix long match */
if (MEM_read64(matchl0) == MEM_read64(ip)) {
mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
offset = (U32)(ip-matchl0);
while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
goto _match_found;
}
}
idxl1 = hashLong[hl1];
matchl1 = base + idxl1;
if (idxs0 > prefixLowestIndex) {
/* check prefix short match */
if (MEM_read32(matchs0) == MEM_read32(ip)) {
goto _search_next_long;
}
}
if (ip1 >= nextStep) {
PREFETCH_L1(ip1 + 64);
PREFETCH_L1(ip1 + 128);
step++;
nextStep += kStepIncr;
}
ip = ip1;
ip1 += step;
hl0 = hl1;
idxl0 = idxl1;
matchl0 = matchl1;
#if defined(__aarch64__)
PREFETCH_L1(ip+256);
#endif
} while (ip1 <= ilimit);
_cleanup:
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
rep[1] = offset_2 ? offset_2 : offsetSaved;
/* Return the last literals size */
return (size_t)(iend - anchor);
_search_next_long:
/* check prefix long +1 match */
if (idxl1 > prefixLowestIndex) {
if (MEM_read64(matchl1) == MEM_read64(ip1)) {
ip = ip1;
mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
offset = (U32)(ip-matchl1);
while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
goto _match_found;
}
}
/* if no long +1 match, explore the short match we found */
mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
offset = (U32)(ip - matchs0);
while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
/* fall-through */
_match_found: /* requires ip, offset, mLength */
offset_2 = offset_1;
offset_1 = offset;
if (step < 4) {
/* It is unsafe to write this value back to the hashtable when ip1 is
* greater than or equal to the new ip we will have after we're done
* processing this match. Rather than perform that test directly
* (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
             * more predictable test. The minimum match length, even for a short
             * match, is 4 bytes, so as long as step (the initial distance
             * between ip and ip1) is less than 4, we know ip1 < new ip. */
hashLong[hl1] = (U32)(ip1 - base);
}
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
_match_stored:
/* match found */
ip += mLength;
anchor = ip;
if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
{ U32 const indexToInsert = curr+2;
hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
}
/* check immediate repcode */
while ( (ip <= ilimit)
&& ( (offset_2>0)
& (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
/* store sequence */
size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
ip += rLength;
anchor = ip;
continue; /* faster when present ... (?) */
}
}
}
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
const U32 hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
const U32 hBitsS = cParams->chainLog;
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
/* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved = 0;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
const U32* const dictHashLong = dms->hashTable;
const U32* const dictHashSmall = dms->chainTable;
const U32 dictStartIndex = dms->window.dictLimit;
const BYTE* const dictBase = dms->window.base;
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
const U32 dictHBitsL = dictCParams->hashLog;
const U32 dictHBitsS = dictCParams->chainLog;
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
/* if a dictionary is attached, it must be within window range */
assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
/* init */
ip += (dictAndPrefixLength == 0);
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
size_t mLength;
U32 offset;
size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
U32 const curr = (U32)(ip-base);
U32 const matchIndexL = hashLong[h2];
U32 matchIndexS = hashSmall[h];
const BYTE* matchLong = base + matchIndexL;
const BYTE* match = base + matchIndexS;
const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
/* check repcode */
if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
goto _match_stored;
}
if (matchIndexL > prefixLowestIndex) {
/* check prefix long match */
if (MEM_read64(matchLong) == MEM_read64(ip)) {
mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
offset = (U32)(ip-matchLong);
while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
goto _match_found;
}
} else {
/* check dictMatchState long match */
U32 const dictMatchIndexL = dictHashLong[dictHL];
const BYTE* dictMatchL = dictBase + dictMatchIndexL;
assert(dictMatchL < dictEnd);
if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
goto _match_found;
} }
if (matchIndexS > prefixLowestIndex) {
/* check prefix short match */
if (MEM_read32(match) == MEM_read32(ip)) {
goto _search_next_long;
}
} else {
/* check dictMatchState short match */
U32 const dictMatchIndexS = dictHashSmall[dictHS];
match = dictBase + dictMatchIndexS;
matchIndexS = dictMatchIndexS + dictIndexDelta;
if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
goto _search_next_long;
} }
ip += ((ip-anchor) >> kSearchStrength) + 1;
#if defined(__aarch64__)
PREFETCH_L1(ip+256);
#endif
continue;
_search_next_long:
{ size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
U32 const matchIndexL3 = hashLong[hl3];
const BYTE* matchL3 = base + matchIndexL3;
hashLong[hl3] = curr + 1;
/* check prefix long +1 match */
if (matchIndexL3 > prefixLowestIndex) {
if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
ip++;
offset = (U32)(ip-matchL3);
while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
goto _match_found;
}
} else {
/* check dict long +1 match */
U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
assert(dictMatchL3 < dictEnd);
if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
ip++;
offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
goto _match_found;
} } }
/* if no long +1 match, explore the short match we found */
if (matchIndexS < prefixLowestIndex) {
mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
offset = (U32)(curr - matchIndexS);
while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
} else {
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
offset = (U32)(ip - match);
while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
}
_match_found:
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
_match_stored:
/* match found */
ip += mLength;
anchor = ip;
if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
{ U32 const indexToInsert = curr+2;
hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
}
/* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
dictBase + repIndex2 - dictIndexDelta :
base + repIndex2;
if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
}
}
} /* while (ip < ilimit) */
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
rep[1] = offset_2 ? offset_2 : offsetSaved;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
}
ZSTD_GEN_DFAST_FN(noDict, 4)
ZSTD_GEN_DFAST_FN(noDict, 5)
ZSTD_GEN_DFAST_FN(noDict, 6)
ZSTD_GEN_DFAST_FN(noDict, 7)
ZSTD_GEN_DFAST_FN(dictMatchState, 4)
ZSTD_GEN_DFAST_FN(dictMatchState, 5)
ZSTD_GEN_DFAST_FN(dictMatchState, 6)
ZSTD_GEN_DFAST_FN(dictMatchState, 7)
size_t ZSTD_compressBlock_doubleFast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
}
}
size_t ZSTD_compressBlock_doubleFast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
const U32 mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
}
}
static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
U32 const hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
U32 const hBitsS = cParams->chainLog;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
const U32 dictLimit = ms->window.dictLimit;
const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dictBase + prefixStartIndex;
U32 offset_1=rep[0], offset_2=rep[1];
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
/* if extDict is invalidated due to maxDistance, switch to "regular" variant */
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
const U32 matchIndex = hashSmall[hSmall];
const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
const BYTE* match = matchBase + matchIndex;
const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
const U32 matchLongIndex = hashLong[hLong];
const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
const BYTE* matchLong = matchLongBase + matchLongIndex;
const U32 curr = (U32)(ip-base);
const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
size_t mLength;
hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
& (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
} else {
if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
U32 offset;
mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
offset = curr - matchLongIndex;
while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
} else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
U32 const matchIndex3 = hashLong[h3];
const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
const BYTE* match3 = match3Base + matchIndex3;
U32 offset;
hashLong[h3] = curr + 1;
if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
ip++;
offset = curr+1 - matchIndex3;
while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
} else {
const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
offset = curr - matchIndex;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
}
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
} else {
ip += ((ip-anchor) >> kSearchStrength) + 1;
continue;
} }
/* move to next sequence start */
ip += mLength;
anchor = ip;
if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
{ U32 const indexToInsert = curr+2;
hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
}
/* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
& (offset_2 <= current2 - dictStartIndex))
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
} } }
/* save reps for next block */
rep[0] = offset_1;
rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
ZSTD_GEN_DFAST_FN(extDict, 4)
ZSTD_GEN_DFAST_FN(extDict, 5)
ZSTD_GEN_DFAST_FN(extDict, 6)
ZSTD_GEN_DFAST_FN(extDict, 7)
size_t ZSTD_compressBlock_doubleFast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
}
}
| linux-master | lib/zstd/compress/zstd_double_fast.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hBits = cParams->hashLog;
U32 const mls = cParams->minMatch;
const BYTE* const base = ms->window.base;
const BYTE* ip = base + ms->nextToUpdate;
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
hashTable[hash0] = curr;
if (dtlm == ZSTD_dtlm_fast) continue;
/* Only load extra positions for ZSTD_dtlm_full */
{ U32 p;
for (p = 1; p < fastHashFillStep; ++p) {
size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
if (hashTable[hash] == 0) { /* not yet filled */
hashTable[hash] = curr + p;
} } } }
}
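/*
 * For example (restating the loop above, not new behavior): with
 * fastHashFillStep == 3, positions 0, 3, 6, ... are always written to the
 * table, while positions 1, 2, 4, 5, ... are written only when loading with
 * ZSTD_dtlm_full and only if their hash slot is still empty.
 */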
/*
* If you squint hard enough (and ignore repcodes), the search operation at any
* given position is broken into 4 stages:
*
* 1. Hash (map position to hash value via input read)
* 2. Lookup (map hash val to index via hashtable read)
* 3. Load (map index to value at that position via input read)
* 4. Compare
*
* Each of these steps involves a memory read at an address which is computed
* from the previous step. This means these steps must be sequenced and their
* latencies are cumulative.
*
* Rather than do 1->2->3->4 sequentially for a single position before moving
* onto the next, this implementation interleaves these operations across the
* next few positions:
*
* R = Repcode Read & Compare
* H = Hash
* T = Table Lookup
* M = Match Read & Compare
*
* Pos | Time -->
* ----+-------------------
* N | ... M
* N+1 | ... TM
* N+2 | R H T M
* N+3 | H TM
* N+4 | R H T M
* N+5 | H ...
* N+6 | R ...
*
* This is very much analogous to the pipelining of execution in a CPU. And just
* like a CPU, we have to dump the pipeline when we find a match (i.e., take a
* branch).
*
* When this happens, we throw away our current state, and do the following prep
* to re-enter the loop:
*
* Pos | Time -->
* ----+-------------------
* N | H T
* N+1 | H
*
* This is also the work we do at the beginning to enter the loop initially.
*/
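/*
 * For illustration only (a sketch, not used by the build; the helper name is
 * hypothetical): the same four stages written sequentially for a single
 * position. The generic loop below instead interleaves these stages across
 * ip0/ip1/ip2/ip3 as in the diagram above, so the dependent reads of
 * neighboring positions overlap in time.
 */
#if 0
static int ZSTD_toy_probeOnePosition(const BYTE* ip, const BYTE* base,
                                      const U32* hashTable, U32 hlog, U32 mls,
                                      U32 prefixStartIndex)
{
    size_t const h = ZSTD_hashPtr(ip, hlog, mls);          /* 1. Hash */
    U32 const idx = hashTable[h];                           /* 2. Lookup */
    U32 const mval = (idx >= prefixStartIndex)
                   ? MEM_read32(base + idx)                 /* 3. Load */
                   : MEM_read32(ip) ^ 1;                    /* guaranteed miss */
    return MEM_read32(ip) == mval;                          /* 4. Compare */
}
#endif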
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_noDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize,
U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
const BYTE* anchor = istart;
const BYTE* ip0 = istart;
const BYTE* ip1;
const BYTE* ip2;
const BYTE* ip3;
U32 current0;
U32 rep_offset1 = rep[0];
U32 rep_offset2 = rep[1];
U32 offsetSaved = 0;
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
U32 idx; /* match idx for ip0 */
U32 mval; /* src value at match idx */
U32 offcode;
const BYTE* match0;
size_t mLength;
/* ip0 and ip1 are always adjacent. The targetLength skipping and
* uncompressibility acceleration is applied to every other position,
* matching the behavior of #1562. step therefore represents the gap
* between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
size_t step;
const BYTE* nextStep;
const size_t kStepIncr = (1 << (kSearchStrength - 1));
DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
ip0 += (ip0 == prefixStart);
{ U32 const curr = (U32)(ip0 - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
U32 const maxRep = curr - windowLow;
if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
}
/* start each op */
_start: /* Requires: ip0 */
step = stepSize;
nextStep = ip0 + kStepIncr;
/* calculate positions, ip0 - anchor == 0, so we skip step calc */
ip1 = ip0 + 1;
ip2 = ip0 + step;
ip3 = ip2 + 1;
if (ip3 >= ilimit) {
goto _cleanup;
}
hash0 = ZSTD_hashPtr(ip0, hlog, mls);
hash1 = ZSTD_hashPtr(ip1, hlog, mls);
idx = hashTable[hash0];
do {
/* load repcode match for ip[2]*/
const U32 rval = MEM_read32(ip2 - rep_offset1);
/* write back hash table entry */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
/* check repcode at ip[2] */
if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
ip0 = ip2;
match0 = ip0 - rep_offset1;
mLength = ip0[-1] == match0[-1];
ip0 -= mLength;
match0 -= mLength;
offcode = STORE_REPCODE_1;
mLength += 4;
goto _match;
}
/* load match for ip[0] */
if (idx >= prefixStartIndex) {
mval = MEM_read32(base + idx);
} else {
mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
}
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
goto _offset;
}
/* lookup ip[1] */
idx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
/* advance to next positions */
ip0 = ip1;
ip1 = ip2;
ip2 = ip3;
/* write back hash table entry */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
/* load match for ip[0] */
if (idx >= prefixStartIndex) {
mval = MEM_read32(base + idx);
} else {
mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
}
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
goto _offset;
}
/* lookup ip[1] */
idx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
/* advance to next positions */
ip0 = ip1;
ip1 = ip2;
ip2 = ip0 + step;
ip3 = ip1 + step;
/* calculate step */
if (ip2 >= nextStep) {
step++;
PREFETCH_L1(ip1 + 64);
PREFETCH_L1(ip1 + 128);
nextStep += kStepIncr;
}
} while (ip3 < ilimit);
_cleanup:
/* Note that there are probably still a couple positions we could search.
* However, it seems to be a meaningful performance hit to try to search
* them. So let's not. */
/* save reps for next block */
rep[0] = rep_offset1 ? rep_offset1 : offsetSaved;
rep[1] = rep_offset2 ? rep_offset2 : offsetSaved;
/* Return the last literals size */
return (size_t)(iend - anchor);
_offset: /* Requires: ip0, idx */
/* Compute the offset code. */
match0 = base + idx;
rep_offset2 = rep_offset1;
rep_offset1 = (U32)(ip0-match0);
offcode = STORE_OFFSET(rep_offset1);
mLength = 4;
/* Count the backwards match length. */
while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
ip0--;
match0--;
mLength++;
}
_match: /* Requires: ip0, match0, offcode */
/* Count the forward length. */
mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
ip0 += mLength;
anchor = ip0;
/* write next hash table entry */
if (ip1 < ip0) {
hashTable[hash1] = (U32)(ip1 - base);
}
/* Fill table and check for immediate repcode. */
if (ip0 <= ilimit) {
/* Fill Table */
assert(base+current0+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
/* store sequence */
size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
{ U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
} } }
goto _start;
}
#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
}
ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)
ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)
size_t ZSTD_compressBlock_fast(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState == NULL);
if (ms->cParams.targetLength > 1) {
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
}
} else {
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
}
}
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 prefixStartIndex = ms->window.dictLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved = 0;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
const U32* const dictHashTable = dms->hashTable;
const U32 dictStartIndex = dms->window.dictLimit;
const BYTE* const dictBase = dms->window.base;
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
const U32 dictHLog = dictCParams->hashLog;
/* if a dictionary is still attached, it necessarily means that
* it is within window size. So we just check it. */
const U32 maxDistance = 1U << cParams->windowLog;
const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
assert(endIndex - prefixStartIndex <= maxDistance);
(void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
(void)hasStep; /* not currently specialized on whether it's accelerated */
/* ensure there will be no underflow
* when translating a dict index into a local index */
assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
ip += (dictAndPrefixLength == 0);
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
/* Main Search Loop */
while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
size_t mLength;
size_t const h = ZSTD_hashPtr(ip, hlog, mls);
U32 const curr = (U32)(ip-base);
U32 const matchIndex = hashTable[h];
const BYTE* match = base + matchIndex;
const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashTable[h] = curr; /* update hash table */
if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
} else if ( (matchIndex <= prefixStartIndex) ) {
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
U32 const dictMatchIndex = dictHashTable[dictHash];
const BYTE* dictMatch = dictBase + dictMatchIndex;
if (dictMatchIndex <= dictStartIndex ||
MEM_read32(dictMatch) != MEM_read32(ip)) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
} else {
/* found a dict match */
U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
while (((ip>anchor) & (dictMatch>dictStart))
&& (ip[-1] == dictMatch[-1])) {
ip--; dictMatch--; mLength++;
} /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
}
} else if (MEM_read32(match) != MEM_read32(ip)) {
/* it's not a match, and we're not going to check the dictionary */
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
} else {
/* found a regular match */
U32 const offset = (U32)(ip-match);
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
while (((ip>anchor) & (match>prefixStart))
&& (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
}
/* match found */
ip += mLength;
anchor = ip;
if (ip <= ilimit) {
/* Fill Table */
assert(base+curr+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
dictBase - dictIndexDelta + repIndex2 :
base + repIndex2;
if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
}
}
}
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved;
rep[1] = offset_2 ? offset_2 : offsetSaved;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
size_t ZSTD_compressBlock_fast_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
assert(ms->dictMatchState != NULL);
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
}
}
static size_t ZSTD_compressBlock_fast_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hlog = cParams->hashLog;
/* support stepSize of 0 */
U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
const U32 dictStartIndex = lowLimit;
const BYTE* const dictStart = dictBase + dictStartIndex;
const U32 dictLimit = ms->window.dictLimit;
const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
const BYTE* const prefixStart = base + prefixStartIndex;
const BYTE* const dictEnd = dictBase + prefixStartIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
U32 offset_1=rep[0], offset_2=rep[1];
(void)hasStep; /* not currently specialized on whether it's accelerated */
DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
/* switch to "regular" variant if extDict is invalidated due to maxDistance */
if (prefixStartIndex == dictStartIndex)
return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
const size_t h = ZSTD_hashPtr(ip, hlog, mls);
const U32 matchIndex = hashTable[h];
const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
const BYTE* match = matchBase + matchIndex;
const U32 curr = (U32)(ip-base);
const U32 repIndex = curr + 1 - offset_1;
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
hashTable[h] = curr; /* update hash table */
DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
& (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
ip += rLength;
anchor = ip;
} else {
if ( (matchIndex < dictStartIndex) ||
(MEM_read32(match) != MEM_read32(ip)) ) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
}
{ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 const offset = curr - matchIndex;
size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1; offset_1 = offset; /* update offset history */
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
ip += mLength;
anchor = ip;
} }
if (ip <= ilimit) {
/* Fill Table */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
{ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
} } }
/* save reps for next block */
rep[0] = offset_1;
rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)
size_t ZSTD_compressBlock_fast_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
U32 const mls = ms->cParams.minMatch;
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
}
}
| linux-master | lib/zstd/compress/zstd_fast.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#include "zstd_compress_sequences.h"
/*
* -log2(x / 256) lookup table for x in [0, 256).
* If x == 0: Return 0
* Else: Return floor(-log2(x / 256) * 256)
*/
static unsigned const kInverseProbabilityLog256[256] = {
0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
5, 4, 2, 1,
};
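/*
 * Worked examples of the formula above (values taken from the table):
 * x == 1 is 8 bits, so floor(-log2(1/256) * 256) == 2048;
 * x == 64 is 2 bits -> 512; x == 128 is 1 bit -> 256;
 * x == 255 is about 0.0056 bits -> floor(1.44) == 1.
 */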
static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
void const* ptr = ctable;
U16 const* u16ptr = (U16 const*)ptr;
U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
return maxSymbolValue;
}
/*
* Returns true if we should use ncount=-1 else we should
* use ncount=1 for low probability symbols instead.
*/
static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
{
/* Heuristic: This should cover most blocks <= 16K and
* start to fade out after 16K to about 32K depending on
     * compressibility.
*/
return nbSeq >= 2048;
}
/*
* Returns the cost in bytes of encoding the normalized count header.
* Returns an error if any of the helper functions return an error.
*/
static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
size_t const nbSeq, unsigned const FSELog)
{
BYTE wksp[FSE_NCOUNTBOUND];
S16 norm[MaxSeq + 1];
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
}
/*
* Returns the cost in bits of encoding the distribution described by count
* using the entropy bound.
*/
static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
{
unsigned cost = 0;
unsigned s;
assert(total > 0);
for (s = 0; s <= max; ++s) {
unsigned norm = (unsigned)((256 * count[s]) / total);
if (count[s] != 0 && norm == 0)
norm = 1;
assert(count[s] < total);
cost += count[s] * kInverseProbabilityLog256[norm];
}
return cost >> 8;
}
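/*
 * Equivalently (a restatement of the loop above, not extra logic): this
 * approximates the Shannon bound
 *   cost_bits ~= sum_s count[s] * -log2(count[s] / total)
 * using the 1/256th-bit fixed-point table above, hence the final ">> 8".
 */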
/*
* Returns the cost in bits of encoding the distribution in count using ctable.
* Returns an error if ctable cannot represent all the symbols in count.
*/
size_t ZSTD_fseBitCost(
FSE_CTable const* ctable,
unsigned const* count,
unsigned const max)
{
unsigned const kAccuracyLog = 8;
size_t cost = 0;
unsigned s;
FSE_CState_t cstate;
FSE_initCState(&cstate, ctable);
if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
ZSTD_getFSEMaxSymbolValue(ctable), max);
return ERROR(GENERIC);
}
for (s = 0; s <= max; ++s) {
unsigned const tableLog = cstate.stateLog;
unsigned const badCost = (tableLog + 1) << kAccuracyLog;
unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
if (count[s] == 0)
continue;
if (bitCost >= badCost) {
DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
return ERROR(GENERIC);
}
cost += (size_t)count[s] * bitCost;
}
return cost >> kAccuracyLog;
}
/*
* Returns the cost in bits of encoding the distribution in count using the
* table described by norm. The max symbol support by norm is assumed >= max.
* norm must be valid for every symbol with non-zero probability in count.
*/
size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
unsigned const* count, unsigned const max)
{
unsigned const shift = 8 - accuracyLog;
size_t cost = 0;
unsigned s;
assert(accuracyLog <= 8);
for (s = 0; s <= max; ++s) {
unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
unsigned const norm256 = normAcc << shift;
assert(norm256 > 0);
assert(norm256 < 256);
cost += count[s] * kInverseProbabilityLog256[norm256];
}
return cost >> 8;
}
symbolEncodingType_e
ZSTD_selectEncodingType(
FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
FSE_CTable const* prevCTable,
short const* defaultNorm, U32 defaultNormLog,
ZSTD_defaultPolicy_e const isDefaultAllowed,
ZSTD_strategy const strategy)
{
ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
if (mostFrequent == nbSeq) {
*repeatMode = FSE_repeat_none;
if (isDefaultAllowed && nbSeq <= 2) {
            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
* since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
* If basic encoding isn't possible, always choose RLE.
*/
DEBUGLOG(5, "Selected set_basic");
return set_basic;
}
DEBUGLOG(5, "Selected set_rle");
return set_rle;
}
if (strategy < ZSTD_lazy) {
if (isDefaultAllowed) {
size_t const staticFse_nbSeq_max = 1000;
size_t const mult = 10 - strategy;
size_t const baseLog = 3;
size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
assert(mult <= 9 && mult >= 7);
if ( (*repeatMode == FSE_repeat_valid)
&& (nbSeq < staticFse_nbSeq_max) ) {
DEBUGLOG(5, "Selected set_repeat");
return set_repeat;
}
if ( (nbSeq < dynamicFse_nbSeq_min)
|| (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
DEBUGLOG(5, "Selected set_basic");
/* The format allows default tables to be repeated, but it isn't useful.
* When using simple heuristics to select encoding type, we don't want
* to confuse these tables with dictionaries. When running more careful
* analysis, we don't need to waste time checking both repeating tables
* and default tables.
*/
*repeatMode = FSE_repeat_none;
return set_basic;
}
}
} else {
size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
if (isDefaultAllowed) {
assert(!ZSTD_isError(basicCost));
assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
}
assert(!ZSTD_isError(NCountCost));
assert(compressedCost < ERROR(maxCode));
DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
(unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
if (basicCost <= repeatCost && basicCost <= compressedCost) {
DEBUGLOG(5, "Selected set_basic");
assert(isDefaultAllowed);
*repeatMode = FSE_repeat_none;
return set_basic;
}
if (repeatCost <= compressedCost) {
DEBUGLOG(5, "Selected set_repeat");
assert(!ZSTD_isError(repeatCost));
return set_repeat;
}
assert(compressedCost < basicCost && compressedCost < repeatCost);
}
DEBUGLOG(5, "Selected set_compressed");
*repeatMode = FSE_repeat_check;
return set_compressed;
}
typedef struct {
S16 norm[MaxSeq + 1];
U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
} ZSTD_BuildCTableWksp;
size_t
ZSTD_buildCTable(void* dst, size_t dstCapacity,
FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
unsigned* count, U32 max,
const BYTE* codeTable, size_t nbSeq,
const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
const FSE_CTable* prevCTable, size_t prevCTableSize,
void* entropyWorkspace, size_t entropyWorkspaceSize)
{
BYTE* op = (BYTE*)dst;
const BYTE* const oend = op + dstCapacity;
DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
switch (type) {
case set_rle:
FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
*op = codeTable[0];
return 1;
case set_repeat:
ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
return 0;
case set_basic:
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
return 0;
case set_compressed: {
ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
size_t nbSeq_1 = nbSeq;
const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
if (count[codeTable[nbSeq-1]] > 1) {
count[codeTable[nbSeq-1]]--;
nbSeq_1--;
}
assert(nbSeq_1 > 1);
assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
(void)entropyWorkspaceSize;
FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
assert(oend >= op);
{ size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
return NCountSize;
}
}
default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
}
}
FORCE_INLINE_TEMPLATE size_t
ZSTD_encodeSequences_body(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets)
{
BIT_CStream_t blockStream;
FSE_CState_t stateMatchLength;
FSE_CState_t stateOffsetBits;
FSE_CState_t stateLitLength;
RETURN_ERROR_IF(
ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
dstSize_tooSmall, "not enough space remaining");
DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
(int)(blockStream.endPtr - blockStream.startPtr),
(unsigned)dstCapacity);
/* first symbols */
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
if (MEM_32bits()) BIT_flushBits(&blockStream);
if (longOffsets) {
U32 const ofBits = ofCodeTable[nbSeq-1];
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
BIT_flushBits(&blockStream);
}
BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
ofBits - extraBits);
} else {
BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
}
BIT_flushBits(&blockStream);
{ size_t n;
for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
BYTE const llCode = llCodeTable[n];
BYTE const ofCode = ofCodeTable[n];
BYTE const mlCode = mlCodeTable[n];
U32 const llBits = LL_bits[llCode];
U32 const ofBits = ofCode;
U32 const mlBits = ML_bits[mlCode];
DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
(unsigned)sequences[n].litLength,
(unsigned)sequences[n].mlBase + MINMATCH,
(unsigned)sequences[n].offBase);
/* 32b*/ /* 64b*/
/* (7)*/ /* (7)*/
FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
BIT_flushBits(&blockStream); /* (7)*/
BIT_addBits(&blockStream, sequences[n].litLength, llBits);
if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
if (longOffsets) {
unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
if (extraBits) {
BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
BIT_flushBits(&blockStream); /* (7)*/
}
BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
ofBits - extraBits); /* 31 */
} else {
BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */
}
BIT_flushBits(&blockStream); /* (7)*/
DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
} }
DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
FSE_flushCState(&blockStream, &stateMatchLength);
DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
FSE_flushCState(&blockStream, &stateOffsetBits);
DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
FSE_flushCState(&blockStream, &stateLitLength);
{ size_t const streamSize = BIT_closeCStream(&blockStream);
RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
return streamSize;
}
}
static size_t
ZSTD_encodeSequences_default(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
}
#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_encodeSequences_bmi2(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets)
{
return ZSTD_encodeSequences_body(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
}
#endif
size_t ZSTD_encodeSequences(
void* dst, size_t dstCapacity,
FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
{
DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
#if DYNAMIC_BMI2
if (bmi2) {
return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
}
#endif
(void)bmi2;
return ZSTD_encodeSequences_default(dst, dstCapacity,
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq, longOffsets);
}
| linux-master | lib/zstd/compress/zstd_compress_sequences.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_ldm.h"
#include "../common/debug.h"
#include <linux/xxhash.h>
#include "zstd_fast.h" /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"
#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7
typedef struct {
U64 rolling;
U64 stopMask;
} ldmRollingHashState_t;
/* ZSTD_ldm_gear_init():
*
* Initializes the rolling hash state such that it will honor the
* settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
unsigned hashRateLog = params->hashRateLog;
state->rolling = ~(U32)0;
/* The choice of the splitting criterion is subject to two conditions:
* 1. it has to trigger on average every 2^(hashRateLog) bytes;
* 2. ideally, it has to depend on a window of minMatchLength bytes.
*
* In the gear hash algorithm, bit n depends on the last n bytes;
* so in order to obtain a good quality splitting criterion it is
* preferable to use bits with high weight.
*
* To match condition 1 we use a mask with hashRateLog bits set
* and, because of the previous remark, we make sure these bits
* have the highest possible weight while still respecting
* condition 2.
*/
if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
} else {
/* In this degenerate case we simply honor the hash rate. */
state->stopMask = ((U64)1 << hashRateLog) - 1;
}
}
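/*
 * Worked example (illustrative parameter values): with minMatchLength == 64
 * and hashRateLog == 7, maxBitsInMask == 64, so
 *   stopMask == 0x7F << 57 == 0xFE00000000000000
 * A split then triggers on average once every 2^7 == 128 bytes, and only the
 * highest-weight bits of the rolling hash, i.e. those depending on the
 * longest history, are tested.
 */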
/* ZSTD_ldm_gear_reset()
* Feeds [data, data + minMatchLength) into the hash without registering any
* splits. This effectively resets the hash state. This is used when skipping
* over data, either at the beginning of a block, or skipping sections.
*/
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
BYTE const* data, size_t minMatchLength)
{
U64 hash = state->rolling;
size_t n = 0;
#define GEAR_ITER_ONCE() do { \
hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
n += 1; \
} while (0)
while (n + 3 < minMatchLength) {
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
}
while (n < minMatchLength) {
GEAR_ITER_ONCE();
}
#undef GEAR_ITER_ONCE
}
/* ZSTD_ldm_gear_feed():
*
* Registers in the splits array all the split points found in the first
* size bytes following the data pointer. This function terminates when
* either all the data has been processed or LDM_BATCH_SIZE splits are
* present in the splits array.
*
* Precondition: The splits array must not be full.
* Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
BYTE const* data, size_t size,
size_t* splits, unsigned* numSplits)
{
size_t n;
U64 hash, mask;
hash = state->rolling;
mask = state->stopMask;
n = 0;
#define GEAR_ITER_ONCE() do { \
hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
n += 1; \
if (UNLIKELY((hash & mask) == 0)) { \
splits[*numSplits] = n; \
*numSplits += 1; \
if (*numSplits == LDM_BATCH_SIZE) \
goto done; \
} \
} while (0)
while (n + 3 < size) {
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
}
while (n < size) {
GEAR_ITER_ONCE();
}
#undef GEAR_ITER_ONCE
done:
state->rolling = hash;
return n;
}
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
ZSTD_compressionParameters const* cParams)
{
params->windowLog = cParams->windowLog;
ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
if (params->hashLog == 0) {
params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
assert(params->hashLog <= ZSTD_HASHLOG_MAX);
}
if (params->hashRateLog == 0) {
params->hashRateLog = params->windowLog < params->hashLog
? 0
: params->windowLog - params->hashLog;
}
params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
size_t const ldmHSize = ((size_t)1) << params.hashLog;
size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
}
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
}
/* ZSTD_ldm_getBucket() :
* Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}
/* ZSTD_ldm_insertEntry() :
* Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
size_t const hash, const ldmEntry_t entry,
ldmParams_t const ldmParams)
{
BYTE* const pOffset = ldmState->bucketOffsets + hash;
unsigned const offset = *pOffset;
*(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
*pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
}
/* ZSTD_ldm_countBackwardsMatch() :
* Returns the number of bytes that match backwards before pIn and pMatch.
*
* We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
const BYTE* pIn, const BYTE* pAnchor,
const BYTE* pMatch, const BYTE* pMatchBase)
{
size_t matchLength = 0;
while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
pIn--;
pMatch--;
matchLength++;
}
return matchLength;
}
/* ZSTD_ldm_countBackwardsMatch_2segments() :
* Returns the number of bytes that match backwards from pMatch,
* even with the backwards match spanning 2 different segments.
*
* On reaching `pMatchBase`, start counting from mEnd */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
const BYTE* pIn, const BYTE* pAnchor,
const BYTE* pMatch, const BYTE* pMatchBase,
const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
/* If backwards match is entirely in the extDict or prefix, immediately return */
return matchLength;
}
DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
DEBUGLOG(7, "final backwards match length = %zu", matchLength);
return matchLength;
}
/* ZSTD_ldm_fillFastTables() :
*
* Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
* This is similar to ZSTD_loadDictionaryContent.
*
* The tables for the other strategies are filled within their
* block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
void const* end)
{
const BYTE* const iend = (const BYTE*)end;
switch(ms->cParams.strategy)
{
case ZSTD_fast:
ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
break;
case ZSTD_dfast:
ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
break;
default:
assert(0); /* not possible : not a valid strategy id */
}
return 0;
}
void ZSTD_ldm_fillHashTable(
ldmState_t* ldmState, const BYTE* ip,
const BYTE* iend, ldmParams_t const* params)
{
U32 const minMatchLength = params->minMatchLength;
U32 const hBits = params->hashLog - params->bucketSizeLog;
BYTE const* const base = ldmState->window.base;
BYTE const* const istart = ip;
ldmRollingHashState_t hashState;
size_t* const splits = ldmState->splitIndices;
unsigned numSplits;
DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
ZSTD_ldm_gear_init(&hashState, params);
while (ip < iend) {
size_t hashed;
unsigned n;
numSplits = 0;
hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
for (n = 0; n < numSplits; n++) {
if (ip + splits[n] >= istart + minMatchLength) {
BYTE const* const split = ip + splits[n] - minMatchLength;
U64 const xxhash = xxh64(split, minMatchLength, 0);
U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
ldmEntry_t entry;
entry.offset = (U32)(split - base);
entry.checksum = (U32)(xxhash >> 32);
ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
}
}
ip += hashed;
}
}
/* ZSTD_ldm_limitTableUpdate() :
*
* Sets cctx->nextToUpdate to a position corresponding closer to anchor
 * if it is far away
* (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
U32 const curr = (U32)(anchor - ms->window.base);
if (curr > ms->nextToUpdate + 1024) {
ms->nextToUpdate =
curr - MIN(512, curr - ms->nextToUpdate - 1024);
}
}
static size_t ZSTD_ldm_generateSequences_internal(
ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
ldmParams_t const* params, void const* src, size_t srcSize)
{
/* LDM parameters */
int const extDict = ZSTD_window_hasExtDict(ldmState->window);
U32 const minMatchLength = params->minMatchLength;
U32 const entsPerBucket = 1U << params->bucketSizeLog;
U32 const hBits = params->hashLog - params->bucketSizeLog;
/* Prefix and extDict parameters */
U32 const dictLimit = ldmState->window.dictLimit;
U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
BYTE const* const base = ldmState->window.base;
BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
BYTE const* const lowPrefixPtr = base + dictLimit;
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
BYTE const* const ilimit = iend - HASH_READ_SIZE;
/* Input positions */
BYTE const* anchor = istart;
BYTE const* ip = istart;
/* Rolling hash state */
ldmRollingHashState_t hashState;
/* Arrays for staged-processing */
size_t* const splits = ldmState->splitIndices;
ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
unsigned numSplits;
if (srcSize < minMatchLength)
return iend - anchor;
/* Initialize the rolling hash state with the first minMatchLength bytes */
ZSTD_ldm_gear_init(&hashState, params);
ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
ip += minMatchLength;
while (ip < ilimit) {
size_t hashed;
unsigned n;
numSplits = 0;
hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
splits, &numSplits);
for (n = 0; n < numSplits; n++) {
BYTE const* const split = ip + splits[n] - minMatchLength;
U64 const xxhash = xxh64(split, minMatchLength, 0);
U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
candidates[n].split = split;
candidates[n].hash = hash;
candidates[n].checksum = (U32)(xxhash >> 32);
candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
PREFETCH_L1(candidates[n].bucket);
}
for (n = 0; n < numSplits; n++) {
size_t forwardMatchLength = 0, backwardMatchLength = 0,
bestMatchLength = 0, mLength;
U32 offset;
BYTE const* const split = candidates[n].split;
U32 const checksum = candidates[n].checksum;
U32 const hash = candidates[n].hash;
ldmEntry_t* const bucket = candidates[n].bucket;
ldmEntry_t const* cur;
ldmEntry_t const* bestEntry = NULL;
ldmEntry_t newEntry;
newEntry.offset = (U32)(split - base);
newEntry.checksum = checksum;
/* If a split point would generate a sequence overlapping with
* the previous one, we merely register it in the hash table and
* move on */
if (split < anchor) {
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
continue;
}
for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
size_t curForwardMatchLength, curBackwardMatchLength,
curTotalMatchLength;
if (cur->checksum != checksum || cur->offset <= lowestIndex) {
continue;
}
if (extDict) {
BYTE const* const curMatchBase =
cur->offset < dictLimit ? dictBase : base;
BYTE const* const pMatch = curMatchBase + cur->offset;
BYTE const* const matchEnd =
cur->offset < dictLimit ? dictEnd : iend;
BYTE const* const lowMatchPtr =
cur->offset < dictLimit ? dictStart : lowPrefixPtr;
curForwardMatchLength =
ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
if (curForwardMatchLength < minMatchLength) {
continue;
}
curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
} else { /* !extDict */
BYTE const* const pMatch = base + cur->offset;
curForwardMatchLength = ZSTD_count(split, pMatch, iend);
if (curForwardMatchLength < minMatchLength) {
continue;
}
curBackwardMatchLength =
ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
}
curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
if (curTotalMatchLength > bestMatchLength) {
bestMatchLength = curTotalMatchLength;
forwardMatchLength = curForwardMatchLength;
backwardMatchLength = curBackwardMatchLength;
bestEntry = cur;
}
}
/* No match found -- insert an entry into the hash table
* and process the next candidate match */
if (bestEntry == NULL) {
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
continue;
}
/* Match found */
offset = (U32)(split - base) - bestEntry->offset;
mLength = forwardMatchLength + backwardMatchLength;
{
rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
/* Out of sequence storage */
if (rawSeqStore->size == rawSeqStore->capacity)
return ERROR(dstSize_tooSmall);
seq->litLength = (U32)(split - backwardMatchLength - anchor);
seq->matchLength = (U32)mLength;
seq->offset = offset;
rawSeqStore->size++;
}
/* Insert the current entry into the hash table --- it must be
* done after the previous block to avoid clobbering bestEntry */
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
anchor = split + forwardMatchLength;
/* If we find a match that ends after the data that we've hashed
* then we have a repeating, overlapping, pattern. E.g. all zeros.
* If one repetition of the pattern matches our `stopMask` then all
             * repetitions will. We don't need to insert them all into our table,
* only the first one. So skip over overlapping matches.
* This is a major speed boost (20x) for compressing a single byte
* repeated, when that byte ends up in the table.
*/
if (anchor > ip + hashed) {
ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
/* Continue the outer loop at anchor (ip + hashed == anchor). */
ip = anchor - hashed;
break;
}
}
ip += hashed;
}
return iend - anchor;
}
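/* Illustrative sketch, example only: how the 64-bit xxhash of a split point is
 * divided into a bucket index (low hBits bits, with hBits = hashLog - bucketSizeLog)
 * and a 32-bit checksum (top bits), mirroring the candidate setup above.
 * The example_* name is hypothetical and the block is kept out of compilation. */
#if 0
static void example_split_xxhash(U64 xxhash, U32 hBits, U32* bucketIndex, U32* checksum)
{
    *bucketIndex = (U32)(xxhash & (((U32)1 << hBits) - 1)); /* selects one of 1<<hBits buckets */
    *checksum    = (U32)(xxhash >> 32);                     /* cheap filter before length comparison */
}
#endif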
/*! ZSTD_ldm_reduceTable() :
* reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
U32 const reducerValue)
{
U32 u;
for (u = 0; u < size; u++) {
if (table[u].offset < reducerValue) table[u].offset = 0;
else table[u].offset -= reducerValue;
}
}
size_t ZSTD_ldm_generateSequences(
ldmState_t* ldmState, rawSeqStore_t* sequences,
ldmParams_t const* params, void const* src, size_t srcSize)
{
U32 const maxDist = 1U << params->windowLog;
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
size_t const kMaxChunkSize = 1 << 20;
size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
size_t chunk;
size_t leftoverSize = 0;
assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
/* Check that ZSTD_window_update() has been called for this chunk prior
* to passing it to this function.
*/
assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
/* The input could be very large (in zstdmt), so it must be broken up into
* chunks to enforce the maximum distance and handle overflow correction.
*/
assert(sequences->pos <= sequences->size);
assert(sequences->size <= sequences->capacity);
for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
size_t const remaining = (size_t)(iend - chunkStart);
BYTE const *const chunkEnd =
(remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
size_t const chunkSize = chunkEnd - chunkStart;
size_t newLeftoverSize;
size_t const prevSize = sequences->size;
assert(chunkStart < iend);
/* 1. Perform overflow correction if necessary. */
if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
U32 const ldmHSize = 1U << params->hashLog;
U32 const correction = ZSTD_window_correctOverflow(
&ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
/* invalidate dictionaries on overflow correction */
ldmState->loadedDictEnd = 0;
}
/* 2. We enforce the maximum offset allowed.
*
* kMaxChunkSize should be small enough that we don't lose too much of
* the window through early invalidation.
* TODO: * Test the chunk size.
* * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
*
* NOTE: Because of dictionaries + sequence splitting we MUST make sure
* that any offset used is valid at the END of the sequence, since it may
* be split into two sequences. This condition holds when using
* ZSTD_window_enforceMaxDist(), but if we move to checking offsets
* against maxDist directly, we'll have to carefully handle that case.
*/
ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
/* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
newLeftoverSize = ZSTD_ldm_generateSequences_internal(
ldmState, sequences, params, chunkStart, chunkSize);
if (ZSTD_isError(newLeftoverSize))
return newLeftoverSize;
/* 4. We add the leftover literals from previous iterations to the first
* newly generated sequence, or add the `newLeftoverSize` if none are
* generated.
*/
/* Prepend the leftover literals from the last call */
if (prevSize < sequences->size) {
sequences->seq[prevSize].litLength += (U32)leftoverSize;
leftoverSize = newLeftoverSize;
} else {
assert(newLeftoverSize == chunkSize);
leftoverSize += chunkSize;
}
}
return 0;
}
void
ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
if (srcSize <= seq->litLength) {
/* Skip past srcSize literals */
seq->litLength -= (U32)srcSize;
return;
}
srcSize -= seq->litLength;
seq->litLength = 0;
if (srcSize < seq->matchLength) {
/* Skip past the first srcSize of the match */
seq->matchLength -= (U32)srcSize;
if (seq->matchLength < minMatch) {
/* The match is too short, omit it */
if (rawSeqStore->pos + 1 < rawSeqStore->size) {
seq[1].litLength += seq[0].matchLength;
}
rawSeqStore->pos++;
}
return;
}
srcSize -= seq->matchLength;
seq->matchLength = 0;
rawSeqStore->pos++;
}
}
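/* Worked example of the skipping rule above (illustrative values, not from the
 * source): with a head sequence {litLength=6, matchLength=8} and minMatch=4,
 * skipping srcSize=10 consumes the 6 literals plus 4 bytes of the match and
 * leaves {litLength=0, matchLength=4}; skipping srcSize=11 instead leaves a
 * 3-byte match, which is below minMatch, so its length is folded into the next
 * sequence's litLength and the head sequence is dropped. */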
/*
* If the sequence length is longer than remaining then the sequence is split
* between this block and the next.
*
* Returns the current sequence to handle, or if the rest of the block should
* be literals, it returns a sequence with offset == 0.
*/
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
U32 const remaining, U32 const minMatch)
{
rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
assert(sequence.offset > 0);
/* Likely: No partial sequence */
if (remaining >= sequence.litLength + sequence.matchLength) {
rawSeqStore->pos++;
return sequence;
}
/* Cut the sequence short (offset == 0 ==> rest is literals). */
if (remaining <= sequence.litLength) {
sequence.offset = 0;
} else if (remaining < sequence.litLength + sequence.matchLength) {
sequence.matchLength = remaining - sequence.litLength;
if (sequence.matchLength < minMatch) {
sequence.offset = 0;
}
}
/* Skip past `remaining` bytes for the future sequences. */
ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
return sequence;
}
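/* Illustrative sketch, example only: the same splitting rule as
 * maybeSplitSequence() expressed with a local struct so it stays self-contained.
 * The example_* names are hypothetical and the block is kept out of compilation. */
#if 0
struct example_raw_seq { unsigned litLength, matchLength, offset; };
static struct example_raw_seq
example_split(struct example_raw_seq s, unsigned remaining, unsigned minMatch)
{
    if (remaining >= s.litLength + s.matchLength)
        return s;                                 /* whole sequence fits in this block */
    if (remaining <= s.litLength) {
        s.offset = 0;                             /* offset == 0: rest of block is literals */
    } else {
        s.matchLength = remaining - s.litLength;  /* truncate the match at the block edge */
        if (s.matchLength < minMatch)
            s.offset = 0;                         /* truncated match too short: drop it */
    }
    return s;
}
/* e.g. {litLength=5, matchLength=20} with remaining=12 and minMatch=4
 * becomes {litLength=5, matchLength=7}, offset unchanged. */
#endif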
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
if (currPos >= currSeq.litLength + currSeq.matchLength) {
currPos -= currSeq.litLength + currSeq.matchLength;
rawSeqStore->pos++;
} else {
rawSeqStore->posInSequence = currPos;
break;
}
}
if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
rawSeqStore->posInSequence = 0;
}
}
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
ZSTD_paramSwitch_e useRowMatchFinder,
void const* src, size_t srcSize)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
unsigned const minMatch = cParams->minMatch;
ZSTD_blockCompressor const blockCompressor =
ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
/* Input bounds */
BYTE const* const istart = (BYTE const*)src;
BYTE const* const iend = istart + srcSize;
/* Input positions */
BYTE const* ip = istart;
DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
/* If using opt parser, use LDMs only as candidates rather than always accepting them */
if (cParams->strategy >= ZSTD_btopt) {
size_t lastLLSize;
ms->ldmSeqStore = rawSeqStore;
lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
return lastLLSize;
}
assert(rawSeqStore->pos <= rawSeqStore->size);
assert(rawSeqStore->size <= rawSeqStore->capacity);
/* Loop through each sequence and apply the block compressor to the literals */
while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
/* maybeSplitSequence updates rawSeqStore->pos */
rawSeq const sequence = maybeSplitSequence(rawSeqStore,
(U32)(iend - ip), minMatch);
int i;
/* End signal */
if (sequence.offset == 0)
break;
assert(ip + sequence.litLength + sequence.matchLength <= iend);
/* Fill tables for block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
/* Run the block compressor */
DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
{
size_t const newLitLength =
blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
ip += sequence.litLength;
/* Update the repcodes */
for (i = ZSTD_REP_NUM - 1; i > 0; i--)
rep[i] = rep[i-1];
rep[0] = sequence.offset;
/* Store the sequence */
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
STORE_OFFSET(sequence.offset),
sequence.matchLength);
ip += sequence.matchLength;
}
}
/* Fill the tables for the block compressor */
ZSTD_ldm_limitTableUpdate(ms, ip);
ZSTD_ldm_fillFastTables(ms, ip);
/* Compress the last literals */
return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}
| linux-master | lib/zstd/compress/zstd_ldm.c |
/* ******************************************************************
* hist : Histogram functions
* part of Finite State Entropy project
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* --- dependencies --- */
#include "../common/mem.h" /* U32, BYTE, etc. */
#include "../common/debug.h" /* assert, DEBUGLOG */
#include "../common/error_private.h" /* ERROR */
#include "hist.h"
/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }
/*-**************************************************************
* Histogram functions
****************************************************************/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* const end = ip + srcSize;
unsigned maxSymbolValue = *maxSymbolValuePtr;
unsigned largestCount=0;
ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
while (ip<end) {
assert(*ip <= maxSymbolValue);
count[*ip++]++;
}
while (!count[maxSymbolValue]) maxSymbolValue--;
*maxSymbolValuePtr = maxSymbolValue;
{ U32 s;
for (s=0; s<=maxSymbolValue; s++)
if (count[s] > largestCount) largestCount = count[s];
}
return largestCount;
}
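/* Illustrative usage sketch, example only (kept out of compilation):
 * counting byte frequencies of a small buffer with HIST_count_simple(). */
#if 0
{
    unsigned count[256] = {0};         /* one slot per possible byte value */
    unsigned maxSymbol = 255;          /* in: upper bound; out: largest symbol actually seen */
    const char sample[] = "abracadabra";
    unsigned const largest = HIST_count_simple(count, &maxSymbol, sample, sizeof(sample) - 1);
    /* here largest == 5 (the five 'a' bytes) and maxSymbol == 'r' */
}
#endif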
typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
/* HIST_count_parallel_wksp() :
* store histogram into 4 intermediate tables, recombined at the end.
* this design makes better use of OoO cpus,
* and is noticeably faster when some values are heavily repeated.
* But it needs some additional workspace for intermediate tables.
* `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
* @return : largest histogram frequency,
* or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
static size_t HIST_count_parallel_wksp(
unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
HIST_checkInput_e check,
U32* const workSpace)
{
const BYTE* ip = (const BYTE*)source;
const BYTE* const iend = ip+sourceSize;
size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
unsigned max=0;
U32* const Counting1 = workSpace;
U32* const Counting2 = Counting1 + 256;
U32* const Counting3 = Counting2 + 256;
U32* const Counting4 = Counting3 + 256;
/* safety checks */
assert(*maxSymbolValuePtr <= 255);
if (!sourceSize) {
ZSTD_memset(count, 0, countSize);
*maxSymbolValuePtr = 0;
return 0;
}
ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
/* by stripes of 16 bytes */
{ U32 cached = MEM_read32(ip); ip += 4;
while (ip < iend-15) {
U32 c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
}
ip-=4;
}
/* finish last symbols */
while (ip<iend) Counting1[*ip++]++;
{ U32 s;
for (s=0; s<256; s++) {
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
if (Counting1[s] > max) max = Counting1[s];
} }
{ unsigned maxSymbolValue = 255;
while (!Counting1[maxSymbolValue]) maxSymbolValue--;
if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
*maxSymbolValuePtr = maxSymbolValue;
ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
}
return (size_t)max;
}
/* HIST_countFast_wksp() :
* Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-byte aligned,
* `workSpaceSize` must be >= HIST_WKSP_SIZE
*/
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
void* workSpace, size_t workSpaceSize)
{
if (sourceSize < 1500) /* heuristic threshold */
return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}
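/* Illustrative usage sketch, example only (kept out of compilation). It assumes
 * HIST_WKSP_SIZE equals HIST_WKSP_SIZE_U32 * sizeof(U32), as the two macros in
 * hist.h suggest; a stack U32 array also satisfies the 4-byte alignment rule. */
#if 0
{
    unsigned count[256];
    unsigned maxSymbol = 255;
    U32 wksp[HIST_WKSP_SIZE_U32];      /* external scratch buffer */
    const char sample[] = "abracadabra";
    size_t const largest = HIST_countFast_wksp(count, &maxSymbol,
                                               sample, sizeof(sample) - 1,
                                               wksp, sizeof(wksp));
    if (HIST_isError(largest)) { /* workspace too small or misaligned */ }
}
#endif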
/* HIST_count_wksp() :
* Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on a 4-byte boundary */
if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
if (*maxSymbolValuePtr < 255)
return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
*maxSymbolValuePtr = 255;
return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}
| linux-master | lib/zstd/compress/hist.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
#include "../common/mem.h"
#include "hist.h" /* HIST_countFast_wksp */
#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_compress_internal.h"
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"
#include "zstd_fast.h"
#include "zstd_double_fast.h"
#include "zstd_lazy.h"
#include "zstd_opt.h"
#include "zstd_ldm.h"
#include "zstd_compress_superblock.h"
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* COMPRESS_HEAPMODE :
 * Select how the default compression function ZSTD_compress() allocates its context,
* on stack (0, default), or into heap (1).
* Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
*/
/*!
* ZSTD_HASHLOG3_MAX :
* Maximum size of the hash table dedicated to find 3-bytes matches,
* in log format, aka 17 => 1 << 17 == 128Ki positions.
* This structure is only used in zstd_opt.
* Since allocation is centralized for all strategies, it has to be known here.
* The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
* so that zstd_opt.c doesn't need to know about this constant.
*/
#ifndef ZSTD_HASHLOG3_MAX
# define ZSTD_HASHLOG3_MAX 17
#endif
/*-*************************************
* Helper functions
***************************************/
/* ZSTD_compressBound()
* Note that the result from this function is only compatible with the "normal"
* full-block strategy.
* When there are a lot of small blocks due to frequent flush in streaming mode
 * the overhead of headers can make the compressed data larger than the
* return value of ZSTD_compressBound().
*/
size_t ZSTD_compressBound(size_t srcSize) {
return ZSTD_COMPRESSBOUND(srcSize);
}
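/* Illustrative usage sketch, example only (kept out of compilation): sizing a
 * destination buffer for a single full-block compression. As noted above, this
 * bound does not cover the per-block overhead of frequent streaming flushes. */
#if 0
{
    size_t const srcSize = 64 * 1024;
    size_t const dstCapacity = ZSTD_compressBound(srcSize); /* worst-case compressed size */
    (void)dstCapacity;
}
#endif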
/*-*************************************
* Context memory management
***************************************/
struct ZSTD_CDict_s {
const void* dictContent;
size_t dictContentSize;
ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
ZSTD_cwksp workspace;
ZSTD_matchState_t matchState;
ZSTD_compressedBlockState_t cBlockState;
ZSTD_customMem customMem;
U32 dictID;
int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
* row-based matchfinder. Unless the cdict is reloaded, we will use
* the same greedy/lazy matchfinder at compression time.
*/
}; /* typedef'd to ZSTD_CDict within "zstd.h" */
ZSTD_CCtx* ZSTD_createCCtx(void)
{
return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
}
static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
{
assert(cctx != NULL);
ZSTD_memset(cctx, 0, sizeof(*cctx));
cctx->customMem = memManager;
cctx->bmi2 = ZSTD_cpuSupportsBmi2();
{ size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
assert(!ZSTD_isError(err));
(void)err;
}
}
ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
ZSTD_STATIC_ASSERT(zcss_init==0);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
if (!cctx) return NULL;
ZSTD_initCCtx(cctx, customMem);
return cctx;
}
}
ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
{
ZSTD_cwksp ws;
ZSTD_CCtx* cctx;
if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
if (cctx == NULL) return NULL;
ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
ZSTD_cwksp_move(&cctx->workspace, &ws);
cctx->staticSize = workspaceSize;
/* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
return cctx;
}
/*
* Clears and frees all of the dictionaries in the CCtx.
*/
static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
{
ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
ZSTD_freeCDict(cctx->localDict.cdict);
ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
cctx->cdict = NULL;
}
static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
{
size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
return bufferSize + cdictSize;
}
static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
{
assert(cctx != NULL);
assert(cctx->staticSize == 0);
ZSTD_clearAllDicts(cctx);
ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
}
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"not compatible with static CCtx");
{
int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
ZSTD_freeCCtxContent(cctx);
if (!cctxInWorkspace) {
ZSTD_customFree(cctx, cctx->customMem);
}
}
return 0;
}
static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
{
(void)cctx;
return 0;
}
size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
{
if (cctx==NULL) return 0; /* support sizeof on NULL */
/* cctx may be in the workspace */
return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ ZSTD_cwksp_sizeof(&cctx->workspace)
+ ZSTD_sizeof_localDict(cctx->localDict)
+ ZSTD_sizeof_mtctx(cctx);
}
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
{
return ZSTD_sizeof_CCtx(zcs); /* same object */
}
/* private API call, for dictBuilder only */
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
}
/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
* for this compression.
*/
static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
assert(mode != ZSTD_ps_auto);
return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}
/* Returns row matchfinder usage given an initial mode and cParams */
static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
int const kHasSIMD128 = 1;
#else
int const kHasSIMD128 = 0;
#endif
    if (mode != ZSTD_ps_auto) return mode; /* an explicit request is honored even without SIMD support */
mode = ZSTD_ps_disable;
if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
if (kHasSIMD128) {
if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
} else {
if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
}
return mode;
}
/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
const ZSTD_paramSwitch_e useRowMatchFinder,
const U32 forDDSDict) {
assert(useRowMatchFinder != ZSTD_ps_auto);
/* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
* We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
*/
return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}
/* Returns ZSTD_ps_enable if compression parameters are such that we should
 * enable long distance matching (wlog >= 27, strategy >= btopt).
 * Returns ZSTD_ps_disable otherwise (a non-auto request is returned unchanged).
*/
static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params cctxParams;
/* should not matter, as all cParams are presumed properly defined */
ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
cctxParams.cParams = cParams;
/* Adjust advanced params according to cParams */
cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
assert(cctxParams.ldmParams.hashRateLog < 32);
}
cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
assert(!ZSTD_checkCParams(cParams));
return cctxParams;
}
static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
ZSTD_customMem customMem)
{
ZSTD_CCtx_params* params;
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
sizeof(ZSTD_CCtx_params), customMem);
if (!params) { return NULL; }
ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
params->customMem = customMem;
return params;
}
ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
{
return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
}
size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
{
if (params == NULL) { return 0; }
ZSTD_customFree(params, params->customMem);
return 0;
}
size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}
size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->compressionLevel = compressionLevel;
cctxParams->fParams.contentSizeFlag = 1;
return 0;
}
#define ZSTD_NO_CLEVEL 0
/*
* Initializes the cctxParams from params and compressionLevel.
* @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
*/
static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
{
assert(!ZSTD_checkCParams(params->cParams));
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->cParams = params->cParams;
cctxParams->fParams = params->fParams;
/* Should not matter, as all cParams are presumed properly defined.
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = compressionLevel;
    cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
    cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
    cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
}
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
return 0;
}
/*
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
* @param param Validated zstd parameters.
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
{
assert(!ZSTD_checkCParams(params->cParams));
cctxParams->cParams = params->cParams;
cctxParams->fParams = params->fParams;
/* Should not matter, as all cParams are presumed properly defined.
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
}
ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
{
ZSTD_bounds bounds = { 0, 0, 0 };
switch(param)
{
case ZSTD_c_compressionLevel:
bounds.lowerBound = ZSTD_minCLevel();
bounds.upperBound = ZSTD_maxCLevel();
return bounds;
case ZSTD_c_windowLog:
bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
bounds.upperBound = ZSTD_WINDOWLOG_MAX;
return bounds;
case ZSTD_c_hashLog:
bounds.lowerBound = ZSTD_HASHLOG_MIN;
bounds.upperBound = ZSTD_HASHLOG_MAX;
return bounds;
case ZSTD_c_chainLog:
bounds.lowerBound = ZSTD_CHAINLOG_MIN;
bounds.upperBound = ZSTD_CHAINLOG_MAX;
return bounds;
case ZSTD_c_searchLog:
bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
bounds.upperBound = ZSTD_SEARCHLOG_MAX;
return bounds;
case ZSTD_c_minMatch:
bounds.lowerBound = ZSTD_MINMATCH_MIN;
bounds.upperBound = ZSTD_MINMATCH_MAX;
return bounds;
case ZSTD_c_targetLength:
bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
return bounds;
case ZSTD_c_strategy:
bounds.lowerBound = ZSTD_STRATEGY_MIN;
bounds.upperBound = ZSTD_STRATEGY_MAX;
return bounds;
case ZSTD_c_contentSizeFlag:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_checksumFlag:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_dictIDFlag:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_nbWorkers:
bounds.lowerBound = 0;
bounds.upperBound = 0;
return bounds;
case ZSTD_c_jobSize:
bounds.lowerBound = 0;
bounds.upperBound = 0;
return bounds;
case ZSTD_c_overlapLog:
bounds.lowerBound = 0;
bounds.upperBound = 0;
return bounds;
case ZSTD_c_enableDedicatedDictSearch:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_enableLongDistanceMatching:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_ldmHashLog:
bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
return bounds;
case ZSTD_c_ldmMinMatch:
bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
return bounds;
case ZSTD_c_ldmBucketSizeLog:
bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
return bounds;
case ZSTD_c_ldmHashRateLog:
bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
return bounds;
/* experimental parameters */
case ZSTD_c_rsyncable:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_forceMaxWindow :
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_format:
ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
bounds.lowerBound = ZSTD_f_zstd1;
bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
case ZSTD_c_forceAttachDict:
ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
bounds.lowerBound = ZSTD_dictDefaultAttach;
bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
return bounds;
case ZSTD_c_literalCompressionMode:
ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_targetCBlockSize:
bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
return bounds;
case ZSTD_c_srcSizeHint:
bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
return bounds;
case ZSTD_c_stableInBuffer:
case ZSTD_c_stableOutBuffer:
bounds.lowerBound = (int)ZSTD_bm_buffered;
bounds.upperBound = (int)ZSTD_bm_stable;
return bounds;
case ZSTD_c_blockDelimiters:
bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
return bounds;
case ZSTD_c_validateSequences:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_useBlockSplitter:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_useRowMatchFinder:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_deterministicRefPrefix:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
default:
bounds.error = ERROR(parameter_unsupported);
return bounds;
}
}
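/* Illustrative usage sketch, example only (kept out of compilation): querying
 * the bounds of a parameter and clamping a caller-supplied value by hand,
 * the same pattern ZSTD_cParam_clampBounds() applies below. */
#if 0
{
    ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_windowLog);
    int wlog = 35;                                      /* deliberately out of range */
    if (!ZSTD_isError(b.error)) {
        if (wlog < b.lowerBound) wlog = b.lowerBound;
        if (wlog > b.upperBound) wlog = b.upperBound;   /* clamped to ZSTD_WINDOWLOG_MAX */
    }
}
#endif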
/* ZSTD_cParam_clampBounds:
* Clamps the value into the bounded range.
*/
static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
{
ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
if (ZSTD_isError(bounds.error)) return bounds.error;
if (*value < bounds.lowerBound) *value = bounds.lowerBound;
if (*value > bounds.upperBound) *value = bounds.upperBound;
return 0;
}
#define BOUNDCHECK(cParam, val) { \
RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
parameter_outOfBound, "Param out of bounds"); \
}
static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
{
switch(param)
{
case ZSTD_c_compressionLevel:
case ZSTD_c_hashLog:
case ZSTD_c_chainLog:
case ZSTD_c_searchLog:
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
return 1;
case ZSTD_c_format:
case ZSTD_c_windowLog:
case ZSTD_c_contentSizeFlag:
case ZSTD_c_checksumFlag:
case ZSTD_c_dictIDFlag:
case ZSTD_c_forceMaxWindow :
case ZSTD_c_nbWorkers:
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
case ZSTD_c_enableDedicatedDictSearch:
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_ldmHashRateLog:
case ZSTD_c_forceAttachDict:
case ZSTD_c_literalCompressionMode:
case ZSTD_c_targetCBlockSize:
case ZSTD_c_srcSizeHint:
case ZSTD_c_stableInBuffer:
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
case ZSTD_c_useBlockSplitter:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
default:
return 0;
}
}
size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
{
DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
if (cctx->streamStage != zcss_init) {
if (ZSTD_isUpdateAuthorized(param)) {
cctx->cParamsChanged = 1;
} else {
RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
} }
switch(param)
{
case ZSTD_c_nbWorkers:
RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
"MT not compatible with static alloc");
break;
case ZSTD_c_compressionLevel:
case ZSTD_c_windowLog:
case ZSTD_c_hashLog:
case ZSTD_c_chainLog:
case ZSTD_c_searchLog:
case ZSTD_c_minMatch:
case ZSTD_c_targetLength:
case ZSTD_c_strategy:
case ZSTD_c_ldmHashRateLog:
case ZSTD_c_format:
case ZSTD_c_contentSizeFlag:
case ZSTD_c_checksumFlag:
case ZSTD_c_dictIDFlag:
case ZSTD_c_forceMaxWindow:
case ZSTD_c_forceAttachDict:
case ZSTD_c_literalCompressionMode:
case ZSTD_c_jobSize:
case ZSTD_c_overlapLog:
case ZSTD_c_rsyncable:
case ZSTD_c_enableDedicatedDictSearch:
case ZSTD_c_enableLongDistanceMatching:
case ZSTD_c_ldmHashLog:
case ZSTD_c_ldmMinMatch:
case ZSTD_c_ldmBucketSizeLog:
case ZSTD_c_targetCBlockSize:
case ZSTD_c_srcSizeHint:
case ZSTD_c_stableInBuffer:
case ZSTD_c_stableOutBuffer:
case ZSTD_c_blockDelimiters:
case ZSTD_c_validateSequences:
case ZSTD_c_useBlockSplitter:
case ZSTD_c_useRowMatchFinder:
case ZSTD_c_deterministicRefPrefix:
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
}
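/* Illustrative usage sketch, example only (kept out of compilation): setting a
 * few advanced parameters while the context is still in its init stage. */
#if 0
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx != NULL) {
        size_t err;
        err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
        if (!ZSTD_isError(err))
            err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 27);
        if (!ZSTD_isError(err))
            err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
        if (ZSTD_isError(err)) { /* e.g. parameter_outOfBound or stage_wrong */ }
        ZSTD_freeCCtx(cctx);
    }
}
#endif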
size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
ZSTD_cParameter param, int value)
{
DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
switch(param)
{
case ZSTD_c_format :
BOUNDCHECK(ZSTD_c_format, value);
CCtxParams->format = (ZSTD_format_e)value;
return (size_t)CCtxParams->format;
case ZSTD_c_compressionLevel : {
FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
if (value == 0)
CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
else
CCtxParams->compressionLevel = value;
if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
return 0; /* return type (size_t) cannot represent negative values */
}
case ZSTD_c_windowLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_windowLog, value);
CCtxParams->cParams.windowLog = (U32)value;
return CCtxParams->cParams.windowLog;
case ZSTD_c_hashLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_hashLog, value);
CCtxParams->cParams.hashLog = (U32)value;
return CCtxParams->cParams.hashLog;
case ZSTD_c_chainLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_chainLog, value);
CCtxParams->cParams.chainLog = (U32)value;
return CCtxParams->cParams.chainLog;
case ZSTD_c_searchLog :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_searchLog, value);
CCtxParams->cParams.searchLog = (U32)value;
return (size_t)value;
case ZSTD_c_minMatch :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_minMatch, value);
CCtxParams->cParams.minMatch = value;
return CCtxParams->cParams.minMatch;
case ZSTD_c_targetLength :
BOUNDCHECK(ZSTD_c_targetLength, value);
CCtxParams->cParams.targetLength = value;
return CCtxParams->cParams.targetLength;
case ZSTD_c_strategy :
if (value!=0) /* 0 => use default */
BOUNDCHECK(ZSTD_c_strategy, value);
CCtxParams->cParams.strategy = (ZSTD_strategy)value;
return (size_t)CCtxParams->cParams.strategy;
case ZSTD_c_contentSizeFlag :
/* Content size written in frame header _when known_ (default:1) */
DEBUGLOG(4, "set content size flag = %u", (value!=0));
CCtxParams->fParams.contentSizeFlag = value != 0;
return CCtxParams->fParams.contentSizeFlag;
case ZSTD_c_checksumFlag :
/* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
CCtxParams->fParams.checksumFlag = value != 0;
return CCtxParams->fParams.checksumFlag;
case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
CCtxParams->fParams.noDictIDFlag = !value;
return !CCtxParams->fParams.noDictIDFlag;
case ZSTD_c_forceMaxWindow :
CCtxParams->forceWindow = (value != 0);
return CCtxParams->forceWindow;
case ZSTD_c_forceAttachDict : {
const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
CCtxParams->attachDictPref = pref;
return CCtxParams->attachDictPref;
}
case ZSTD_c_literalCompressionMode : {
const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
CCtxParams->literalCompressionMode = lcm;
return CCtxParams->literalCompressionMode;
}
case ZSTD_c_nbWorkers :
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
case ZSTD_c_jobSize :
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
case ZSTD_c_overlapLog :
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
case ZSTD_c_rsyncable :
RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
return 0;
case ZSTD_c_enableDedicatedDictSearch :
CCtxParams->enableDedicatedDictSearch = (value!=0);
return CCtxParams->enableDedicatedDictSearch;
case ZSTD_c_enableLongDistanceMatching :
CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
return CCtxParams->ldmParams.enableLdm;
case ZSTD_c_ldmHashLog :
if (value!=0) /* 0 ==> auto */
BOUNDCHECK(ZSTD_c_ldmHashLog, value);
CCtxParams->ldmParams.hashLog = value;
return CCtxParams->ldmParams.hashLog;
case ZSTD_c_ldmMinMatch :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
CCtxParams->ldmParams.minMatchLength = value;
return CCtxParams->ldmParams.minMatchLength;
case ZSTD_c_ldmBucketSizeLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
CCtxParams->ldmParams.bucketSizeLog = value;
return CCtxParams->ldmParams.bucketSizeLog;
case ZSTD_c_ldmHashRateLog :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
CCtxParams->ldmParams.hashRateLog = value;
return CCtxParams->ldmParams.hashRateLog;
case ZSTD_c_targetCBlockSize :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
CCtxParams->targetCBlockSize = value;
return CCtxParams->targetCBlockSize;
case ZSTD_c_srcSizeHint :
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_srcSizeHint, value);
CCtxParams->srcSizeHint = value;
return CCtxParams->srcSizeHint;
case ZSTD_c_stableInBuffer:
BOUNDCHECK(ZSTD_c_stableInBuffer, value);
CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
return CCtxParams->inBufferMode;
case ZSTD_c_stableOutBuffer:
BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
return CCtxParams->outBufferMode;
case ZSTD_c_blockDelimiters:
BOUNDCHECK(ZSTD_c_blockDelimiters, value);
CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
return CCtxParams->blockDelimiters;
case ZSTD_c_validateSequences:
BOUNDCHECK(ZSTD_c_validateSequences, value);
CCtxParams->validateSequences = value;
return CCtxParams->validateSequences;
case ZSTD_c_useBlockSplitter:
BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
return CCtxParams->useBlockSplitter;
case ZSTD_c_useRowMatchFinder:
BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
return CCtxParams->useRowMatchFinder;
case ZSTD_c_deterministicRefPrefix:
BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
CCtxParams->deterministicRefPrefix = !!value;
return CCtxParams->deterministicRefPrefix;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
}
size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
{
return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
}
size_t ZSTD_CCtxParams_getParameter(
ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
{
switch(param)
{
case ZSTD_c_format :
*value = CCtxParams->format;
break;
case ZSTD_c_compressionLevel :
*value = CCtxParams->compressionLevel;
break;
case ZSTD_c_windowLog :
*value = (int)CCtxParams->cParams.windowLog;
break;
case ZSTD_c_hashLog :
*value = (int)CCtxParams->cParams.hashLog;
break;
case ZSTD_c_chainLog :
*value = (int)CCtxParams->cParams.chainLog;
break;
case ZSTD_c_searchLog :
*value = CCtxParams->cParams.searchLog;
break;
case ZSTD_c_minMatch :
*value = CCtxParams->cParams.minMatch;
break;
case ZSTD_c_targetLength :
*value = CCtxParams->cParams.targetLength;
break;
case ZSTD_c_strategy :
*value = (unsigned)CCtxParams->cParams.strategy;
break;
case ZSTD_c_contentSizeFlag :
*value = CCtxParams->fParams.contentSizeFlag;
break;
case ZSTD_c_checksumFlag :
*value = CCtxParams->fParams.checksumFlag;
break;
case ZSTD_c_dictIDFlag :
*value = !CCtxParams->fParams.noDictIDFlag;
break;
case ZSTD_c_forceMaxWindow :
*value = CCtxParams->forceWindow;
break;
case ZSTD_c_forceAttachDict :
*value = CCtxParams->attachDictPref;
break;
case ZSTD_c_literalCompressionMode :
*value = CCtxParams->literalCompressionMode;
break;
case ZSTD_c_nbWorkers :
assert(CCtxParams->nbWorkers == 0);
*value = CCtxParams->nbWorkers;
break;
case ZSTD_c_jobSize :
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
case ZSTD_c_overlapLog :
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
case ZSTD_c_rsyncable :
RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
case ZSTD_c_enableDedicatedDictSearch :
*value = CCtxParams->enableDedicatedDictSearch;
break;
case ZSTD_c_enableLongDistanceMatching :
*value = CCtxParams->ldmParams.enableLdm;
break;
case ZSTD_c_ldmHashLog :
*value = CCtxParams->ldmParams.hashLog;
break;
case ZSTD_c_ldmMinMatch :
*value = CCtxParams->ldmParams.minMatchLength;
break;
case ZSTD_c_ldmBucketSizeLog :
*value = CCtxParams->ldmParams.bucketSizeLog;
break;
case ZSTD_c_ldmHashRateLog :
*value = CCtxParams->ldmParams.hashRateLog;
break;
case ZSTD_c_targetCBlockSize :
*value = (int)CCtxParams->targetCBlockSize;
break;
case ZSTD_c_srcSizeHint :
*value = (int)CCtxParams->srcSizeHint;
break;
case ZSTD_c_stableInBuffer :
*value = (int)CCtxParams->inBufferMode;
break;
case ZSTD_c_stableOutBuffer :
*value = (int)CCtxParams->outBufferMode;
break;
case ZSTD_c_blockDelimiters :
*value = (int)CCtxParams->blockDelimiters;
break;
case ZSTD_c_validateSequences :
*value = (int)CCtxParams->validateSequences;
break;
case ZSTD_c_useBlockSplitter :
*value = (int)CCtxParams->useBlockSplitter;
break;
case ZSTD_c_useRowMatchFinder :
*value = (int)CCtxParams->useRowMatchFinder;
break;
case ZSTD_c_deterministicRefPrefix:
*value = (int)CCtxParams->deterministicRefPrefix;
break;
default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
}
return 0;
}
/* ZSTD_CCtx_setParametersUsingCCtxParams() :
* just applies `params` into `cctx`
* no action is performed, parameters are merely stored.
* If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
* This is possible even if a compression is ongoing.
* In which case, new parameters will be applied on the fly, starting with next compression job.
*/
size_t ZSTD_CCtx_setParametersUsingCCtxParams(
ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"The context is in the wrong stage!");
RETURN_ERROR_IF(cctx->cdict, stage_wrong,
"Can't override parameters with cdict attached (some must "
"be inherited from the cdict).");
cctx->requestedParams = *params;
return 0;
}
size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't set pledgedSrcSize when not in init stage.");
cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
return 0;
}
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
int const compressionLevel,
size_t const dictSize);
static int ZSTD_dedicatedDictSearch_isSupported(
const ZSTD_compressionParameters* cParams);
static void ZSTD_dedicatedDictSearch_revertCParams(
ZSTD_compressionParameters* cParams);
/*
* Initializes the local dict using the requested parameters.
* NOTE: This does not use the pledged src size, because it may be used for more
* than one compression.
*/
static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
{
ZSTD_localDict* const dl = &cctx->localDict;
if (dl->dict == NULL) {
/* No local dictionary. */
assert(dl->dictBuffer == NULL);
assert(dl->cdict == NULL);
assert(dl->dictSize == 0);
return 0;
}
if (dl->cdict != NULL) {
assert(cctx->cdict == dl->cdict);
/* Local dictionary already initialized. */
return 0;
}
assert(dl->dictSize > 0);
assert(cctx->cdict == NULL);
assert(cctx->prefixDict.dict == NULL);
dl->cdict = ZSTD_createCDict_advanced2(
dl->dict,
dl->dictSize,
ZSTD_dlm_byRef,
dl->dictContentType,
&cctx->requestedParams,
cctx->customMem);
RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
cctx->cdict = dl->cdict;
return 0;
}
size_t ZSTD_CCtx_loadDictionary_advanced(
ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't load a dictionary when ctx is not in init stage.");
DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
ZSTD_clearAllDicts(cctx); /* in case one already exists */
if (dict == NULL || dictSize == 0) /* no dictionary mode */
return 0;
if (dictLoadMethod == ZSTD_dlm_byRef) {
cctx->localDict.dict = dict;
} else {
void* dictBuffer;
RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
"no malloc for static CCtx");
dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
ZSTD_memcpy(dictBuffer, dict, dictSize);
cctx->localDict.dictBuffer = dictBuffer;
cctx->localDict.dict = dictBuffer;
}
cctx->localDict.dictSize = dictSize;
cctx->localDict.dictContentType = dictContentType;
return 0;
}
size_t ZSTD_CCtx_loadDictionary_byReference(
ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
return ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
{
return ZSTD_CCtx_loadDictionary_advanced(
cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
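/* Illustrative usage sketch, example only (kept out of compilation): loading a
 * dictionary by copy versus by reference. Loading a new dictionary replaces any
 * previously loaded one, since ZSTD_clearAllDicts() is called first. */
#if 0
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    static const char dictBuf[] = "example dictionary content";
    if (cctx != NULL) {
        /* byCopy: the cctx keeps its own copy, dictBuf may go away afterwards */
        size_t err = ZSTD_CCtx_loadDictionary(cctx, dictBuf, sizeof(dictBuf));
        /* byRef: only a pointer is kept, dictBuf must outlive its use by the cctx */
        if (!ZSTD_isError(err))
            err = ZSTD_CCtx_loadDictionary_byReference(cctx, dictBuf, sizeof(dictBuf));
        ZSTD_freeCCtx(cctx);
    }
}
#endif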
size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a dict when ctx not in init stage.");
/* Free the existing local cdict (if any) to save memory. */
ZSTD_clearAllDicts(cctx);
cctx->cdict = cdict;
return 0;
}
size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a pool when ctx not in init stage.");
cctx->pool = pool;
return 0;
}
size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
{
return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
}
size_t ZSTD_CCtx_refPrefix_advanced(
ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't ref a prefix when ctx not in init stage.");
ZSTD_clearAllDicts(cctx);
if (prefix != NULL && prefixSize > 0) {
cctx->prefixDict.dict = prefix;
cctx->prefixDict.dictSize = prefixSize;
cctx->prefixDict.dictContentType = dictContentType;
}
return 0;
}
/*! ZSTD_CCtx_reset() :
* Also dumps dictionary */
size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
{
if ( (reset == ZSTD_reset_session_only)
|| (reset == ZSTD_reset_session_and_parameters) ) {
cctx->streamStage = zcss_init;
cctx->pledgedSrcSizePlusOne = 0;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
"Can't reset parameters only when not in init stage.");
ZSTD_clearAllDicts(cctx);
return ZSTD_CCtxParams_reset(&cctx->requestedParams);
}
return 0;
}
/* ZSTD_checkCParams() :
    control that CParam values remain within the authorized range.
@return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
return 0;
}
/* ZSTD_clampCParams() :
 * clamp CParam values into the valid range.
* @return : valid CParams */
static ZSTD_compressionParameters
ZSTD_clampCParams(ZSTD_compressionParameters cParams)
{
# define CLAMP_TYPE(cParam, val, type) { \
ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
}
# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
CLAMP(ZSTD_c_windowLog, cParams.windowLog);
CLAMP(ZSTD_c_chainLog, cParams.chainLog);
CLAMP(ZSTD_c_hashLog, cParams.hashLog);
CLAMP(ZSTD_c_searchLog, cParams.searchLog);
CLAMP(ZSTD_c_minMatch, cParams.minMatch);
CLAMP(ZSTD_c_targetLength,cParams.targetLength);
CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
return cParams;
}
/* ZSTD_cycleLog() :
* condition for correct operation : hashLog > 1 */
U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
return hashLog - btScale;
}
/* ZSTD_dictAndWindowLog() :
* Returns an adjusted window log that is large enough to fit the source and the dictionary.
* The zstd format says that the entire dictionary is valid if one byte of the dictionary
* is within the window. So the hashLog and chainLog should be large enough to reference both
* the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
* the hashLog and windowLog.
* NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
*/
static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
{
const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
/* No dictionary ==> No change */
if (dictSize == 0) {
return windowLog;
}
assert(windowLog <= ZSTD_WINDOWLOG_MAX);
assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
{
U64 const windowSize = 1ULL << windowLog;
U64 const dictAndWindowSize = dictSize + windowSize;
/* If the window size is already large enough to fit both the source and the dictionary
* then just use the window size. Otherwise adjust so that it fits the dictionary and
* the window.
*/
if (windowSize >= dictSize + srcSize) {
return windowLog; /* Window size large enough already */
} else if (dictAndWindowSize >= maxWindowSize) {
return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
} else {
return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
}
}
}
/* ZSTD_adjustCParams_internal() :
* optimize `cPar` for a specified input (`srcSize` and `dictSize`).
* mostly downsize to reduce memory consumption and initialization latency.
* `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
* `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
* note : `srcSize==0` means 0!
* condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
static ZSTD_compressionParameters
ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize,
ZSTD_cParamMode_e mode)
{
const U64 minSrcSize = 513; /* (1<<9) + 1 */
const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
assert(ZSTD_checkCParams(cPar)==0);
switch (mode) {
case ZSTD_cpm_unknown:
case ZSTD_cpm_noAttachDict:
/* If we don't know the source size, don't make any
* assumptions about it. We will already have selected
* smaller parameters if a dictionary is in use.
*/
break;
case ZSTD_cpm_createCDict:
/* Assume a small source size when creating a dictionary
* with an unknown source size.
*/
if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
srcSize = minSrcSize;
break;
case ZSTD_cpm_attachDict:
/* Dictionary has its own dedicated parameters which have
* already been selected. We are selecting parameters
* for only the source.
*/
dictSize = 0;
break;
default:
assert(0);
break;
}
/* resize windowLog if input is small enough, to use less memory */
if ( (srcSize < maxWindowResize)
&& (dictSize < maxWindowResize) ) {
U32 const tSize = (U32)(srcSize + dictSize);
static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
ZSTD_highbit32(tSize-1) + 1;
if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
}
if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
if (cycleLog > dictAndWindowLog)
cPar.chainLog -= (cycleLog - dictAndWindowLog);
}
if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
return cPar;
}
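/* Worked example (illustrative) : srcSize = 100 KiB with no dictionary gives
 * tSize = 102400 < maxWindowResize, so srcLog = ZSTD_highbit32(102399) + 1 = 17,
 * and a requested windowLog of 21 is shrunk to 17 to save table memory. */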
ZSTD_compressionParameters
ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
unsigned long long srcSize,
size_t dictSize)
{
cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
}
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
static void ZSTD_overrideCParams(
ZSTD_compressionParameters* cParams,
const ZSTD_compressionParameters* overrides)
{
if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
if (overrides->strategy) cParams->strategy = overrides->strategy;
}
ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
ZSTD_compressionParameters cParams;
if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
srcSizeHint = CCtxParams->srcSizeHint;
}
cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
assert(!ZSTD_checkCParams(cParams));
/* srcSizeHint == 0 means 0 */
return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
}
static size_t
ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
const ZSTD_paramSwitch_e useRowMatchFinder,
const U32 enableDedicatedDictSearch,
const U32 forCCtx)
{
/* chain table size should be 0 for fast or row-hash strategies */
size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
? ((size_t)1 << cParams->chainLog)
: 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
/* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
* surrounded by redzones in ASAN. */
size_t const tableSpace = chainSize * sizeof(U32)
+ hSize * sizeof(U32)
+ h3Size * sizeof(U32);
size_t const optPotentialSpace =
ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
: 0;
size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
? optPotentialSpace
: 0;
size_t const slackSpace = ZSTD_cwksp_slack_space_required();
/* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
assert(useRowMatchFinder != ZSTD_ps_auto);
DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
(U32)chainSize, (U32)hSize, (U32)h3Size);
return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}
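/* Rough magnitude (illustrative) : with hashLog = chainLog = 17 and a chain-table
 * strategy in use, the hash and chain tables alone take 2 * (1<<17) * sizeof(U32) = 1 MiB;
 * the optimal-parser and row-matchfinder extras are only counted when the strategy
 * actually uses them. */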
static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
const ZSTD_compressionParameters* cParams,
const ldmParams_t* ldmParams,
const int isStatic,
const ZSTD_paramSwitch_e useRowMatchFinder,
const size_t buffInSize,
const size_t buffOutSize,
const U64 pledgedSrcSize)
{
size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (cParams->minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ ZSTD_cwksp_alloc_size(buffOutSize);
size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
size_t const neededSpace =
cctxSpace +
entropySpace +
blockStateSpace +
ldmSpace +
ldmSeqSpace +
matchStateSize +
tokenSpace +
bufferSpace;
DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
return neededSpace;
}
size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
&cParams);
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
/* estimateCCtxSize is for one-shot compression. So no buffers should
* be needed. However, we still allocate two 0-sized buffers, which can
* take space under ASAN. */
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
}
size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
/* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
size_t noRowCCtxSize;
size_t rowCCtxSize;
initialParams.useRowMatchFinder = ZSTD_ps_disable;
noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
initialParams.useRowMatchFinder = ZSTD_ps_enable;
rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
return MAX(noRowCCtxSize, rowCCtxSize);
} else {
return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
}
}
static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
{
int tier = 0;
size_t largestSize = 0;
static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
for (; tier < 4; ++tier) {
/* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
}
return largestSize;
}
size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
/* Ensure monotonically increasing memory usage as compression level increases */
size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
return memBudget;
}
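/* Usage sketch (illustrative; assumes the static-CCtx entry points declared for this
 * library) : a caller can size a one-shot workspace as
 *   size_t const need = ZSTD_estimateCCtxSize(3);
 * and hand a buffer of at least `need` bytes to ZSTD_initStaticCCtx().
 * The estimate is the maximum over all source-size tiers for that level, so it is an
 * upper bound regardless of the actual input size. */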
size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
{
RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
{ ZSTD_compressionParameters const cParams =
ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
? ((size_t)1 << cParams.windowLog) + blockSize
: 0;
size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
ZSTD_CONTENTSIZE_UNKNOWN);
}
}
size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
{
ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
/* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
size_t noRowCCtxSize;
size_t rowCCtxSize;
initialParams.useRowMatchFinder = ZSTD_ps_disable;
noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
initialParams.useRowMatchFinder = ZSTD_ps_enable;
rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
return MAX(noRowCCtxSize, rowCCtxSize);
} else {
return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
}
}
static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
return ZSTD_estimateCStreamSize_usingCParams(cParams);
}
size_t ZSTD_estimateCStreamSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
return memBudget;
}
/* ZSTD_getFrameProgression():
* tells how much data has been consumed (input) and produced (output) for current frame.
* able to count progression inside worker threads (non-blocking mode).
*/
ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
{
{ ZSTD_frameProgression fp;
size_t const buffered = (cctx->inBuff == NULL) ? 0 :
cctx->inBuffPos - cctx->inToCompress;
if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
assert(buffered <= ZSTD_BLOCKSIZE_MAX);
fp.ingested = cctx->consumedSrcSize + buffered;
fp.consumed = cctx->consumedSrcSize;
fp.produced = cctx->producedCSize;
fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
fp.currentJobID = 0;
fp.nbActiveWorkers = 0;
return fp;
} }
/*! ZSTD_toFlushNow()
* Only useful for multithreading scenarios currently (nbWorkers >= 1).
*/
size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
{
(void)cctx;
return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
}
static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
ZSTD_compressionParameters cParams2)
{
(void)cParams1;
(void)cParams2;
assert(cParams1.windowLog == cParams2.windowLog);
assert(cParams1.chainLog == cParams2.chainLog);
assert(cParams1.hashLog == cParams2.hashLog);
assert(cParams1.searchLog == cParams2.searchLog);
assert(cParams1.minMatch == cParams2.minMatch);
assert(cParams1.targetLength == cParams2.targetLength);
assert(cParams1.strategy == cParams2.strategy);
}
void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
{
int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
bs->rep[i] = repStartValue[i];
bs->entropy.huf.repeatMode = HUF_repeat_none;
bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
}
/*! ZSTD_invalidateMatchState()
* Invalidate all the matches in the match finder tables.
* Requires nextSrc and base to be set (either may be NULL).
*/
static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
{
ZSTD_window_clear(&ms->window);
ms->nextToUpdate = ms->window.dictLimit;
ms->loadedDictEnd = 0;
ms->opt.litLengthSum = 0; /* force reset of btopt stats */
ms->dictMatchState = NULL;
}
/*
* Controls, for this matchState reset, whether the tables need to be cleared /
* prepared for the coming compression (ZSTDcrp_makeClean), or whether the
* tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
* subsequent operation will overwrite the table space anyways (e.g., copying
* the matchState contents in from a CDict).
*/
typedef enum {
ZSTDcrp_makeClean,
ZSTDcrp_leaveDirty
} ZSTD_compResetPolicy_e;
/*
* Controls, for this matchState reset, whether indexing can continue where it
* left off (ZSTDirp_continue), or whether it needs to be restarted from zero
* (ZSTDirp_reset).
*/
typedef enum {
ZSTDirp_continue,
ZSTDirp_reset
} ZSTD_indexResetPolicy_e;
typedef enum {
ZSTD_resetTarget_CDict,
ZSTD_resetTarget_CCtx
} ZSTD_resetTarget_e;
static size_t
ZSTD_reset_matchState(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
const ZSTD_compressionParameters* cParams,
const ZSTD_paramSwitch_e useRowMatchFinder,
const ZSTD_compResetPolicy_e crp,
const ZSTD_indexResetPolicy_e forceResetIndex,
const ZSTD_resetTarget_e forWho)
{
/* disable chain table allocation for fast or row-based strategies */
size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
? ((size_t)1 << cParams->chainLog)
: 0;
size_t const hSize = ((size_t)1) << cParams->hashLog;
U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
assert(useRowMatchFinder != ZSTD_ps_auto);
if (forceResetIndex == ZSTDirp_reset) {
ZSTD_window_init(&ms->window);
ZSTD_cwksp_mark_tables_dirty(ws);
}
ms->hashLog3 = hashLog3;
ZSTD_invalidateMatchState(ms);
assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
ZSTD_cwksp_clear_tables(ws);
DEBUGLOG(5, "reserving table space");
/* table Space */
ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
if (crp!=ZSTDcrp_leaveDirty) {
/* reset tables only */
ZSTD_cwksp_clean_tables(ws);
}
/* opt parser space */
if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
DEBUGLOG(4, "reserving optimal parser space");
ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
}
if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
{ /* Row match finder needs an additional table of hashes ("tags") */
size_t const tagTableSize = hSize*sizeof(U16);
ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
}
{ /* Switch to 32-entry rows if searchLog is 5 (or more) */
U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
assert(cParams->hashLog >= rowLog);
ms->rowHashLog = cParams->hashLog - rowLog;
}
}
ms->cParams = *cParams;
RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
"failed a workspace allocation in ZSTD_reset_matchState");
return 0;
}
/* ZSTD_indexTooCloseToMax() :
* minor optimization : prefer memset() rather than reduceIndex()
* which is measurably slow in some circumstances (reported for Visual Studio).
* Works when re-using a context for a lot of smallish inputs :
* if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
* memset() will be triggered before reduceIndex().
*/
#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
{
return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
}
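/* Intuition (illustrative) : indices advance by roughly srcSize each time the context
 * is re-used, so once the current index is within ZSTD_INDEXOVERFLOW_MARGIN of
 * ZSTD_CURRENT_MAX, the cheaper full reset (memset) is preferred over reduceIndex(). */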
/* ZSTD_dictTooBig():
* When a dictionary is larger than ZSTD_CHUNKSIZE_MAX it can't be loaded in
* one go generically. In that case we reset the tables to zero,
* so that we can load as much of the dictionary as possible.
*/
static int ZSTD_dictTooBig(size_t const loadedDictSize)
{
return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
}
/*! ZSTD_resetCCtx_internal() :
* @param loadedDictSize The size of the dictionary to be loaded
* into the context, if any. If no dictionary is used, or the
* dictionary is being attached / copied, then pass 0.
* note : `params` are assumed fully validated at this stage.
*/
static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
ZSTD_CCtx_params const* params,
U64 const pledgedSrcSize,
size_t const loadedDictSize,
ZSTD_compResetPolicy_e const crp,
ZSTD_buffered_policy_e const zbuff)
{
ZSTD_cwksp* const ws = &zc->workspace;
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
(U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zc->isFirstBlock = 1;
/* Set applied params early so we can modify them for LDM,
* and point params at the applied params.
*/
zc->appliedParams = *params;
params = &zc->appliedParams;
assert(params->useRowMatchFinder != ZSTD_ps_auto);
assert(params->useBlockSplitter != ZSTD_ps_auto);
assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* Adjust long distance matching parameters */
ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
assert(params->ldmParams.hashRateLog < 32);
}
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
? ZSTD_compressBound(blockSize) + 1
: 0;
size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
? windowSize + blockSize
: 0;
size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
ZSTD_indexResetPolicy_e needsIndexReset =
(indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
size_t const neededSpace =
ZSTD_estimateCCtxSize_usingCCtxParams_internal(
&params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
buffInSize, buffOutSize, pledgedSrcSize);
int resizeWorkspace;
FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
{ /* Check if workspace is large enough, alloc a new one if needed */
int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
resizeWorkspace = workspaceTooSmall || workspaceWasteful;
DEBUGLOG(4, "Need %zu B workspace", neededSpace);
DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
if (resizeWorkspace) {
DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
ZSTD_cwksp_sizeof(ws) >> 10,
neededSpace >> 10);
RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
needsIndexReset = ZSTDirp_reset;
ZSTD_cwksp_free(ws, zc->customMem);
FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
DEBUGLOG(5, "reserving object space");
/* Statically sized space.
* entropyWorkspace never moves,
* though prev/next block swap places */
assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
} }
ZSTD_cwksp_clear(ws);
/* init params */
zc->blockState.matchState.cParams = params->cParams;
zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
zc->consumedSrcSize = 0;
zc->producedCSize = 0;
if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
zc->appliedParams.fParams.contentSizeFlag = 0;
DEBUGLOG(4, "pledged content size : %u ; flag : %u",
(unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
zc->blockSize = blockSize;
xxh64_reset(&zc->xxhState, 0);
zc->stage = ZSTDcs_init;
zc->dictID = 0;
zc->dictContentSize = 0;
ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
/* ZSTD_wildcopy() is used to copy into the literals buffer,
* so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
*/
zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
zc->seqStore.maxNbLit = blockSize;
/* buffers */
zc->bufferedPolicy = zbuff;
zc->inBuffSize = buffInSize;
zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
zc->outBuffSize = buffOutSize;
zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
/* ldm bucketOffsets table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* TODO: avoid memset? */
size_t const numBuckets =
((size_t)1) << (params->ldmParams.hashLog -
params->ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
}
/* sequences storage */
ZSTD_referenceExternalSequences(zc, NULL, 0);
zc->seqStore.maxNbSeq = maxNbSeq;
zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&zc->blockState.matchState,
ws,
&params->cParams,
params->useRowMatchFinder,
crp,
needsIndexReset,
ZSTD_resetTarget_CCtx), "");
/* ldm hash table */
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
/* TODO: avoid memset? */
size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
zc->maxNbLdmSequences = maxNbLdmSeq;
ZSTD_window_init(&zc->ldmState.window);
zc->ldmState.loadedDictEnd = 0;
}
DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
zc->initialized = 1;
return 0;
}
}
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
* do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
int i;
for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
}
/* These are the approximate sizes for each strategy past which copying the
* dictionary tables into the working context is faster than using them
* in-place.
*/
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
8 KB, /* unused */
8 KB, /* ZSTD_fast */
16 KB, /* ZSTD_dfast */
32 KB, /* ZSTD_greedy */
32 KB, /* ZSTD_lazy */
32 KB, /* ZSTD_lazy2 */
32 KB, /* ZSTD_btlazy2 */
32 KB, /* ZSTD_btopt */
8 KB, /* ZSTD_btultra */
8 KB /* ZSTD_btultra2 */
};
static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
U64 pledgedSrcSize)
{
size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
return dedicatedDictSearch
|| ( ( pledgedSrcSize <= cutoff
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| params->attachDictPref == ZSTD_dictForceAttach )
&& params->attachDictPref != ZSTD_dictForceCopy
&& !params->forceWindow ); /* dictMatchState isn't correctly
* handled in _enforceMaxDist */
}
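/* Example (illustrative) : a lazy-strategy CDict (32 KB cutoff) with pledgedSrcSize = 10 KB
 * and default preferences is attached in place; with pledgedSrcSize = 1 MB it is copied
 * instead, unless ZSTD_dictForceAttach is requested or dedicated dict search is active. */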
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
(unsigned long long)pledgedSrcSize);
{
ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Resize working context table params for input only, since the dict
* has its own tables. */
/* pledgedSrcSize == 0 means 0! */
if (cdict->matchState.dedicatedDictSearch) {
ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
}
params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
cdict->dictContentSize, ZSTD_cpm_attachDict);
params.cParams.windowLog = windowLog;
params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
ZSTDcrp_makeClean, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
}
{ const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
- cdict->matchState.window.base);
const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
if (cdictLen == 0) {
/* don't even attach dictionaries with no contents */
DEBUGLOG(4, "skipping attaching empty dictionary");
} else {
DEBUGLOG(4, "attaching dictionary into context");
cctx->blockState.matchState.dictMatchState = &cdict->matchState;
/* prep working match state so dict matches never have negative indices
* when they are translated to the working context's index space. */
if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
cctx->blockState.matchState.window.nextSrc =
cctx->blockState.matchState.window.base + cdictEnd;
ZSTD_window_clear(&cctx->blockState.matchState.window);
}
/* loadedDictEnd is expressed within the referential of the active context */
cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
} }
cctx->dictID = cdict->dictID;
cctx->dictContentSize = cdict->dictContentSize;
/* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
ZSTD_CCtx_params params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
assert(!cdict->matchState.dedicatedDictSearch);
DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
(unsigned long long)pledgedSrcSize);
{ unsigned const windowLog = params.cParams.windowLog;
assert(windowLog != 0);
/* Copy only compression parameters related to tables. */
params.cParams = *cdict_cParams;
params.cParams.windowLog = windowLog;
params.useRowMatchFinder = cdict->useRowMatchFinder;
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff), "");
assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
}
ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
assert(params.useRowMatchFinder != ZSTD_ps_auto);
/* copy tables */
{ size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
? ((size_t)1 << cdict_cParams->chainLog)
: 0;
size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
ZSTD_memcpy(cctx->blockState.matchState.hashTable,
cdict->matchState.hashTable,
hSize * sizeof(U32));
/* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
ZSTD_memcpy(cctx->blockState.matchState.chainTable,
cdict->matchState.chainTable,
chainSize * sizeof(U32));
}
/* copy tag table */
if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
size_t const tagTableSize = hSize*sizeof(U16);
ZSTD_memcpy(cctx->blockState.matchState.tagTable,
cdict->matchState.tagTable,
tagTableSize);
}
}
/* Zero the hashTable3, since the cdict never fills it */
{ int const h3log = cctx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
assert(cdict->matchState.hashLog3 == 0);
ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
}
ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
/* copy dictionary offsets */
{ ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
cctx->dictID = cdict->dictID;
cctx->dictContentSize = cdict->dictContentSize;
/* copy block state */
ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
return 0;
}
/* We have a choice between copying the dictionary context into the working
* context, or referencing the dictionary context from the working context
* in-place. We decide here which strategy to use. */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
(unsigned)pledgedSrcSize);
if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
return ZSTD_resetCCtx_byAttachingCDict(
cctx, cdict, *params, pledgedSrcSize, zbuff);
} else {
return ZSTD_resetCCtx_byCopyingCDict(
cctx, cdict, *params, pledgedSrcSize, zbuff);
}
}
/*! ZSTD_copyCCtx_internal() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
* The "context", in this case, refers to the hash and chain tables,
* entropy tables, and dictionary references.
* `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
* @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
const ZSTD_CCtx* srcCCtx,
ZSTD_frameParameters fParams,
U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
"Can't copy a ctx that's not in init stage.");
DEBUGLOG(5, "ZSTD_copyCCtx_internal");
ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
{ ZSTD_CCtx_params params = dstCCtx->requestedParams;
/* Copy only compression parameters related to tables. */
params.cParams = srcCCtx->appliedParams.cParams;
assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
params.ldmParams = srcCCtx->appliedParams.ldmParams;
params.fParams = fParams;
ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
ZSTDcrp_leaveDirty, zbuff);
assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
}
ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
/* copy tables */
{ size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
srcCCtx->appliedParams.useRowMatchFinder,
0 /* forDDSDict */)
? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
: 0;
size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
int const h3log = srcCCtx->blockState.matchState.hashLog3;
size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
srcCCtx->blockState.matchState.hashTable,
hSize * sizeof(U32));
ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
srcCCtx->blockState.matchState.chainTable,
chainSize * sizeof(U32));
ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
srcCCtx->blockState.matchState.hashTable3,
h3Size * sizeof(U32));
}
ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
/* copy dictionary offsets */
{
const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
dstMatchState->window = srcMatchState->window;
dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
}
dstCCtx->dictID = srcCCtx->dictID;
dstCCtx->dictContentSize = srcCCtx->dictContentSize;
/* copy block state */
ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
return 0;
}
/*! ZSTD_copyCCtx() :
* Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
* pledgedSrcSize==0 means "unknown".
* @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
fParams, pledgedSrcSize,
zbuff);
}
#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
* reduce table indexes by `reducerValue`, or squash to zero.
* PreserveMark preserves "unsorted mark" for btlazy2 strategy.
* It must be set to a clear 0/1 value, to remove branch during inlining.
* Presume table size is a multiple of ZSTD_ROWSIZE
* to help auto-vectorization */
FORCE_INLINE_TEMPLATE void
ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
{
int const nbRows = (int)size / ZSTD_ROWSIZE;
int cellNb = 0;
int rowNb;
/* Protect special index values < ZSTD_WINDOW_START_INDEX. */
U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
assert(size < (1U<<31)); /* can be casted to int */
for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
int column;
for (column=0; column<ZSTD_ROWSIZE; column++) {
U32 newVal;
if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
/* This write is pointless, but is required(?) for the compiler
* to auto-vectorize the loop. */
newVal = ZSTD_DUBT_UNSORTED_MARK;
} else if (table[cellNb] < reducerThreshold) {
newVal = 0;
} else {
newVal = table[cellNb] - reducerValue;
}
table[cellNb] = newVal;
cellNb++;
} }
}
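/* Worked example (illustrative; assumes the usual ZSTD_WINDOW_START_INDEX of 2) :
 * reducerValue = 1000 gives reducerThreshold = 1002, so an index of 500 is squashed
 * to 0, an index of 5000 becomes 4000, and ZSTD_DUBT_UNSORTED_MARK survives when
 * preserveMark is set (btlazy2 only). */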
static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
{
ZSTD_reduceTable_internal(table, size, reducerValue, 0);
}
static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
{
ZSTD_reduceTable_internal(table, size, reducerValue, 1);
}
/*! ZSTD_reduceIndex() :
* rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
{
{ U32 const hSize = (U32)1 << params->cParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
}
if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
U32 const chainSize = (U32)1 << params->cParams.chainLog;
if (params->cParams.strategy == ZSTD_btlazy2)
ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
else
ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
}
if (ms->hashLog3) {
U32 const h3Size = (U32)1 << ms->hashLog3;
ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
}
}
/*-*******************************************************
* Block entropic compression
*********************************************************/
/* See doc/zstd_compression_format.md for detailed format description */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
{
const seqDef* const sequences = seqStorePtr->sequencesStart;
BYTE* const llCodeTable = seqStorePtr->llCode;
BYTE* const ofCodeTable = seqStorePtr->ofCode;
BYTE* const mlCodeTable = seqStorePtr->mlCode;
U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
U32 u;
assert(nbSeq <= seqStorePtr->maxNbSeq);
for (u=0; u<nbSeq; u++) {
U32 const llv = sequences[u].litLength;
U32 const mlv = sequences[u].mlBase;
llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
}
if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
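/* Example (illustrative) : offBase = 1 (a repeat code) maps to ofCode 0, while
 * offBase = 2051 maps to ofCode ZSTD_highbit32(2051) = 11; literal and match lengths
 * are mapped to their codes by ZSTD_LLcode()/ZSTD_MLcode(), with extra bits emitted
 * later in the bitstream. */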
/* ZSTD_useTargetCBlockSize():
* Returns whether the target compressed block size parameter is in use.
* If so, compression makes a best effort to keep compressed block sizes around targetCBlockSize.
* Returns 1 if true, 0 otherwise. */
static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
{
DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
return (cctxParams->targetCBlockSize != 0);
}
/* ZSTD_blockSplitterEnabled():
* Returns whether the block splitting parameter is in use.
* If so, compression makes a best effort to split blocks in order to improve the compression ratio.
* At the time this function is called, the parameter must be finalized.
* Returns 1 if true, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
}
/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
* and size of the sequences statistics
*/
typedef struct {
U32 LLtype;
U32 Offtype;
U32 MLtype;
size_t size;
size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
} ZSTD_symbolEncodingTypeStats_t;
/* ZSTD_buildSequencesStatistics():
* Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
* Modifies `nextEntropy` to have the appropriate values as a side effect.
* nbSeq must be greater than 0.
*
* entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
*/
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
BYTE* dst, const BYTE* const dstEnd,
ZSTD_strategy strategy, unsigned* countWorkspace,
void* entropyWorkspace, size_t entropyWkspSize) {
BYTE* const ostart = dst;
const BYTE* const oend = dstEnd;
BYTE* op = ostart;
FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
ZSTD_symbolEncodingTypeStats_t stats;
stats.lastCountSize = 0;
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
assert(op <= oend);
assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
/* build CTable for Literal Lengths */
{ unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
countWorkspace, max, mostFrequent, nbSeq,
LLFSELog, prevEntropy->litlengthCTable,
LL_defaultNorm, LL_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(set_basic < set_compressed && set_rle < set_compressed);
assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
countWorkspace, max, llCodeTable, nbSeq,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->litlengthCTable,
sizeof(prevEntropy->litlengthCTable),
entropyWorkspace, entropyWkspSize);
if (ZSTD_isError(countSize)) {
DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
stats.size = countSize;
return stats;
}
if (stats.LLtype == set_compressed)
stats.lastCountSize = countSize;
op += countSize;
assert(op <= oend);
} }
/* build CTable for Offsets */
{ unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(
countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
countWorkspace, max, mostFrequent, nbSeq,
OffFSELog, prevEntropy->offcodeCTable,
OF_defaultNorm, OF_defaultNormLog,
defaultPolicy, strategy);
assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
countWorkspace, max, ofCodeTable, nbSeq,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->offcodeCTable,
sizeof(prevEntropy->offcodeCTable),
entropyWorkspace, entropyWkspSize);
if (ZSTD_isError(countSize)) {
DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
stats.size = countSize;
return stats;
}
if (stats.Offtype == set_compressed)
stats.lastCountSize = countSize;
op += countSize;
assert(op <= oend);
} }
/* build CTable for MatchLengths */
{ unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(
countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
countWorkspace, max, mostFrequent, nbSeq,
MLFSELog, prevEntropy->matchlengthCTable,
ML_defaultNorm, ML_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
countWorkspace, max, mlCodeTable, nbSeq,
ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->matchlengthCTable,
sizeof(prevEntropy->matchlengthCTable),
entropyWorkspace, entropyWkspSize);
if (ZSTD_isError(countSize)) {
DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
stats.size = countSize;
return stats;
}
if (stats.MLtype == set_compressed)
stats.lastCountSize = countSize;
op += countSize;
assert(op <= oend);
} }
stats.size = (size_t)(op-ostart);
return stats;
}
/* ZSTD_entropyCompressSeqStore_internal():
* compresses both literals and sequences
* Returns compressed size of block, or a zstd error.
*/
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
{
const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
unsigned* count = (unsigned*)entropyWorkspace;
FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
const seqDef* const sequences = seqStorePtr->sequencesStart;
const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t lastCountSize;
entropyWorkspace = count + (MaxSeq + 1);
entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
/* Compress literals */
{ const BYTE* const literals = seqStorePtr->litStart;
size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
/* Base suspicion of uncompressibility on ratio of literals to sequences */
unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
size_t const litSize = (size_t)(seqStorePtr->lit - literals);
size_t const cSize = ZSTD_compressLiterals(
&prevEntropy->huf, &nextEntropy->huf,
cctxParams->cParams.strategy,
ZSTD_literalsCompressionIsDisabled(cctxParams),
op, dstCapacity,
literals, litSize,
entropyWorkspace, entropyWkspSize,
bmi2, suspectUncompressible);
FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
assert(cSize <= dstCapacity);
op += cSize;
}
/* Sequences Header */
RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
dstSize_tooSmall, "Can't fit seq hdr in output buf!");
if (nbSeq < 128) {
*op++ = (BYTE)nbSeq;
} else if (nbSeq < LONGNBSEQ) {
op[0] = (BYTE)((nbSeq>>8) + 0x80);
op[1] = (BYTE)nbSeq;
op+=2;
} else {
op[0]=0xFF;
MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
op+=3;
}
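/* Illustrative header sizes : nbSeq = 100 -> 1 byte (0x64); nbSeq = 200 -> 2 bytes
 * (0x80, 0xC8); nbSeq >= LONGNBSEQ -> 3 bytes (0xFF followed by a little-endian
 * 16-bit remainder). */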
assert(op <= oend);
if (nbSeq==0) {
/* Copy the old tables over as if we repeated them */
ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
return (size_t)(op - ostart);
}
{
ZSTD_symbolEncodingTypeStats_t stats;
BYTE* seqHead = op++;
/* build stats for sequences */
stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
&prevEntropy->fse, &nextEntropy->fse,
op, oend,
strategy, count,
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
*seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
lastCountSize = stats.lastCountSize;
op += stats.size;
}
{ size_t const bitstreamSize = ZSTD_encodeSequences(
op, (size_t)(oend - op),
CTable_MatchLength, mlCodeTable,
CTable_OffsetBits, ofCodeTable,
CTable_LitLength, llCodeTable,
sequences, nbSeq,
longOffsets, bmi2);
FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
op += bitstreamSize;
assert(op <= oend);
/* zstd versions <= 1.3.4 mistakenly report corruption when
* FSE_readNCount() receives a buffer < 4 bytes.
* Fixed by https://github.com/facebook/zstd/pull/1146.
* This can happen when the last set_compressed table present is 2
* bytes and the bitstream is only one byte.
* In this exceedingly rare case, we will simply emit an uncompressed
* block, since it isn't worth optimizing.
*/
if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
/* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
assert(lastCountSize + bitstreamSize == 3);
DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
"emitting an uncompressed block.");
return 0;
}
}
DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
return (size_t)(op - ostart);
}
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
size_t srcSize,
void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
{
size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
seqStorePtr, prevEntropy, nextEntropy, cctxParams,
dst, dstCapacity,
entropyWorkspace, entropyWkspSize, bmi2);
if (cSize == 0) return 0;
/* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
* Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
*/
if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
return 0; /* block not compressed */
FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
/* Check compressibility */
{ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
if (cSize >= maxCSize) return 0; /* block not compressed */
}
DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
return cSize;
}
/* ZSTD_selectBlockCompressor() :
* Not static, but internal use only (used by long distance matcher)
* assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
{
static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
{ ZSTD_compressBlock_fast /* default for 0 */,
ZSTD_compressBlock_fast,
ZSTD_compressBlock_doubleFast,
ZSTD_compressBlock_greedy,
ZSTD_compressBlock_lazy,
ZSTD_compressBlock_lazy2,
ZSTD_compressBlock_btlazy2,
ZSTD_compressBlock_btopt,
ZSTD_compressBlock_btultra,
ZSTD_compressBlock_btultra2 },
{ ZSTD_compressBlock_fast_extDict /* default for 0 */,
ZSTD_compressBlock_fast_extDict,
ZSTD_compressBlock_doubleFast_extDict,
ZSTD_compressBlock_greedy_extDict,
ZSTD_compressBlock_lazy_extDict,
ZSTD_compressBlock_lazy2_extDict,
ZSTD_compressBlock_btlazy2_extDict,
ZSTD_compressBlock_btopt_extDict,
ZSTD_compressBlock_btultra_extDict,
ZSTD_compressBlock_btultra_extDict },
{ ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
ZSTD_compressBlock_fast_dictMatchState,
ZSTD_compressBlock_doubleFast_dictMatchState,
ZSTD_compressBlock_greedy_dictMatchState,
ZSTD_compressBlock_lazy_dictMatchState,
ZSTD_compressBlock_lazy2_dictMatchState,
ZSTD_compressBlock_btlazy2_dictMatchState,
ZSTD_compressBlock_btopt_dictMatchState,
ZSTD_compressBlock_btultra_dictMatchState,
ZSTD_compressBlock_btultra_dictMatchState },
{ NULL /* default for 0 */,
NULL,
NULL,
ZSTD_compressBlock_greedy_dedicatedDictSearch,
ZSTD_compressBlock_lazy_dedicatedDictSearch,
ZSTD_compressBlock_lazy2_dedicatedDictSearch,
NULL,
NULL,
NULL,
NULL }
};
ZSTD_blockCompressor selectedCompressor;
ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
{ ZSTD_compressBlock_greedy_row,
ZSTD_compressBlock_lazy_row,
ZSTD_compressBlock_lazy2_row },
{ ZSTD_compressBlock_greedy_extDict_row,
ZSTD_compressBlock_lazy_extDict_row,
ZSTD_compressBlock_lazy2_extDict_row },
{ ZSTD_compressBlock_greedy_dictMatchState_row,
ZSTD_compressBlock_lazy_dictMatchState_row,
ZSTD_compressBlock_lazy2_dictMatchState_row },
{ ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
};
DEBUGLOG(4, "Selecting a row-based matchfinder");
assert(useRowMatchFinder != ZSTD_ps_auto);
selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
} else {
selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
}
assert(selectedCompressor != NULL);
return selectedCompressor;
}
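/* Example (illustrative) : ZSTD_lazy2 with the row matchfinder enabled and no dictionary
 * selects ZSTD_compressBlock_lazy2_row; the same strategy with an attached dictionary
 * (dictMatchState) and rows disabled selects ZSTD_compressBlock_lazy2_dictMatchState. */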
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
const BYTE* anchor, size_t lastLLSize)
{
ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
ssPtr->lit = ssPtr->litStart;
ssPtr->sequences = ssPtr->sequencesStart;
ssPtr->longLengthType = ZSTD_llt_none;
}
typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
/* Assert that we have correctly flushed the ctx params into the ms's copy */
ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
} else {
ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
}
return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
}
ZSTD_resetSeqStore(&(zc->seqStore));
/* required for optimal parser to read stats from dictionary */
ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
/* tell the optimal parser how we expect to compress literals */
ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
/* a gap between an attached dict and the current window is not safe,
* they must remain adjacent,
* and when that stops being the case, the dict must be unset */
assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
/* limited update after a very long match */
{ const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const U32 curr = (U32)(istart-base);
if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
if (curr > ms->nextToUpdate + 384)
ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
}
/* select and store sequences */
{ ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
size_t lastLLSize;
{ int i;
for (i = 0; i < ZSTD_REP_NUM; ++i)
zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
}
if (zc->externSeqStore.pos < zc->externSeqStore.size) {
assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&zc->externSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
} else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
ldmSeqStore.seq = zc->ldmSequences;
ldmSeqStore.capacity = zc->maxNbLdmSequences;
/* Updates ldmSeqStore.size */
FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
&zc->appliedParams.ldmParams,
src, srcSize), "");
/* Updates ldmSeqStore.pos */
lastLLSize =
ZSTD_ldm_blockCompress(&ldmSeqStore,
ms, &zc->seqStore,
zc->blockState.nextCBlock->rep,
zc->appliedParams.useRowMatchFinder,
src, srcSize);
assert(ldmSeqStore.pos == ldmSeqStore.size);
} else { /* not long range mode */
ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
dictMode);
ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
{ const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
} }
return ZSTDbss_compress;
}
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
const seqDef* seqStoreSeqs = seqStore->sequencesStart;
size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
size_t literalsRead = 0;
size_t lastLLSize;
ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
size_t i;
repcodes_t updatedRepcodes;
assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
/* Ensure we have enough space for last literals "sequence" */
assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
for (i = 0; i < seqStoreSeqSize; ++i) {
U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
outSeqs[i].litLength = seqStoreSeqs[i].litLength;
outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
outSeqs[i].rep = 0;
if (i == seqStore->longLengthPos) {
if (seqStore->longLengthType == ZSTD_llt_literalLength) {
outSeqs[i].litLength += 0x10000;
} else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
outSeqs[i].matchLength += 0x10000;
}
}
if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
/* Derive the correct offset corresponding to a repcode */
outSeqs[i].rep = seqStoreSeqs[i].offBase;
if (outSeqs[i].litLength != 0) {
rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
} else {
if (outSeqs[i].rep == 3) {
rawOffset = updatedRepcodes.rep[0] - 1;
} else {
rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
}
}
}
outSeqs[i].offset = rawOffset;
/* seqStoreSeqs[i].offBase == offCode+1, and ZSTD_updateRep() expects offCode,
   so we provide seqStoreSeqs[i].offBase - 1 */
ZSTD_updateRep(updatedRepcodes.rep,
seqStoreSeqs[i].offBase - 1,
seqStoreSeqs[i].litLength == 0);
literalsRead += outSeqs[i].litLength;
}
/* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
* If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
* for the block boundary, according to the API.
*/
assert(seqStoreLiteralsSize >= literalsRead);
lastLLSize = seqStoreLiteralsSize - literalsRead;
outSeqs[i].litLength = (U32)lastLLSize;
outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
seqStoreSeqSize++;
zc->seqCollector.seqIndex += seqStoreSeqSize;
}
size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
size_t outSeqsSize, const void* src, size_t srcSize)
{
const size_t dstCapacity = ZSTD_compressBound(srcSize);
void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
SeqCollector seqCollector;
RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
seqCollector.collectSequences = 1;
seqCollector.seqStart = outSeqs;
seqCollector.seqIndex = 0;
seqCollector.maxSequences = outSeqsSize;
zc->seqCollector = seqCollector;
ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
ZSTD_customFree(dst, ZSTD_defaultCMem);
return zc->seqCollector.seqIndex;
}
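/* Illustrative usage sketch (assumed caller-side code; buffer sizing and error
 * handling are left to the caller and are not part of this file):
 *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
 *   ZSTD_Sequence* const seqs = <array large enough for every sequence plus one
 *                                 block-delimiter entry per block>;
 *   size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxNbSeqs, src, srcSize);
 * Each block contributes its sequences plus a trailing (of:0, ml:0) entry carrying the
 * last literals; ZSTD_mergeBlockDelimiters() below folds those delimiters away. */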
size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
size_t in = 0;
size_t out = 0;
for (; in < seqsSize; ++in) {
if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
if (in != seqsSize - 1) {
sequences[in+1].litLength += sequences[in].litLength;
}
} else {
sequences[out] = sequences[in];
++out;
}
}
return out;
}
/* Unrolled loop to read four size_ts of input at a time. Returns 1 if the input is RLE, 0 if not. */
static int ZSTD_isRLE(const BYTE* src, size_t length) {
const BYTE* ip = src;
const BYTE value = ip[0];
const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
const size_t unrollSize = sizeof(size_t) * 4;
const size_t unrollMask = unrollSize - 1;
const size_t prefixLength = length & unrollMask;
size_t i;
size_t u;
if (length == 1) return 1;
/* Check if prefix is RLE first before using unrolled loop */
if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
return 0;
}
for (i = prefixLength; i != length; i += unrollSize) {
for (u = 0; u < unrollSize; u += sizeof(size_t)) {
if (MEM_readST(ip + i + u) != valueST) {
return 0;
}
}
}
return 1;
}
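/* Worked example (assuming a 64-bit size_t): for a 100-byte input filled with 0x7A,
 * valueST == 0x7A7A7A7A7A7A7A7A, unrollSize == 32 and prefixLength == 100 & 31 == 4.
 * The 4-byte prefix is verified with ZSTD_count(), then the remaining 96 bytes are
 * checked in 3 iterations of four 8-byte word comparisons each. */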
/* Returns true if the given block may be RLE.
* This is just a heuristic based on the compressibility.
* It may return both false positives and false negatives.
*/
static int ZSTD_maybeRLE(seqStore_t const* seqStore)
{
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
return nbSeqs < 4 && nbLits < 10;
}
static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
{
ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
bs->prevCBlock = bs->nextCBlock;
bs->nextCBlock = tmp;
}
/* Writes the block header */
static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) {
U32 const cBlockHeader = cSize == 1 ?
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader);
DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
}
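/* Worked example (illustrative values): a non-last compressed block with cSize == 100
 * yields cBlockHeader == 0 + (bt_compressed << 1) + (100 << 3) == 4 + 800 == 804 (0x324),
 * written little-endian over 3 bytes as 0x24 0x03 0x00. For an RLE block (cSize == 1),
 * it is the uncompressed blockSize, not cSize, that occupies the upper bits. */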
/* ZSTD_buildBlockEntropyStats_literals() :
* Builds entropy for the literals.
* Stores literals block type (raw, rle, compressed, repeat) and
* huffman description table to hufMetadata.
* Requires ENTROPY_WORKSPACE_SIZE workspace
* @return : size of huffman description table or error code */
static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
const ZSTD_hufCTables_t* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_hufCTablesMetadata_t* hufMetadata,
const int literalsCompressionIsDisabled,
void* workspace, size_t wkspSize)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
BYTE* const countWkspStart = wkspStart;
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
BYTE* const nodeWksp = countWkspStart + countWkspSize;
const size_t nodeWkspSize = wkspEnd-nodeWksp;
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
unsigned huffLog = HUF_TABLELOG_DEFAULT;
HUF_repeat repeat = prevHuf->repeatMode;
DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (literalsCompressionIsDisabled) {
DEBUGLOG(5, "set_basic - disabled");
hufMetadata->hType = set_basic;
return 0;
}
/* small ? don't even attempt compression (speed opt) */
#ifndef COMPRESS_LITERALS_SIZE_MIN
#define COMPRESS_LITERALS_SIZE_MIN 63
#endif
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) {
DEBUGLOG(5, "set_basic - too small");
hufMetadata->hType = set_basic;
return 0;
}
}
/* Scan input and build symbol stats */
{ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
if (largest == srcSize) {
DEBUGLOG(5, "set_rle");
hufMetadata->hType = set_rle;
return 0;
}
if (largest <= (srcSize >> 7)+4) {
DEBUGLOG(5, "set_basic - no gain");
hufMetadata->hType = set_basic;
return 0;
}
}
/* Validate the previous Huffman table */
if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
repeat = HUF_repeat_none;
}
/* Build Huffman Tree */
ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
huffLog = (U32)maxBits;
{ /* Build and write the CTable */
size_t const newCSize = HUF_estimateCompressedSize(
(HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
size_t const hSize = HUF_writeCTable_wksp(
hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
(HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
/* Check against repeating the previous CTable */
if (repeat != HUF_repeat_none) {
size_t const oldCSize = HUF_estimateCompressedSize(
(HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
DEBUGLOG(5, "set_repeat - smaller");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_repeat;
return 0;
}
}
if (newCSize + hSize >= srcSize) {
DEBUGLOG(5, "set_basic - no gains");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_basic;
return 0;
}
DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
hufMetadata->hType = set_compressed;
nextHuf->repeatMode = HUF_repeat_check;
return hSize;
}
}
}
/* ZSTD_buildDummySequencesStatistics():
* Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
* and updates nextEntropy to the appropriate repeatMode.
*/
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
nextEntropy->litlength_repeatMode = FSE_repeat_none;
nextEntropy->offcode_repeatMode = FSE_repeat_none;
nextEntropy->matchlength_repeatMode = FSE_repeat_none;
return stats;
}
/* ZSTD_buildBlockEntropyStats_sequences() :
* Builds entropy for the sequences.
* Stores symbol compression modes and fse table to fseMetadata.
* Requires ENTROPY_WORKSPACE_SIZE wksp.
* @return : size of fse tables or error code */
static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
const ZSTD_fseCTables_t* prevEntropy,
ZSTD_fseCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize)
{
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
BYTE* const ostart = fseMetadata->fseTablesBuffer;
BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
BYTE* op = ostart;
unsigned* countWorkspace = (unsigned*)workspace;
unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
ZSTD_symbolEncodingTypeStats_t stats;
DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
prevEntropy, nextEntropy, op, oend,
strategy, countWorkspace,
entropyWorkspace, entropyWorkspaceSize)
: ZSTD_buildDummySequencesStatistics(nextEntropy);
FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
fseMetadata->lastCountSize = stats.lastCountSize;
return stats.size;
}
/* ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
* Requires workspace size ENTROPY_WORKSPACE_SIZE
*
* @return : 0 on success or error code
*/
size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize)
{
size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
entropyMetadata->hufMetadata.hufDesSize =
ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
&prevEntropy->huf, &nextEntropy->huf,
&entropyMetadata->hufMetadata,
ZSTD_literalsCompressionIsDisabled(cctxParams),
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
entropyMetadata->fseMetadata.fseTablesSize =
ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
&prevEntropy->fse, &nextEntropy->fse,
cctxParams,
&entropyMetadata->fseMetadata,
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
return 0;
}
/* Returns the size estimate for the literals section (header + content) of a block */
static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
const ZSTD_hufCTables_t* huf,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
unsigned* const countWksp = (unsigned*)workspace;
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
U32 singleStream = litSize < 256;
if (hufMetadata->hType == set_basic) return litSize;
else if (hufMetadata->hType == set_rle) return 1;
else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
if (ZSTD_isError(largest)) return litSize;
{ size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
return cLitSizeEstimate + literalSectionHeaderSize;
} }
assert(0); /* impossible */
return 0;
}
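/* Worked example (illustrative values): litSize == 2000 gives
 * literalSectionHeaderSize == 3 + 1 + 0 == 4 bytes, and since litSize >= 256 the
 * literals are assumed to use 4 streams, so 6 bytes of jump table are added on top
 * of the Huffman-estimated payload (plus hufDesSize when the table is written). */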
/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
const FSE_CTable* fseCTable,
const U8* additionalBits,
short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
void* workspace, size_t wkspSize)
{
unsigned* const countWksp = (unsigned*)workspace;
const BYTE* ctp = codeTable;
const BYTE* const ctStart = ctp;
const BYTE* const ctEnd = ctStart + nbSeq;
size_t cSymbolTypeSizeEstimateInBits = 0;
unsigned max = maxCode;
HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
if (type == set_basic) {
/* We selected this encoding type, so it must be valid. */
assert(max <= defaultMax);
(void)defaultMax;
cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
} else if (type == set_rle) {
cSymbolTypeSizeEstimateInBits = 0;
} else if (type == set_compressed || type == set_repeat) {
cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
}
if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
return nbSeq * 10;
}
while (ctp < ctEnd) {
if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
ctp++;
}
return cSymbolTypeSizeEstimateInBits >> 3;
}
/* Returns the size estimate for the sequences section (header + content) of a block */
static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min nbSeq field size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
size_t cSeqSizeEstimate = 0;
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
fseTables->offcodeCTable, NULL,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
fseTables->litlengthCTable, LL_bits,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
fseTables->matchlengthCTable, ML_bits,
ML_defaultNorm, ML_defaultNormLog, MaxML,
workspace, wkspSize);
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
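/* Worked example (illustrative values): nbSeq == 200 (>= 128 but below LONGNBSEQ) gives
 * sequencesSectionHeaderSize == 1 + 1 + 1 + 0 == 3 bytes: a 2-byte sequence count plus
 * the 1-byte symbol-compression-modes header. The three symbol-type estimates and,
 * when written, fseMetadata->fseTablesSize are added on top. */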
/* Returns the size estimate for a given stream of literals, of, ll, ml */
static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize,
int writeLitEntropy, int writeSeqEntropy) {
size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
&entropy->huf, &entropyMetadata->hufMetadata,
workspace, wkspSize, writeLitEntropy);
size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
workspace, wkspSize, writeSeqEntropy);
return seqSize + literalsSize + ZSTD_blockHeaderSize;
}
/* Builds entropy statistics and uses them for blocksize estimation.
*
* Returns the estimated compressed size of the seqStore, or a zstd error.
*/
static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) {
ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
entropyMetadata,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
(size_t)(seqStore->sequences - seqStore->sequencesStart),
&zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
(int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
}
/* Returns the number of literal bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) {
size_t literalsBytes = 0;
size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
size_t i;
for (i = 0; i < nbSeqs; ++i) {
seqDef seq = seqStore->sequencesStart[i];
literalsBytes += seq.litLength;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
literalsBytes += 0x10000;
}
}
return literalsBytes;
}
/* Returns the number of match bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
size_t matchBytes = 0;
size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
size_t i;
for (i = 0; i < nbSeqs; ++i) {
seqDef seq = seqStore->sequencesStart[i];
matchBytes += seq.mlBase + MINMATCH;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
matchBytes += 0x10000;
}
}
return matchBytes;
}
/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
* Stores the result in resultSeqStore.
*/
static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
const seqStore_t* originalSeqStore,
size_t startIdx, size_t endIdx) {
BYTE* const litEnd = originalSeqStore->lit;
size_t literalsBytes;
size_t literalsBytesPreceding = 0;
*resultSeqStore = *originalSeqStore;
if (startIdx > 0) {
resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
}
/* Move longLengthPos into the correct position if necessary */
if (originalSeqStore->longLengthType != ZSTD_llt_none) {
if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
resultSeqStore->longLengthType = ZSTD_llt_none;
} else {
resultSeqStore->longLengthPos -= (U32)startIdx;
}
}
resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
resultSeqStore->litStart += literalsBytesPreceding;
if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
/* This accounts for possible last literals if the derived chunk reaches the end of the block */
resultSeqStore->lit = litEnd;
} else {
resultSeqStore->lit = resultSeqStore->litStart+literalsBytes;
}
resultSeqStore->llCode += startIdx;
resultSeqStore->mlCode += startIdx;
resultSeqStore->ofCode += startIdx;
}
/*
* Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
* offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
*/
static U32
ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
{
U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */
assert(STORED_IS_REPCODE(offCode));
if (adjustedOffCode == ZSTD_REP_NUM) {
/* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
assert(rep[0] > 0);
return rep[0] - 1;
}
return rep[adjustedOffCode];
}
/*
* ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
* due to emission of RLE/raw blocks that disturb the offset history,
* and replaces any repcodes within the seqStore that may be invalid.
*
* dRepcodes are updated as would be on the decompression side.
* cRepcodes are updated exactly in accordance with the seqStore.
*
* Note : this function assumes seq->offBase respects the following numbering scheme :
* 0 : invalid
* 1-3 : repcode 1-3
* 4+ : real_offset+3
*/
static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
seqStore_t* const seqStore, U32 const nbSeq) {
U32 idx = 0;
for (; idx < nbSeq; ++idx) {
seqDef* const seq = seqStore->sequencesStart + idx;
U32 const ll0 = (seq->litLength == 0);
U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
assert(seq->offBase > 0);
if (STORED_IS_REPCODE(offCode)) {
U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
/* Adjust simulated decompression repcode history if we come across a mismatch. Replace
* the repcode with the offset it actually references, determined by the compression
* repcode history.
*/
if (dRawOffset != cRawOffset) {
seq->offBase = cRawOffset + ZSTD_REP_NUM;
}
}
/* Compression repcode history is always updated with values directly from the unmodified seqStore.
* Decompression repcode history may use the modified seq->offBase value taken from the compression repcode history.
*/
ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
}
}
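/* Illustrative resolution example (made-up history rep == {8, 16, 24}):
 * repcode 1 with litLength > 0 resolves to rep[0] == 8; repcode 1 with litLength == 0
 * shifts by one and resolves to rep[1] == 16; repcode 3 with litLength == 0 resolves to
 * rep[0] - 1 == 7. A non-repcode offBase of 7 would simply mean raw offset 7 - 3 == 4. */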
/* ZSTD_compressSeqStore_singleBlock():
* Compresses a seqStore into a block with a block header, into the buffer dst.
*
* Returns the total size of that block (including header) or a ZSTD error code.
*/
static size_t
ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
repcodes_t* const dRep, repcodes_t* const cRep,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastBlock, U32 isPartition)
{
const U32 rleMaxLength = 25;
BYTE* op = (BYTE*)dst;
const BYTE* ip = (const BYTE*)src;
size_t cSize;
size_t cSeqsSize;
/* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
repcodes_t const dRepOriginal = *dRep;
DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
if (isPartition)
ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
srcSize,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
zc->bmi2);
FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
if (!zc->isFirstBlock &&
cSeqsSize < rleMaxLength &&
ZSTD_isRLE((BYTE const*)src, srcSize)) {
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input" error.
* This is only an issue for zstd <= v1.4.3
*/
cSeqsSize = 1;
}
if (zc->seqCollector.collectSequences) {
ZSTD_copyBlockSequences(zc);
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
if (cSeqsSize == 0) {
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "Nocompress block failed");
DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else if (cSeqsSize == 1) {
cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "RLE compress block failed");
DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
cSize = ZSTD_blockHeaderSize + cSeqsSize;
DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
}
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
}
/* Struct to keep track of where we are in our recursive calls. */
typedef struct {
U32* splitLocations; /* Array of split indices */
size_t idx; /* The current index within splitLocations being worked on */
} seqStoreSplits;
#define MIN_SEQUENCES_BLOCK_SPLITTING 300
/* Helper function to perform the recursive search for block splits.
* Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
* If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then
* we do not recurse.
*
* Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
* In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
* In practice, recursion depth usually doesn't go beyond 4.
*
* Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current blockSize
* maximum of 128 KB, this cap is actually impossible to reach.
*/
static void
ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
{
seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
size_t estimatedOriginalSize;
size_t estimatedFirstHalfSize;
size_t estimatedSecondHalfSize;
size_t midIdx = (startIdx + endIdx)/2;
if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences");
return;
}
DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
return;
}
if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
splits->splitLocations[splits->idx] = (U32)midIdx;
splits->idx++;
ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
}
}
/* Base recursive function. Populates a table with intra-block partition indices that can improve compression ratio.
*
* Returns the number of splits made (which equals the size of the partition table - 1).
*/
static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) {
seqStoreSplits splits = {partitions, 0};
if (nbSeq <= 4) {
DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
/* Refuse to try to split anything with 4 or fewer sequences */
return 0;
}
ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
splits.splitLocations[splits.idx] = nbSeq;
DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
return splits.idx;
}
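/* Hypothetical example: with nbSeq == 1000 and a single profitable split found at
 * sequence 500, partitions becomes {500, 1000} and the function returns 1; the caller
 * then compresses [0, 500) and [500, 1000) as separate blocks. */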
/* ZSTD_compressBlock_splitBlock():
* Attempts to split a given block into multiple blocks to improve compression ratio.
*
* Returns combined size of all blocks (which includes headers), or a ZSTD error code.
*/
static size_t
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
{
size_t cSize = 0;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
size_t i = 0;
size_t srcBytesTotal = 0;
U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
/* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
* may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
* separate repcode histories that simulate repcode history on compression and decompression side,
* and use the histories to determine whether we must replace a particular repcode with its raw offset.
*
* 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
* or RLE. This allows us to retrieve the offset value that an invalid repcode references within
* a nocompress/RLE block.
* 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
* the replacement offset value rather than the original repcode to update the repcode history.
* dRep also will be the final repcode history sent to the next block.
*
* See ZSTD_seqStore_resolveOffCodes() for more details.
*/
repcodes_t dRep;
repcodes_t cRep;
ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
(unsigned)zc->blockState.matchState.nextToUpdate);
if (numSplits == 0) {
size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
&dRep, &cRep,
op, dstCapacity,
ip, blockSize,
lastBlock, 0 /* isPartition */);
FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
return cSizeSingleBlock;
}
ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
for (i = 0; i <= numSplits; ++i) {
size_t srcBytes;
size_t cSizeChunk;
U32 const lastPartition = (i == numSplits);
U32 lastBlockEntireSrc = 0;
srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
srcBytesTotal += srcBytes;
if (lastPartition) {
/* This is the final partition, need to account for possible last literals */
srcBytes += blockSize - srcBytesTotal;
lastBlockEntireSrc = lastBlock;
} else {
ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
}
cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
&dRep, &cRep,
op, dstCapacity,
ip, srcBytes,
lastBlockEntireSrc, 1 /* isPartition */);
DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
ip += srcBytes;
op += cSizeChunk;
dstCapacity -= cSizeChunk;
cSize += cSizeChunk;
*currSeqStore = *nextSeqStore;
assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
}
/* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes
* for the next block.
*/
ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
return cSize;
}
static size_t
ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 lastBlock)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
U32 nbSeq;
size_t cSize;
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
if (bss == ZSTDbss_noCompress) {
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
return cSize;
}
nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
}
cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
return cSize;
}
static size_t
ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 frame)
{
/* This is a heuristic upper bound for the length of an RLE block.
* It isn't the actual upper bound; finding the real threshold
* needs further investigation.
*/
const U32 rleMaxLength = 25;
size_t cSize;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
(unsigned)zc->blockState.matchState.nextToUpdate);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
}
if (zc->seqCollector.collectSequences) {
ZSTD_copyBlockSequences(zc);
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
/* encode sequences and literals */
cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
dst, dstCapacity,
srcSize,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
zc->bmi2);
if (frame &&
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input" error.
* This is only an issue for zstd <= v1.4.3
*/
!zc->isFirstBlock &&
cSize < rleMaxLength &&
ZSTD_isRLE(ip, srcSize))
{
cSize = 1;
op[0] = ip[0];
}
out:
if (!ZSTD_isError(cSize) && cSize > 1) {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
}
/* We check that dictionaries have offset codes available for the first
* block. After the first block, the offcode table might not have large
* enough codes to represent the offsets in the data.
*/
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
}
static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const size_t bss, U32 lastBlock)
{
DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
if (bss == ZSTDbss_compress) {
if (/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input" error.
* This is only an issue for zstd <= v1.4.3
*/
!zc->isFirstBlock &&
ZSTD_maybeRLE(&zc->seqStore) &&
ZSTD_isRLE((BYTE const*)src, srcSize))
{
return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
}
/* Attempt superblock compression.
*
* Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
* standard ZSTD_compressBound(). This is a problem, because even if we have
* space now, taking an extra byte now could cause us to run out of space later
* and violate ZSTD_compressBound().
*
* Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
*
* In order to respect ZSTD_compressBound() we must attempt to emit a raw
* uncompressed block in these cases:
* * cSize == 0: Return code for an uncompressed block.
* * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
* ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
* output space.
* * cSize >= blockBound(srcSize): We have expanded the block too much so
* emit an uncompressed block.
*/
{
size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
if (cSize != ERROR(dstSize_tooSmall)) {
size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return cSize;
}
}
}
}
DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
/* Superblock compression failed, attempt to emit a single no compress block.
* The decoder will be able to stream this block since it is uncompressed.
*/
return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
}
static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastBlock)
{
size_t cSize = 0;
const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
}
static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
void const* ip,
void const* iend)
{
U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
U32 const maxDist = (U32)1 << params->cParams.windowLog;
if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
ZSTD_cwksp_mark_tables_dirty(ws);
ZSTD_reduceIndex(ms, params, correction);
ZSTD_cwksp_mark_tables_clean(ws);
if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
else ms->nextToUpdate -= correction;
/* invalidate dictionaries on overflow correction */
ms->loadedDictEnd = 0;
ms->dictMatchState = NULL;
}
}
/*! ZSTD_compress_frameChunk() :
* Compress a chunk of data into one or multiple blocks.
* All blocks will be terminated, all input will be consumed.
* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
* The frame is assumed to have already been started (header already produced)
* @return : compressed size, or an error code
*/
static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastFrameChunk)
{
size_t blockSize = cctx->blockSize;
size_t remaining = srcSize;
const BYTE* ip = (const BYTE*)src;
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
if (cctx->appliedParams.fParams.checksumFlag && srcSize)
xxh64_update(&cctx->xxhState, src, srcSize);
while (remaining) {
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
dstSize_tooSmall,
"not enough space to store compressed block");
if (remaining < blockSize) blockSize = remaining;
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
/* Ensure hash/chain table insertion resumes no sooner than lowlimit */
if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
{ size_t cSize;
if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
assert(cSize > 0);
assert(cSize <= blockSize + ZSTD_blockHeaderSize);
} else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
} else {
cSize = ZSTD_compressBlock_internal(cctx,
op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
ip, blockSize, 1 /* frame */);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
if (cSize == 0) { /* block is not compressible */
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
} else {
U32 const cBlockHeader = cSize == 1 ?
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader);
cSize += ZSTD_blockHeaderSize;
}
}
ip += blockSize;
assert(remaining >= blockSize);
remaining -= blockSize;
op += cSize;
assert(dstCapacity >= cSize);
dstCapacity -= cSize;
cctx->isFirstBlock = 0;
DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
(unsigned)cSize);
} }
if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
return (size_t)(op-ostart);
}
static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
{ BYTE* const op = (BYTE*)dst;
U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
U32 const checksumFlag = params->fParams.checksumFlag>0;
U32 const windowSize = (U32)1 << params->cParams.windowLog;
U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
U32 const fcsCode = params->fParams.contentSizeFlag ?
(pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
size_t pos=0;
assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
"dst buf is too small to fit worst-case frame header size.");
DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
!params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
if (params->format == ZSTD_f_zstd1) {
MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
pos = 4;
}
op[pos++] = frameHeaderDescriptionByte;
if (!singleSegment) op[pos++] = windowLogByte;
switch(dictIDSizeCode)
{
default:
assert(0); /* impossible */
ZSTD_FALLTHROUGH;
case 0 : break;
case 1 : op[pos] = (BYTE)(dictID); pos++; break;
case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
}
switch(fcsCode)
{
default:
assert(0); /* impossible */
ZSTD_FALLTHROUGH;
case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
}
return pos;
}
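/* Worked example (illustrative values): with no dictID, checksum enabled, a multi-segment
 * frame and a content size that needs 4 bytes (fcsCode == 2), the descriptor byte is
 * 0 + (1 << 2) + (0 << 5) + (2 << 6) == 0x84, followed by the windowLog byte and a 4-byte
 * little-endian content size; no dictID field is emitted. */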
/* ZSTD_writeSkippableFrame() :
* Writes out a skippable frame with the specified magic number variant (16 are supported),
* from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
*
* Returns the total number of bytes written, or a ZSTD error code.
*/
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
const void* src, size_t srcSize, unsigned magicVariant) {
BYTE* op = (BYTE*)dst;
RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
dstSize_tooSmall, "Not enough room for skippable frame");
RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
MEM_writeLE32(op+4, (U32)srcSize);
ZSTD_memcpy(op+8, src, srcSize);
return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}
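/* Worked example (illustrative values): magicVariant == 3 and srcSize == 10 writes the
 * 4-byte magic ZSTD_MAGIC_SKIPPABLE_START + 3, a 4-byte little-endian frame size of 10,
 * then the 10 payload bytes, and returns 18 (== srcSize + ZSTD_SKIPPABLEHEADERSIZE). */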
/* ZSTD_writeLastEmptyBlock() :
* output an empty Block with end-of-frame mark to complete a frame
* @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
* or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
*/
size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
{
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
"dst buf is too small to write frame trailer empty block.");
{ U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
MEM_writeLE24(dst, cBlockHeader24);
return ZSTD_blockHeaderSize;
}
}
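/* Worked example: with bt_raw == 0 and a zero block size, cBlockHeader24 == 1, so the
 * emitted trailer bytes are 0x01 0x00 0x00 (a last, raw, empty block) and the function
 * returns ZSTD_blockHeaderSize == 3. */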
size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
{
RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
"wrong cctx stage");
RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
parameter_unsupported,
"incompatible with ldm");
cctx->externSeqStore.seq = seq;
cctx->externSeqStore.size = nbSeq;
cctx->externSeqStore.capacity = nbSeq;
cctx->externSeqStore.pos = 0;
cctx->externSeqStore.posInSequence = 0;
return 0;
}
static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 frame, U32 lastFrameChunk)
{
ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
size_t fhSize = 0;
DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
cctx->stage, (unsigned)srcSize);
RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
"missing init (ZSTD_compressBegin)");
if (frame && (cctx->stage==ZSTDcs_init)) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
assert(fhSize <= dstCapacity);
dstCapacity -= fhSize;
dst = (char*)dst + fhSize;
cctx->stage = ZSTDcs_ongoing;
}
if (!srcSize) return fhSize; /* do not generate an empty block if no input */
if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
ms->forceNonContiguous = 0;
ms->nextToUpdate = ms->window.dictLimit;
}
if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
}
if (!frame) {
/* overflow check and correction for block mode */
ZSTD_overflowCorrectIfNeeded(
ms, &cctx->workspace, &cctx->appliedParams,
src, (BYTE const*)src + srcSize);
}
DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
{ size_t const cSize = frame ?
ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
cctx->consumedSrcSize += srcSize;
cctx->producedCSize += (cSize + fhSize);
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
RETURN_ERROR_IF(
cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
srcSize_wrong,
"error : pledgedSrcSize = %u, while realSrcSize >= %u",
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
}
return cSize + fhSize;
}
}
size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
}
size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
{
ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
assert(!ZSTD_checkCParams(cParams));
return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
}
size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
{ size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
}
/*! ZSTD_loadDictionaryContent() :
* @return : 0, or an error code
*/
static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* src, size_t srcSize,
ZSTD_dictTableLoadMethod_e dtlm)
{
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
/* Assert that the ms params match the params we're being given */
ZSTD_assertEqualCParams(params->cParams, ms->cParams);
if (srcSize > ZSTD_CHUNKSIZE_MAX) {
/* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
* Dictionaries right at the edge will immediately trigger overflow
* correction, but I don't want to insert extra constraints here.
*/
U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
/* We must have cleared our windows when our source is this large. */
assert(ZSTD_window_isEmpty(ms->window));
if (loadLdmDict)
assert(ZSTD_window_isEmpty(ls->window));
/* If the dictionary is too large, only load the suffix of the dictionary. */
if (srcSize > maxDictSize) {
ip = iend - maxDictSize;
src = ip;
srcSize = maxDictSize;
}
}
DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
ms->forceNonContiguous = params->deterministicRefPrefix;
if (loadLdmDict) {
ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
}
if (srcSize <= HASH_READ_SIZE) return 0;
ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
if (loadLdmDict)
ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams);
switch(params->cParams.strategy)
{
case ZSTD_fast:
ZSTD_fillHashTable(ms, iend, dtlm);
break;
case ZSTD_dfast:
ZSTD_fillDoubleHashTable(ms, iend, dtlm);
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
assert(srcSize >= HASH_READ_SIZE);
if (ms->dedicatedDictSearch) {
assert(ms->chainTable != NULL);
ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
} else {
assert(params->useRowMatchFinder != ZSTD_ps_auto);
if (params->useRowMatchFinder == ZSTD_ps_enable) {
size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
ZSTD_memset(ms->tagTable, 0, tagTableSize);
ZSTD_row_update(ms, iend-HASH_READ_SIZE);
DEBUGLOG(4, "Using row-based hash table for lazy dict");
} else {
ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
DEBUGLOG(4, "Using chain-based hash table for lazy dict");
}
}
break;
case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
assert(srcSize >= HASH_READ_SIZE);
ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
break;
default:
assert(0); /* not possible : not a valid strategy id */
}
ms->nextToUpdate = (U32)(iend - ms->window.base);
return 0;
}
/* Dictionaries that assign zero probability to symbols that do show up cause problems
* during FSE encoding. Mark dictionaries with zero-probability symbols as FSE_repeat_check;
* only dictionaries in which every symbol has a non-zero probability can be assumed valid.
*/
static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
U32 s;
if (dictMaxSymbolValue < maxSymbolValue) {
return FSE_repeat_check;
}
for (s = 0; s <= maxSymbolValue; ++s) {
if (normalizedCounter[s] == 0) {
return FSE_repeat_check;
}
}
return FSE_repeat_valid;
}
size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
const void* const dict, size_t dictSize)
{
short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff;
const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
const BYTE* const dictEnd = dictPtr + dictSize;
dictPtr += 8;
bs->entropy.huf.repeatMode = HUF_repeat_check;
{ unsigned maxSymbolValue = 255;
unsigned hasZeroWeights = 1;
size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
dictEnd-dictPtr, &hasZeroWeights);
/* We only set the loaded table as valid if it contains all non-zero
* weights. Otherwise, we set it to check */
if (!hasZeroWeights)
bs->entropy.huf.repeatMode = HUF_repeat_valid;
RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
dictPtr += hufHeaderSize;
}
{ unsigned offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
/* fill all offset symbols to avoid garbage at end of table */
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.offcodeCTable,
offcodeNCount, MaxOff, offcodeLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
dictPtr += offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.matchlengthCTable,
matchlengthNCount, matchlengthMaxValue, matchlengthLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
dictPtr += matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
bs->entropy.fse.litlengthCTable,
litlengthNCount, litlengthMaxValue, litlengthLog,
workspace, HUF_WORKSPACE_SIZE)),
dictionary_corrupted, "");
bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
dictPtr += litlengthHeaderSize;
}
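/* The entropy section ends with the 3 starting repcodes, stored as
 * little-endian 32-bit values. */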
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
bs->rep[0] = MEM_readLE32(dictPtr+0);
bs->rep[1] = MEM_readLE32(dictPtr+4);
bs->rep[2] = MEM_readLE32(dictPtr+8);
dictPtr += 12;
{ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
U32 offcodeMax = MaxOff;
if (dictContentSize <= ((U32)-1) - 128 KB) {
U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
}
/* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
/* All repCodes must be <= dictContentSize and != 0 */
{ U32 u;
for (u=0; u<3; u++) {
RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
} } }
return dictPtr - (const BYTE*)dict;
}
/* Dictionary format :
* See :
* https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
*/
/*! ZSTD_loadZstdDictionary() :
* @return : dictID, or an error code
 * assumptions : magic number is assumed to be already checked,
 * dictSize is assumed >= 8
*/
static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_matchState_t* ms,
ZSTD_cwksp* ws,
ZSTD_CCtx_params const* params,
const void* dict, size_t dictSize,
ZSTD_dictTableLoadMethod_e dtlm,
void* workspace)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
size_t dictID;
size_t eSize;
ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
assert(dictSize >= 8);
assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
dictPtr += eSize;
{
size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
}
return dictID;
}
/* ZSTD_compress_insertDictionary() :
* @return : dictID, or an error code */
static size_t
ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
ZSTD_matchState_t* ms,
ldmState_t* ls,
ZSTD_cwksp* ws,
const ZSTD_CCtx_params* params,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
void* workspace)
{
DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
if ((dict==NULL) || (dictSize<8)) {
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
return 0;
}
ZSTD_reset_compressedBlockState(bs);
/* dict restricted modes */
if (dictContentType == ZSTD_dct_rawContent)
return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_auto) {
DEBUGLOG(4, "raw content dictionary detected");
return ZSTD_loadDictionaryContent(
ms, ls, ws, params, dict, dictSize, dtlm);
}
RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
assert(0); /* impossible */
}
/* dict as full zstd dictionary */
return ZSTD_loadZstdDictionary(
bs, ms, ws, params, dict, dictSize, dtlm, workspace);
}
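/* Thresholds used by ZSTD_compressBegin_internal() to decide whether the
 * compression parameters stored in the CDict can be reused : the CDict's
 * parameters are preferred when the pledged source size is small, small
 * relative to the dictionary, or unknown. */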
#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
/*! ZSTD_compressBegin_internal() :
* @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
ZSTD_buffered_policy_e zbuff)
{
size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
/* params are supposed to be fully validated at this point */
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if ( (cdict)
&& (cdict->dictContentSize > 0)
&& ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
|| pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| cdict->compressionLevel == 0)
&& (params->attachDictPref != ZSTD_dictForceLoad) ) {
return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
}
FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
dictContentSize,
ZSTDcrp_makeClean, zbuff) , "");
{ size_t const dictID = cdict ?
ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
cdict->dictContentSize, cdict->dictContentType, dtlm,
cctx->entropyWorkspace)
: ZSTD_compress_insertDictionary(
cctx->blockState.prevCBlock, &cctx->blockState.matchState,
&cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
dictContentType, dtlm, cctx->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= UINT_MAX);
cctx->dictID = (U32)dictID;
cctx->dictContentSize = dictContentSize;
}
return 0;
}
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_dictContentType_e dictContentType,
ZSTD_dictTableLoadMethod_e dtlm,
const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
/* compression parameters verification and optimization */
FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
return ZSTD_compressBegin_internal(cctx,
dict, dictSize, dictContentType, dtlm,
cdict,
params, pledgedSrcSize,
ZSTDb_not_buffered);
}
/*! ZSTD_compressBegin_advanced() :
* @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
ZSTD_CCtx_params cctxParams;
ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
return ZSTD_compressBegin_advanced_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
NULL /*cdict*/,
&cctxParams, pledgedSrcSize);
}
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_CCtx_params cctxParams;
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
}
DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
}
/*! ZSTD_writeEpilogue() :
* Ends a frame.
* @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* op = ostart;
size_t fhSize = 0;
DEBUGLOG(4, "ZSTD_writeEpilogue");
RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
/* special case : empty frame */
if (cctx->stage == ZSTDcs_init) {
fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
dstCapacity -= fhSize;
op += fhSize;
cctx->stage = ZSTDcs_ongoing;
}
if (cctx->stage != ZSTDcs_ending) {
/* write one last empty block, make it the "last" block */
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
MEM_writeLE32(op, cBlockHeader24);
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
}
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
MEM_writeLE32(op, checksum);
op += 4;
}
cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
return op-ostart;
}
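/* Tracing hook : a no-op in this build ; both parameters are ignored. */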
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
(void)cctx;
(void)extraCSize;
}
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
size_t endResult;
size_t const cSize = ZSTD_compressContinue_internal(cctx,
dst, dstCapacity, src, srcSize,
1 /* frame mode */, 1 /* last chunk */);
FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
DEBUGLOG(4, "end of frame : controlling src size");
RETURN_ERROR_IF(
cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
srcSize_wrong,
"error : pledgedSrcSize = %u, while realSrcSize = %u",
(unsigned)cctx->pledgedSrcSizePlusOne-1,
(unsigned)cctx->consumedSrcSize);
}
ZSTD_CCtx_trace(cctx, endResult);
return cSize + endResult;
}
size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params)
{
DEBUGLOG(4, "ZSTD_compress_advanced");
FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
return ZSTD_compress_advanced_internal(cctx,
dst, dstCapacity,
src, srcSize,
dict, dictSize,
&cctx->simpleApiParams);
}
/* Internal */
size_t ZSTD_compress_advanced_internal(
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
const ZSTD_CCtx_params* params)
{
DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
params, srcSize, ZSTDb_not_buffered) , "");
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
int compressionLevel)
{
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
assert(params.fParams.contentSizeFlag == 1);
ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
}
DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
}
size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
{
DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
assert(cctx != NULL);
return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}
size_t ZSTD_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel)
{
size_t result;
ZSTD_CCtx* cctx = ZSTD_createCCtx();
RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
ZSTD_freeCCtx(cctx);
return result;
}
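/* A minimal usage sketch for the one-shot API above ; buffer names are
 * illustrative, handle_error() is hypothetical, and dstBuf is assumed to
 * hold at least ZSTD_compressBound(srcSize) bytes :
 *
 *   size_t const cSize = ZSTD_compress(dstBuf, dstCapacity, srcBuf, srcSize, 3);
 *   if (ZSTD_isError(cSize))
 *       handle_error(cSize);
 */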
/* ===== Dictionary API ===== */
/*! ZSTD_estimateCDictSize_advanced() :
* Estimate amount of memory that will be needed to create a dictionary with following arguments */
size_t ZSTD_estimateCDictSize_advanced(
size_t dictSize, ZSTD_compressionParameters cParams,
ZSTD_dictLoadMethod_e dictLoadMethod)
{
DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
/* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
* in case we are using DDS with row-hash. */
+ ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
/* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
}
size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support sizeof on NULL */
DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
/* cdict may be in the workspace */
return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+ ZSTD_cwksp_sizeof(&cdict->workspace);
}
static size_t ZSTD_initCDict_internal(
ZSTD_CDict* cdict,
const void* dictBuffer, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_CCtx_params params)
{
DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
assert(!ZSTD_checkCParams(params.cParams));
cdict->matchState.cParams = params.cParams;
cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
cdict->dictContent = dictBuffer;
} else {
void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
cdict->dictContent = internalBuffer;
ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
}
cdict->dictContentSize = dictSize;
cdict->dictContentType = dictContentType;
cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
/* Reset the state to no dictionary */
ZSTD_reset_compressedBlockState(&cdict->cBlockState);
FORWARD_IF_ERROR(ZSTD_reset_matchState(
&cdict->matchState,
&cdict->workspace,
&params.cParams,
params.useRowMatchFinder,
ZSTDcrp_makeClean,
ZSTDirp_reset,
ZSTD_resetTarget_CDict), "");
/* (Maybe) load the dictionary
* Skips loading the dictionary if it is < 8 bytes.
*/
{ params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
params.fParams.contentSizeFlag = 1;
{ size_t const dictID = ZSTD_compress_insertDictionary(
&cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
&params, cdict->dictContent, cdict->dictContentSize,
dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
assert(dictID <= (size_t)(U32)-1);
cdict->dictID = (U32)dictID;
}
}
return 0;
}
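/* ZSTD_createCDict_advanced_internal() :
 * Allocates one workspace sized for the ZSTD_CDict structure, the entropy
 * workspace, the match state and (unless loading byRef) a copy of the
 * dictionary, then carves the CDict object out of that workspace.
 * @return : the new CDict, or NULL on allocation failure. */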
static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_compressionParameters cParams,
ZSTD_paramSwitch_e useRowMatchFinder,
U32 enableDedicatedDictSearch,
ZSTD_customMem customMem)
{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ size_t const workspaceSize =
ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
(dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
ZSTD_cwksp ws;
ZSTD_CDict* cdict;
if (!workspace) {
ZSTD_customFree(workspace, customMem);
return NULL;
}
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
assert(cdict != NULL);
ZSTD_cwksp_move(&cdict->workspace, &ws);
cdict->customMem = customMem;
cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
cdict->useRowMatchFinder = useRowMatchFinder;
return cdict;
}
}
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams,
ZSTD_customMem customMem)
{
ZSTD_CCtx_params cctxParams;
ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
ZSTD_CCtxParams_init(&cctxParams, 0);
cctxParams.cParams = cParams;
cctxParams.customMem = customMem;
return ZSTD_createCDict_advanced2(
dictBuffer, dictSize,
dictLoadMethod, dictContentType,
&cctxParams, customMem);
}
ZSTD_CDict* ZSTD_createCDict_advanced2(
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
const ZSTD_CCtx_params* originalCctxParams,
ZSTD_customMem customMem)
{
ZSTD_CCtx_params cctxParams = *originalCctxParams;
ZSTD_compressionParameters cParams;
ZSTD_CDict* cdict;
DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
if (cctxParams.enableDedicatedDictSearch) {
cParams = ZSTD_dedicatedDictSearch_getCParams(
cctxParams.compressionLevel, dictSize);
ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
} else {
cParams = ZSTD_getCParamsFromCCtxParams(
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
}
if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
/* Fall back to non-DDSS params */
cctxParams.enableDedicatedDictSearch = 0;
cParams = ZSTD_getCParamsFromCCtxParams(
&cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
}
DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
cctxParams.cParams = cParams;
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
cdict = ZSTD_createCDict_advanced_internal(dictSize,
dictLoadMethod, cctxParams.cParams,
cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
customMem);
if (!cdict) return NULL; /* allocation failure inside ZSTD_createCDict_advanced_internal() */
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
cctxParams) )) {
ZSTD_freeCDict(cdict);
return NULL;
}
return cdict;
}
ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byCopy, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
if (cdict)
cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
}
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
ZSTD_dlm_byRef, ZSTD_dct_auto,
cParams, ZSTD_defaultCMem);
if (cdict)
cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
return cdict;
}
size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = cdict->customMem;
int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
ZSTD_cwksp_free(&cdict->workspace, cMem);
if (!cdictInWorkspace) {
ZSTD_customFree(cdict, cMem);
}
return 0;
}
}
/*! ZSTD_initStaticCDict() :
 * Generate a digested dictionary in the provided memory area.
 * workspace: The memory area to emplace the dictionary into.
 * The provided pointer must be 8-byte aligned.
* It must outlive dictionary usage.
* workspaceSize: Use ZSTD_estimateCDictSize()
* to determine how large workspace must be.
* cParams : use ZSTD_getCParams() to transform a compression level
 * into its relevant cParams.
* @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
* Note : there is no corresponding "free" function.
* Since workspace was allocated externally, it must be freed externally.
*/
const ZSTD_CDict* ZSTD_initStaticCDict(
void* workspace, size_t workspaceSize,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_compressionParameters cParams)
{
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
/* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
: ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ matchStateSize;
ZSTD_CDict* cdict;
ZSTD_CCtx_params params;
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
{
ZSTD_cwksp ws;
ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
if (cdict == NULL) return NULL;
ZSTD_cwksp_move(&cdict->workspace, &ws);
}
DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
(unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
if (workspaceSize < neededSize) return NULL;
ZSTD_CCtxParams_init(&params, 0);
params.cParams = cParams;
params.useRowMatchFinder = useRowMatchFinder;
cdict->useRowMatchFinder = useRowMatchFinder;
if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
dict, dictSize,
dictLoadMethod, dictContentType,
params) ))
return NULL;
return cdict;
}
ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
assert(cdict != NULL);
return cdict->matchState.cParams;
}
/*! ZSTD_getDictID_fromCDict() :
* Provides the dictID of the dictionary loaded into `cdict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
{
if (cdict==NULL) return 0;
return cdict->dictID;
}
/* ZSTD_compressBegin_usingCDict_internal() :
* Implementation of various ZSTD_compressBegin_usingCDict* functions.
*/
static size_t ZSTD_compressBegin_usingCDict_internal(
ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
ZSTD_CCtx_params cctxParams;
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
/* Initialize the cctxParams from the cdict */
{
ZSTD_parameters params;
params.fParams = fParams;
params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
|| pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
|| pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
|| cdict->compressionLevel == 0 ) ?
ZSTD_getCParamsFromCDict(cdict)
: ZSTD_getCParams(cdict->compressionLevel,
pledgedSrcSize,
cdict->dictContentSize);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
}
/* Increase window log to fit the entire dictionary and source if the
* source size is known. Limit the increase to 19, which is the
* window log for compression level 1 with the largest source size.
*/
if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
}
return ZSTD_compressBegin_internal(cctx,
NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
cdict,
&cctxParams, pledgedSrcSize,
ZSTDb_not_buffered);
}
/* ZSTD_compressBegin_usingCDict_advanced() :
* This function is DEPRECATED.
* cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
}
/* ZSTD_compressBegin_usingCDict() :
* cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
/*! ZSTD_compress_usingCDict_internal():
* Implementation of various ZSTD_compress_usingCDict* functions.
*/
static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
/*! ZSTD_compress_usingCDict_advanced():
* This function is DEPRECATED.
*/
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
/*! ZSTD_compress_usingCDict() :
* Compression using a digested Dictionary.
 * Faster startup than ZSTD_compress_usingDict() ; recommended when the same dictionary is used multiple times.
 * Note that compression parameters are decided at CDict creation time,
 * while frame parameters are hardcoded. */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict)
{
ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}
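/* A minimal usage sketch for the CDict path above ; `cctx` comes from
 * ZSTD_createCCtx() and the other names are illustrative :
 *
 *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
 *   size_t const cSize = ZSTD_compress_usingCDict(cctx, dstBuf, dstCapacity,
 *                                                 srcBuf, srcSize, cdict);
 *   ZSTD_freeCDict(cdict);
 */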
/* ******************************************************************
* Streaming
********************************************************************/
ZSTD_CStream* ZSTD_createCStream(void)
{
DEBUGLOG(3, "ZSTD_createCStream");
return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
}
ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
{
return ZSTD_initStaticCCtx(workspace, workspaceSize);
}
ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{ /* CStream and CCtx are now same object */
return ZSTD_createCCtx_advanced(customMem);
}
size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
return ZSTD_freeCCtx(zcs); /* same object */
}
/*====== Initialization ======*/
size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
size_t ZSTD_CStreamOutSize(void)
{
return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}
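/* Returns the cParam adaptation mode to use, depending on whether the cdict
 * would be attached to the working context or fully loaded into it. */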
static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
{
if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
return ZSTD_cpm_attachDict;
else
return ZSTD_cpm_noAttachDict;
}
/* ZSTD_resetCStream():
* pledgedSrcSize == 0 means "unknown" */
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
{
/* temporary : 0 interpreted as "unknown" during transition period.
* Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
* 0 will be interpreted as "empty" in the future.
*/
U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
return 0;
}
/*! ZSTD_initCStream_internal() :
* Note : for lib/compress only. Used by zstdmt_compress.c.
* Assumption 1 : params are valid
* Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
const ZSTD_CCtx_params* params,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_internal");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
zcs->requestedParams = *params;
assert(!((dict) && (cdict))); /* either dict or cdict, not both */
if (dict) {
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
} else {
/* Dictionary is cleared if !cdict */
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
}
return 0;
}
/* ZSTD_initCStream_usingCDict_advanced() :
* same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
unsigned long long pledgedSrcSize)
{
DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
zcs->requestedParams.fParams = fParams;
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
return 0;
}
/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
return 0;
}
/* ZSTD_initCStream_advanced() :
* pledgedSrcSize must be exact.
 * If srcSize is not known at init time, use the value ZSTD_CONTENTSIZE_UNKNOWN.
* dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pss)
{
/* for compatibility with older programs relying on this behavior.
* Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
* This line will be removed in the future.
*/
U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_initCStream_advanced");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
return 0;
}
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
DEBUGLOG(4, "ZSTD_initCStream_usingDict");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
return 0;
}
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
/* temporary : 0 interpreted as "unknown" during transition period.
* Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
* 0 will be interpreted as "empty" in the future.
*/
U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
DEBUGLOG(4, "ZSTD_initCStream_srcSize");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
return 0;
}
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
DEBUGLOG(4, "ZSTD_initCStream");
FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
return 0;
}
/*====== Compression ======*/
static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
{
size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
if (hintInSize==0) hintInSize = cctx->blockSize;
return hintInSize;
}
/* ZSTD_compressStream_generic():
* internal function for all *compressStream*() variants
* @return : hint size for next input */
static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective const flushMode)
{
const char* const istart = (const char*)input->src;
const char* const iend = input->size != 0 ? istart + input->size : istart;
const char* ip = input->pos != 0 ? istart + input->pos : istart;
char* const ostart = (char*)output->dst;
char* const oend = output->size != 0 ? ostart + output->size : ostart;
char* op = output->pos != 0 ? ostart + output->pos : ostart;
U32 someMoreWork = 1;
/* check expectations */
DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
assert(zcs->inBuff != NULL);
assert(zcs->inBuffSize > 0);
}
if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
assert(zcs->outBuff != NULL);
assert(zcs->outBuffSize > 0);
}
assert(output->pos <= output->size);
assert(input->pos <= input->size);
assert((U32)flushMode <= (U32)ZSTD_e_end);
while (someMoreWork) {
switch(zcs->streamStage)
{
case zcss_init:
RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
case zcss_load:
if ( (flushMode == ZSTD_e_end)
&& ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
|| zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
&& (zcs->inBuffPos == 0) ) {
/* shortcut to compression pass directly into output buffer */
size_t const cSize = ZSTD_compressEnd(zcs,
op, oend-op, ip, iend-ip);
DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
ip = iend;
op += cSize;
zcs->frameEnded = 1;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
someMoreWork = 0; break;
}
/* complete loading into inBuffer in buffered mode */
if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
size_t const loaded = ZSTD_limitCopy(
zcs->inBuff + zcs->inBuffPos, toLoad,
ip, iend-ip);
zcs->inBuffPos += loaded;
if (loaded != 0)
ip += loaded;
if ( (flushMode == ZSTD_e_continue)
&& (zcs->inBuffPos < zcs->inBuffTarget) ) {
/* not enough input to fill full block : stop here */
someMoreWork = 0; break;
}
if ( (flushMode == ZSTD_e_flush)
&& (zcs->inBuffPos == zcs->inToCompress) ) {
/* flush requested, but nothing new has been loaded since the last compression : nothing to do */
someMoreWork = 0; break;
}
}
/* compress current block (note : this stage cannot be stopped in the middle) */
DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
{ int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
void* cDst;
size_t cSize;
size_t oSize = oend-op;
size_t const iSize = inputBuffered
? zcs->inBuffPos - zcs->inToCompress
: MIN((size_t)(iend - ip), zcs->blockSize);
if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
cDst = op; /* compress into output buffer, to skip flush stage */
else
cDst = zcs->outBuff, oSize = zcs->outBuffSize;
if (inputBuffered) {
unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
cSize = lastBlock ?
ZSTD_compressEnd(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize) :
ZSTD_compressContinue(zcs, cDst, oSize,
zcs->inBuff + zcs->inToCompress, iSize);
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
/* prepare next block */
zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
if (zcs->inBuffTarget > zcs->inBuffSize)
zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
(unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
if (!lastBlock)
assert(zcs->inBuffTarget <= zcs->inBuffSize);
zcs->inToCompress = zcs->inBuffPos;
} else {
unsigned const lastBlock = (ip + iSize == iend);
assert(flushMode == ZSTD_e_end /* Already validated */);
cSize = lastBlock ?
ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
/* Consume the input prior to error checking to mirror buffered mode. */
if (iSize > 0)
ip += iSize;
FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
zcs->frameEnded = lastBlock;
if (lastBlock)
assert(ip == iend);
}
if (cDst == op) { /* no need to flush */
op += cSize;
if (zcs->frameEnded) {
DEBUGLOG(5, "Frame completed directly in outBuffer");
someMoreWork = 0;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
}
break;
}
zcs->outBuffContentSize = cSize;
zcs->outBuffFlushedSize = 0;
zcs->streamStage = zcss_flush; /* pass-through to flush stage */
}
ZSTD_FALLTHROUGH;
case zcss_flush:
DEBUGLOG(5, "flush stage");
assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
{ size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
(unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
if (flushed)
op += flushed;
zcs->outBuffFlushedSize += flushed;
if (toFlush!=flushed) {
/* flush not fully completed, presumably because dst is too small */
assert(op==oend);
someMoreWork = 0;
break;
}
zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
if (zcs->frameEnded) {
DEBUGLOG(5, "Frame completed on flush");
someMoreWork = 0;
ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
break;
}
zcs->streamStage = zcss_load;
break;
}
default: /* impossible */
assert(0);
}
}
input->pos = ip - istart;
output->pos = op - ostart;
if (zcs->frameEnded) return 0;
return ZSTD_nextInputSizeHint(zcs);
}
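/* The MT-or-ST hint simply forwards to ZSTD_nextInputSizeHint(), since only
 * the single-threaded path exists here. */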
static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
{
return ZSTD_nextInputSizeHint(cctx);
}
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
return ZSTD_nextInputSizeHint_MTorST(zcs);
}
/* After a compression call set the expected input/output buffer.
* This is validated at the start of the next compression call.
*/
static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
{
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
cctx->expectedInBuffer = *input;
}
if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
cctx->expectedOutBufferSize = output->size - output->pos;
}
}
/* Validate that the input/output buffers match the expectations set by
* ZSTD_setBufferExpectations.
*/
static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
ZSTD_outBuffer const* output,
ZSTD_inBuffer const* input,
ZSTD_EndDirective endOp)
{
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
ZSTD_inBuffer const expect = cctx->expectedInBuffer;
if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
if (endOp != ZSTD_e_end)
RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
}
if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
size_t const outBufferSize = output->size - output->pos;
if (cctx->expectedOutBufferSize != outBufferSize)
RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
}
return 0;
}
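/* ZSTD_CCtx_init_compressStream2() :
 * Transparent initialization performed on the first ZSTD_compressStream2() call
 * of a session : resolves the final compression parameters from the requested
 * params, the dictionary / prefix and the pledged source size, then starts the frame. */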
static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
ZSTD_EndDirective endOp,
size_t inSize) {
ZSTD_CCtx_params params = cctx->requestedParams;
ZSTD_prefixDict const prefixDict = cctx->prefixDict;
FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
if (cctx->cdict && !cctx->localDict.cdict) {
/* Let the cdict's compression level take priority over the requested params.
* But do not take the cdict's compression level if the "cdict" is actually a localDict
* generated from ZSTD_initLocalDict().
*/
params.compressionLevel = cctx->cdict->compressionLevel;
}
DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
{
size_t const dictSize = prefixDict.dict
? prefixDict.dictSize
: (cctx->cdict ? cctx->cdict->dictContentSize : 0);
ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
params.cParams = ZSTD_getCParamsFromCCtxParams(
&params, cctx->pledgedSrcSizePlusOne-1,
dictSize, mode);
}
params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
{ U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
cctx->cdict,
&params, pledgedSrcSize,
ZSTDb_buffered) , "");
assert(cctx->appliedParams.nbWorkers == 0);
cctx->inToCompress = 0;
cctx->inBuffPos = 0;
if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
/* for small input : avoid automatic flush on reaching end of block, since
 * it would require adding a 3-byte null block to end the frame
*/
cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
} else {
cctx->inBuffTarget = 0;
}
cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
cctx->streamStage = zcss_load;
cctx->frameEnded = 0;
}
return 0;
}
size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
ZSTD_inBuffer* input,
ZSTD_EndDirective endOp)
{
DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
/* check conditions */
RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
assert(cctx != NULL);
/* transparent initialization stage */
if (cctx->streamStage == zcss_init) {
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
}
/* end of transparent initialization stage */
FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
/* compression stage */
FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
DEBUGLOG(5, "completed ZSTD_compressStream2");
ZSTD_setBufferExpectations(cctx, output, input);
return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}
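/* A minimal sketch of the typical call pattern for ZSTD_compressStream2() ;
 * names are illustrative, and dstCapacity is assumed >= ZSTD_compressBound(srcSize)
 * so no output draining is needed :
 *
 *   ZSTD_inBuffer in = { srcBuf, srcSize, 0 };
 *   ZSTD_outBuffer out = { dstBuf, dstCapacity, 0 };
 *   size_t remaining;
 *   do {
 *       remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
 *   } while (remaining != 0 && !ZSTD_isError(remaining));
 *   on success, out.pos holds the compressed frame size
 */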
size_t ZSTD_compressStream2_simpleArgs (
ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos,
ZSTD_EndDirective endOp)
{
ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
ZSTD_inBuffer input = { src, srcSize, *srcPos };
/* ZSTD_compressStream2() will check validity of dstPos and srcPos */
size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
}
size_t ZSTD_compress2(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
/* Enable stable input/output buffers. */
cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
{ size_t oPos = 0;
size_t iPos = 0;
size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
dst, dstCapacity, &oPos,
src, srcSize, &iPos,
ZSTD_e_end);
/* Reset to the original values. */
cctx->requestedParams.inBufferMode = originalInBufferMode;
cctx->requestedParams.outBufferMode = originalOutBufferMode;
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
if (result != 0) { /* compression not completed, due to lack of output space */
assert(oPos == dstCapacity);
RETURN_ERROR(dstSize_tooSmall, "");
}
assert(iPos == srcSize); /* all input is expected to be consumed */
return oPos;
}
}
typedef struct {
U32 idx; /* Index in array of ZSTD_Sequence */
U32 posInSequence; /* Position within sequence at idx */
size_t posInSrc; /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;
/* ZSTD_validateSequence() :
* @offCode : is presumed to follow format required by ZSTD_storeSeq()
* @returns a ZSTD error code if sequence is not valid
*/
static size_t
ZSTD_validateSequence(U32 offCode, U32 matchLength,
size_t posInSrc, U32 windowLog, size_t dictSize)
{
U32 const windowSize = 1 << windowLog;
/* posInSrc represents the amount of data the decoder would decode up to this point.
* As long as the amount of data decoded is less than or equal to window size, offsets may be
* larger than the total length of output decoded in order to reference the dict, even larger than
* window size. After output surpasses windowSize, we're limited to windowSize offsets again.
*/
size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
return 0;
}
/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
{
U32 offCode = STORE_OFFSET(rawOffset);
if (!ll0 && rawOffset == rep[0]) {
offCode = STORE_REPCODE_1;
} else if (rawOffset == rep[1]) {
offCode = STORE_REPCODE(2 - ll0);
} else if (rawOffset == rep[2]) {
offCode = STORE_REPCODE(3 - ll0);
} else if (ll0 && rawOffset == rep[0] - 1) {
offCode = STORE_REPCODE_3;
}
return offCode;
}
/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
* ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
*/
static size_t
ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize)
{
U32 idx = seqPos->idx;
BYTE const* ip = (BYTE const*)(src);
const BYTE* const iend = ip + blockSize;
repcodes_t updatedRepcodes;
U32 dictSize;
if (cctx->cdict) {
dictSize = (U32)cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
dictSize = (U32)cctx->prefixDict.dictSize;
} else {
dictSize = 0;
}
ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
U32 const litLength = inSeqs[idx].litLength;
U32 const ll0 = (litLength == 0);
U32 const matchLength = inSeqs[idx].matchLength;
U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize),
"Sequence validation failed");
}
RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
"Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
ip += matchLength + litLength;
}
ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
if (inSeqs[idx].litLength) {
DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
ip += inSeqs[idx].litLength;
seqPos->posInSrc += inSeqs[idx].litLength;
}
RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
seqPos->idx = idx+1;
return 0;
}
/* Returns the number of bytes to move the current read position back by ; this is non-zero
 * only if we ended up splitting a sequence. It may instead return a ZSTD error code if
 * something went wrong.
*
* This function will attempt to scan through blockSize bytes represented by the sequences
* in inSeqs, storing any (partial) sequences.
*
 * Occasionally, we may want to reduce the number of bytes actually consumed from inSeqs in
 * order to avoid splitting a match, or to avoid producing a split match smaller than MINMATCH.
 * In this case, we return the number of bytes that we did not read from this block.
*/
static size_t
ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize)
{
U32 idx = seqPos->idx;
U32 startPosInSequence = seqPos->posInSequence;
U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
size_t dictSize;
BYTE const* ip = (BYTE const*)(src);
BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
repcodes_t updatedRepcodes;
U32 bytesAdjustment = 0;
U32 finalMatchSplit = 0;
if (cctx->cdict) {
dictSize = cctx->cdict->dictContentSize;
} else if (cctx->prefixDict.dict) {
dictSize = cctx->prefixDict.dictSize;
} else {
dictSize = 0;
}
DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
const ZSTD_Sequence currSeq = inSeqs[idx];
U32 litLength = currSeq.litLength;
U32 matchLength = currSeq.matchLength;
U32 const rawOffset = currSeq.offset;
U32 offCode;
/* Modify the sequence depending on where endPosInSequence lies */
if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
if (startPosInSequence >= litLength) {
startPosInSequence -= litLength;
litLength = 0;
matchLength -= startPosInSequence;
} else {
litLength -= startPosInSequence;
}
/* Move to the next sequence */
endPosInSequence -= currSeq.litLength + currSeq.matchLength;
startPosInSequence = 0;
idx++;
} else {
/* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
does not reach the end of the match. So, we have to split the sequence */
DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
if (endPosInSequence > litLength) {
U32 firstHalfMatchLength;
litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
/* Only ever split the match if it is larger than the block size */
U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
/* Move the endPosInSequence backward so that it creates match of minMatch length */
endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
firstHalfMatchLength -= bytesAdjustment;
}
matchLength = firstHalfMatchLength;
/* Flag that we split the last match - after storing the sequence, exit the loop,
but keep the value of endPosInSequence */
finalMatchSplit = 1;
} else {
/* Move the position in sequence backwards so that we don't split match, and break to store
* the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
* should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
* would cause the first half of the match to be too small
*/
bytesAdjustment = endPosInSequence - currSeq.litLength;
endPosInSequence = currSeq.litLength;
break;
}
} else {
/* This sequence ends inside the literals, break to store the last literals */
break;
}
}
/* Check if this offset can be represented with a repcode */
{ U32 const ll0 = (litLength == 0);
offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
}
if (cctx->appliedParams.validateSequences) {
seqPos->posInSrc += litLength + matchLength;
FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize),
"Sequence validation failed");
}
DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
"Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
ip += matchLength + litLength;
}
DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
seqPos->idx = idx;
seqPos->posInSequence = endPosInSequence;
ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
iend -= bytesAdjustment;
if (ip != iend) {
/* Store any last literals */
U32 lastLLSize = (U32)(iend - ip);
assert(ip <= iend);
DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
seqPos->posInSrc += lastLLSize;
}
return bytesAdjustment;
}
typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize);
static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
{
ZSTD_sequenceCopier sequenceCopier = NULL;
assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
if (mode == ZSTD_sf_explicitBlockDelimiters) {
return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
} else if (mode == ZSTD_sf_noBlockDelimiters) {
return ZSTD_copySequencesToSeqStoreNoBlockDelim;
}
assert(sequenceCopier != NULL);
return sequenceCopier;
}
/* Compress, block-by-block, all of the sequences given.
*
* Returns the cumulative size of all compressed blocks (including their headers),
* or a ZSTD error code if compression of any block fails.
*/
static size_t
ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize)
{
size_t cSize = 0;
U32 lastBlock;
size_t blockSize;
size_t compressedSeqsSize;
size_t remaining = srcSize;
ZSTD_sequencePosition seqPos = {0, 0, 0};
BYTE const* ip = (BYTE const*)src;
BYTE* op = (BYTE*)dst;
ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
/* Special case: empty frame */
if (remaining == 0) {
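/* Block header layout (3 bytes, little-endian): bit 0 = last-block flag,
 * bits 1-2 = block type, bits 3-23 = block size (0 here: an empty raw block). */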
U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
MEM_writeLE32(op, cBlockHeader24);
op += ZSTD_blockHeaderSize;
dstCapacity -= ZSTD_blockHeaderSize;
cSize += ZSTD_blockHeaderSize;
}
while (remaining) {
size_t cBlockSize;
size_t additionalByteAdjustment;
lastBlock = remaining <= cctx->blockSize;
blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
ZSTD_resetSeqStore(&cctx->seqStore);
DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
blockSize -= additionalByteAdjustment;
/* If blocks are too small, emit as a nocompress block */
if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
cSize += cBlockSize;
ip += blockSize;
op += cBlockSize;
remaining -= blockSize;
dstCapacity -= cBlockSize;
continue;
}
compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
&cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
&cctx->appliedParams,
op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
blockSize,
cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
cctx->bmi2);
FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
if (!cctx->isFirstBlock &&
ZSTD_maybeRLE(&cctx->seqStore) &&
ZSTD_isRLE((BYTE const*)src, srcSize)) {
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input error."
* This is only an issue for zstd <= v1.4.3
*/
compressedSeqsSize = 1;
}
if (compressedSeqsSize == 0) {
/* ZSTD_noCompressBlock writes the block header as well */
cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
} else if (compressedSeqsSize == 1) {
cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
} else {
U32 cBlockHeader;
/* Error checking and repcodes update */
ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
/* Write block header into beginning of block */
cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
MEM_writeLE24(op, cBlockHeader);
cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
}
cSize += cBlockSize;
DEBUGLOG(4, "cSize running total: %zu", cSize);
if (lastBlock) {
break;
} else {
ip += blockSize;
op += cBlockSize;
remaining -= blockSize;
dstCapacity -= cBlockSize;
cctx->isFirstBlock = 0;
}
}
return cSize;
}
size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize)
{
BYTE* op = (BYTE*)dst;
size_t cSize = 0;
size_t compressedBlocksSize = 0;
size_t frameHeaderSize = 0;
/* Transparent initialization stage, same as compressStream2() */
DEBUGLOG(3, "ZSTD_compressSequences()");
assert(cctx != NULL);
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
/* Begin writing output, starting with frame header */
frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
op += frameHeaderSize;
dstCapacity -= frameHeaderSize;
cSize += frameHeaderSize;
if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
xxh64_update(&cctx->xxhState, src, srcSize);
}
/* cSize includes block header size and compressed sequences size */
compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
op, dstCapacity,
inSeqs, inSeqsSize,
src, srcSize);
FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
cSize += compressedBlocksSize;
dstCapacity -= compressedBlocksSize;
if (cctx->appliedParams.fParams.checksumFlag) {
U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
MEM_writeLE32((char*)dst + cSize, checksum);
cSize += 4;
}
DEBUGLOG(3, "Final compressed size: %zu", cSize);
return cSize;
}
/*====== Finalize ======*/
/*! ZSTD_flushStream() :
* @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
}
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
{
ZSTD_inBuffer input = { NULL, 0, 0 };
size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
/* single thread mode : attempt to calculate remaining to flush more precisely */
{ size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
return toFlush;
}
}
/*-===== Pre-defined compression levels =====-*/
#include "clevels.h"
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
{
ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
switch (cParams.strategy) {
case ZSTD_fast:
case ZSTD_dfast:
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
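/* Oversize the hash table: the extra hashLog bits become the per-bucket cache
 * slots (2^ZSTD_LAZY_DDSS_BUCKET_LOG entries per bucket) used by the dedicated
 * dictionary search; ZSTD_dedicatedDictSearch_revertCParams() undoes this. */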
cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
break;
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
break;
}
return cParams;
}
static int ZSTD_dedicatedDictSearch_isSupported(
ZSTD_compressionParameters const* cParams)
{
return (cParams->strategy >= ZSTD_greedy)
&& (cParams->strategy <= ZSTD_lazy2)
&& (cParams->hashLog > cParams->chainLog)
&& (cParams->chainLog <= 24);
}
/*
* Reverses the adjustment applied to cparams when enabling dedicated dict
* search. This is used to recover the params set to be used in the working
* context. (Otherwise, those tables would also grow.)
*/
static void ZSTD_dedicatedDictSearch_revertCParams(
ZSTD_compressionParameters* cParams) {
switch (cParams->strategy) {
case ZSTD_fast:
case ZSTD_dfast:
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
cParams->hashLog = ZSTD_HASHLOG_MIN;
}
break;
case ZSTD_btlazy2:
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
break;
}
}
static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
switch (mode) {
case ZSTD_cpm_unknown:
case ZSTD_cpm_noAttachDict:
case ZSTD_cpm_createCDict:
break;
case ZSTD_cpm_attachDict:
dictSize = 0;
break;
default:
assert(0);
break;
}
{ int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
}
}
/*! ZSTD_getCParams_internal() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
* Use dictSize == 0 for unknown or unused.
* Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
{
U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
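/* Each true comparison adds 1, so tableID selects one of 4 parameter tables:
 * 0 for sources > 256 KB, 1 for <= 256 KB, 2 for <= 128 KB, 3 for <= 16 KB. */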
U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
int row;
DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
/* row */
if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
else row = compressionLevel;
{ ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
/* acceleration factor */
if (compressionLevel < 0) {
int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
cp.targetLength = (unsigned)(-clampedCompressionLevel);
}
/* refine parameters based on srcSize & dictSize */
return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
}
}
/*! ZSTD_getCParams() :
* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
* Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
{
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
/*! ZSTD_getParams_internal() :
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
ZSTD_parameters params;
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
ZSTD_memset(&params, 0, sizeof(params));
params.cParams = cParams;
params.fParams.contentSizeFlag = 1;
return params;
}
/*! ZSTD_getParams() :
* same idea as ZSTD_getCParams()
* @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
* Fields of `ZSTD_frameParameters` are set to default values */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
}
| linux-master | lib/zstd/compress/zstd_compress.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_compress_internal.h"
#include "zstd_lazy.h"
/*-*************************************
* Binary Tree search
***************************************/
static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iend,
U32 mls)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
if (idx != target)
DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
idx, target, ms->window.dictLimit);
assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
(void)iend;
assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
for ( ; idx < target ; idx++) {
size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
U32 const matchIndex = hashTable[h];
U32* const nextCandidatePtr = bt + 2*(idx&btMask);
U32* const sortMarkPtr = nextCandidatePtr + 1;
DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
hashTable[h] = idx; /* Update Hash Table */
*nextCandidatePtr = matchIndex; /* update BT like a chain */
*sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
}
ms->nextToUpdate = target;
}
/* ZSTD_insertDUBT1() :
* sort one already inserted but unsorted position
* assumption : curr >= btlow == (curr - btmask)
* doesn't fail */
static void
ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* match;
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = smallerPtr + 1;
U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
U32 dummy32; /* to be nullified at the end */
U32 const windowValid = ms->window.lowLimit;
U32 const maxDistance = 1U << cParams->windowLog;
U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
curr, dictLimit, windowLow);
assert(curr >= btLow);
assert(ip < iend); /* condition for ZSTD_count */
for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(matchIndex < curr);
/* note : all candidates are now supposed sorted,
* but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
* when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
if ( (dictMode != ZSTD_extDict)
|| (matchIndex+matchLength >= dictLimit) /* both in current segment*/
|| (curr < dictLimit) /* both in extDict */) {
const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
|| (matchIndex+matchLength >= dictLimit)) ?
base : dictBase;
assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
|| (curr < dictLimit) );
match = mBase + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
match = dictBase + matchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* preparation for next read of match[matchLength] */
}
DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
curr, matchIndex, (U32)matchLength);
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
break; /* drop, to guarantee consistency; miss a bit of compression, but other solutions can corrupt the tree */
}
if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
/* match is smaller than current */
*smallerPtr = matchIndex; /* update smaller idx */
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
matchIndex, btLow, nextPtr[1]);
smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
} else {
/* match is larger than current */
*largerPtr = matchIndex;
commonLengthLarger = matchLength;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
matchIndex, btLow, nextPtr[0]);
largerPtr = nextPtr;
matchIndex = nextPtr[0];
} }
*smallerPtr = *largerPtr = 0;
}
static size_t
ZSTD_DUBT_findBetterDictMatch (
const ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr,
size_t bestLength,
U32 nbCompares,
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
const ZSTD_matchState_t * const dms = ms->dictMatchState;
const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
const U32 * const dictHashTable = dms->hashTable;
U32 const hashLog = dmsCParams->hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
U32 dictMatchIndex = dictHashTable[h];
const BYTE* const base = ms->window.base;
const BYTE* const prefixStart = base + ms->window.dictLimit;
U32 const curr = (U32)(ip-base);
const BYTE* const dictBase = dms->window.base;
const BYTE* const dictEnd = dms->window.nextSrc;
U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
U32 const dictLowLimit = dms->window.lowLimit;
U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
U32* const dictBt = dms->chainTable;
U32 const btLog = dmsCParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
size_t commonLengthSmaller=0, commonLengthLarger=0;
(void)dictMode;
assert(dictMode == ZSTD_dictMatchState);
for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match = dictBase + dictMatchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
if (dictMatchIndex+matchLength >= dictHighLimit)
match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */
if (matchLength > bestLength) {
U32 matchIndex = dictMatchIndex + dictIndexDelta;
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex);
bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
}
if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
break; /* drop, to guarantee consistency (miss a little bit of compression) */
}
}
if (match[matchLength] < ip[matchLength]) {
if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
} else {
/* match is larger than current */
if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
commonLengthLarger = matchLength;
dictMatchIndex = nextPtr[0];
}
}
if (bestLength >= MINMATCH) {
U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
return bestLength;
}
static size_t
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr,
U32 const mls,
const ZSTD_dictMode_e dictMode)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
U32 matchIndex = hashTable[h];
const BYTE* const base = ms->window.base;
U32 const curr = (U32)(ip-base);
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
U32 const unsortLimit = MAX(btLow, windowLow);
U32* nextCandidate = bt + 2*(matchIndex&btMask);
U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
U32 nbCompares = 1U << cParams->searchLog;
U32 nbCandidates = nbCompares;
U32 previousCandidate = 0;
DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
assert(ip <= iend-8); /* required for h calculation */
assert(dictMode != ZSTD_dedicatedDictSearch);
/* reach end of unsorted candidates list */
while ( (matchIndex > unsortLimit)
&& (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
&& (nbCandidates > 1) ) {
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
matchIndex);
*unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, used to walk back up to the original position */
previousCandidate = matchIndex;
matchIndex = *nextCandidate;
nextCandidate = bt + 2*(matchIndex&btMask);
unsortedMark = bt + 2*(matchIndex&btMask) + 1;
nbCandidates --;
}
/* nullify last candidate if it's still unsorted
* simplification, detrimental to compression ratio, beneficial for speed */
if ( (matchIndex > unsortLimit)
&& (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
matchIndex);
*nextCandidate = *unsortedMark = 0;
}
/* batch sort stacked candidates */
matchIndex = previousCandidate;
while (matchIndex) { /* will end on matchIndex == 0 */
U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
U32 const nextCandidateIdx = *nextCandidateIdxPtr;
ZSTD_insertDUBT1(ms, matchIndex, iend,
nbCandidates, unsortLimit, dictMode);
matchIndex = nextCandidateIdx;
nbCandidates++;
}
/* find longest match */
{ size_t commonLengthSmaller = 0, commonLengthLarger = 0;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = bt + 2*(curr&btMask) + 1;
U32 matchEndIdx = curr + 8 + 1;
U32 dummy32; /* to be nullified at the end */
size_t bestLength = 0;
matchIndex = hashTable[h];
hashTable[h] = curr; /* Update Hash Table */
for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match;
if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
match = base + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
match = dictBase + matchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
}
if (matchLength > bestLength) {
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
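/* Heuristic: prefer the longer match only if the extra length outweighs the
 * (roughly log2) extra cost of encoding a larger offset. */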
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
if (dictMode == ZSTD_dictMatchState) {
nbCompares = 0; /* in addition to avoiding checking any
* further in this loop, make sure we
* skip checking in the dictionary. */
}
break; /* drop, to guarantee consistency (miss a little bit of compression) */
}
}
if (match[matchLength] < ip[matchLength]) {
/* match is smaller than current */
*smallerPtr = matchIndex; /* update smaller idx */
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
} else {
/* match is larger than current */
*largerPtr = matchIndex;
commonLengthLarger = matchLength;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
} }
*smallerPtr = *largerPtr = 0;
assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dictMatchState && nbCompares) {
bestLength = ZSTD_DUBT_findBetterDictMatch(
ms, ip, iend,
offsetPtr, bestLength, nbCompares,
mls, dictMode);
}
assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
if (bestLength >= MINMATCH) {
U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
}
return bestLength;
}
}
/* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls /* template */,
const ZSTD_dictMode_e dictMode)
{
DEBUGLOG(7, "ZSTD_BtFindBestMatch");
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
ZSTD_updateDUBT(ms, ip, iLimit, mls);
return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
}
/* *********************************
* Dedicated dict search
***********************************/
void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32* const hashTable = ms->hashTable;
U32* const chainTable = ms->chainTable;
U32 const chainSize = 1 << ms->cParams.chainLog;
U32 idx = ms->nextToUpdate;
U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
U32 const cacheSize = bucketSize - 1;
U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
/* We know the hashtable is oversized by a factor of `bucketSize`.
* We are going to temporarily pretend `bucketSize == 1`, keeping only a
* single entry. We will use the rest of the space to construct a temporary
* chaintable.
*/
U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
U32* const tmpHashTable = hashTable;
U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
U32 hashIdx;
assert(ms->cParams.chainLog <= 24);
assert(ms->cParams.hashLog > ms->cParams.chainLog);
assert(idx != 0);
assert(tmpMinChain <= minChain);
/* fill conventional hash table and conventional chain table */
for ( ; idx < target; idx++) {
U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
if (idx >= tmpMinChain) {
tmpChainTable[idx - tmpMinChain] = hashTable[h];
}
tmpHashTable[h] = idx;
}
/* sort chains into ddss chain table */
{
U32 chainPos = 0;
for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
U32 count;
U32 countBeyondMinChain = 0;
U32 i = tmpHashTable[hashIdx];
for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
/* skip through the chain to the first position that won't be
* in the hash cache bucket */
if (i < minChain) {
countBeyondMinChain++;
}
i = tmpChainTable[i - tmpMinChain];
}
if (count == cacheSize) {
for (count = 0; count < chainLimit;) {
if (i < minChain) {
if (!i || ++countBeyondMinChain > cacheSize) {
/* only allow pulling `cacheSize` number of entries
* into the cache or chainTable beyond `minChain`,
* to replace the entries pulled out of the
* chainTable into the cache. This lets us reach
* back further without increasing the total number
* of entries in the chainTable, guaranteeing the
* DDSS chain table will fit into the space
* allocated for the regular one. */
break;
}
}
chainTable[chainPos++] = i;
count++;
if (i < tmpMinChain) {
break;
}
i = tmpChainTable[i - tmpMinChain];
}
} else {
count = 0;
}
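/* Pack the chain's start position (upper 24 bits) and its length (low 8 bits)
 * into one U32; ZSTD_dedicatedDictSearch_lazy_search() unpacks it with ">> 8"
 * and "& 0xFF". */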
if (count) {
tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
} else {
tmpHashTable[hashIdx] = 0;
}
}
assert(chainPos <= chainSize); /* I believe this is guaranteed... */
}
/* move chain pointers into the last entry of each hash bucket */
for (hashIdx = (1 << hashLog); hashIdx; ) {
U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
U32 const chainPackedPointer = tmpHashTable[hashIdx];
U32 i;
for (i = 0; i < cacheSize; i++) {
hashTable[bucketIdx + i] = 0;
}
hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
}
/* fill the buckets of the hash table */
for (idx = ms->nextToUpdate; idx < target; idx++) {
U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
<< ZSTD_LAZY_DDSS_BUCKET_LOG;
U32 i;
/* Shift hash cache down 1. */
for (i = cacheSize - 1; i; i--)
hashTable[h + i] = hashTable[h + i - 1];
hashTable[h] = idx;
}
ms->nextToUpdate = target;
}
/* Returns the longest match length found in the dedicated dict search structure.
* If none are longer than the argument ml, then ml will be returned.
*/
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
const ZSTD_matchState_t* const dms,
const BYTE* const ip, const BYTE* const iLimit,
const BYTE* const prefixStart, const U32 curr,
const U32 dictLimit, const size_t ddsIdx) {
const U32 ddsLowestIndex = dms->window.dictLimit;
const BYTE* const ddsBase = dms->window.base;
const BYTE* const ddsEnd = dms->window.nextSrc;
const U32 ddsSize = (U32)(ddsEnd - ddsBase);
const U32 ddsIndexDelta = dictLimit - ddsSize;
const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
U32 ddsAttempt;
U32 matchIndex;
for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
}
{
U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
U32 const chainIndex = chainPackedPointer >> 8;
PREFETCH_L1(&dms->chainTable[chainIndex]);
}
for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
size_t currentMl=0;
const BYTE* match;
matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
match = ddsBase + matchIndex;
if (!matchIndex) {
return ml;
}
/* guaranteed by table construction */
(void)ddsLowestIndex;
assert(matchIndex >= ddsLowestIndex);
assert(match+4 <= ddsEnd);
if (MEM_read32(match) == MEM_read32(ip)) {
/* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
}
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
if (ip+currentMl == iLimit) {
/* best possible, avoids read overflow on next attempt */
return ml;
}
}
}
{
U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
U32 chainIndex = chainPackedPointer >> 8;
U32 const chainLength = chainPackedPointer & 0xFF;
U32 const chainAttempts = nbAttempts - ddsAttempt;
U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
U32 chainAttempt;
for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
}
for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
size_t currentMl=0;
const BYTE* match;
matchIndex = dms->chainTable[chainIndex];
match = ddsBase + matchIndex;
/* guaranteed by table construction */
assert(matchIndex >= ddsLowestIndex);
assert(match+4 <= ddsEnd);
if (MEM_read32(match) == MEM_read32(ip)) {
/* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
}
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
}
return ml;
}
/* *********************************
* Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
/* Update chains up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
ZSTD_matchState_t* ms,
const ZSTD_compressionParameters* const cParams,
const BYTE* ip, U32 const mls)
{
U32* const hashTable = ms->hashTable;
const U32 hashLog = cParams->hashLog;
U32* const chainTable = ms->chainTable;
const U32 chainMask = (1 << cParams->chainLog) - 1;
const BYTE* const base = ms->window.base;
const U32 target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
while(idx < target) { /* catch up */
size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
hashTable[h] = idx;
idx++;
}
ms->nextToUpdate = target;
return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
const ZSTD_compressionParameters* const cParams = &ms->cParams;
return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}
/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch(
ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const chainTable = ms->chainTable;
const U32 chainSize = (1 << cParams->chainLog);
const U32 chainMask = chainSize-1;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 curr = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowestValid = ms->window.lowLimit;
const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
const U32 isDictionary = (ms->loadedDictEnd != 0);
const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
const U32 minChain = curr > chainSize ? curr - chainSize : 0;
U32 nbAttempts = 1U << cParams->searchLog;
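/* Start just below MINMATCH (4): only matches of at least MINMATCH bytes can improve on it */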
size_t ml=4-1;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
U32 matchIndex;
if (dictMode == ZSTD_dedicatedDictSearch) {
const U32* entry = &dms->hashTable[ddsIdx];
PREFETCH_L1(entry);
}
/* HC4 match finder */
matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
size_t currentMl=0;
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
if (match[ml] == ip[ml]) /* potentially better */
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
assert(match+4 <= dictEnd);
if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
}
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = STORE_OFFSET(curr - matchIndex);
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
if (matchIndex <= minChain) break;
matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
}
assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dedicatedDictSearch) {
ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
} else if (dictMode == ZSTD_dictMatchState) {
const U32* const dmsChainTable = dms->chainTable;
const U32 dmsChainSize = (1 << dms->cParams.chainLog);
const U32 dmsChainMask = dmsChainSize - 1;
const U32 dmsLowestIndex = dms->window.dictLimit;
const BYTE* const dmsBase = dms->window.base;
const BYTE* const dmsEnd = dms->window.nextSrc;
const U32 dmsSize = (U32)(dmsEnd - dmsBase);
const U32 dmsIndexDelta = dictLimit - dmsSize;
const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
size_t currentMl=0;
const BYTE* const match = dmsBase + matchIndex;
assert(match+4 <= dmsEnd);
if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
*offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
if (matchIndex <= dmsMinChain) break;
matchIndex = dmsChainTable[matchIndex & dmsChainMask];
}
}
return ml;
}
/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */
#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
/* ZSTD_VecMask_next():
* Starting from the LSB, returns the idx of the next non-zero bit.
* Basically counting the nb of trailing zeroes.
*/
static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
assert(val != 0);
# if (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))
if (sizeof(size_t) == 4) {
U32 mostSignificantWord = (U32)(val >> 32);
U32 leastSignificantWord = (U32)val;
if (leastSignificantWord == 0) {
return 32 + (U32)__builtin_ctz(mostSignificantWord);
} else {
return (U32)__builtin_ctz(leastSignificantWord);
}
} else {
return (U32)__builtin_ctzll(val);
}
# else
/* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count
* and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer
*/
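/* Illustrative walk-through (assumed value): val = 0b101000 -> ~val & (val-1) = 0b000111,
 * whose popcount (3) equals the number of trailing zeroes; the SWAR arithmetic
 * below computes that popcount without a loop. */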
val = ~val & (val - 1ULL); /* mask of the bits below the lowest set bit */
val = val - ((val >> 1) & 0x5555555555555555);
val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
# endif
}
/* ZSTD_rotateRight_*():
* Rotates a bitfield to the right by "count" bits.
* https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
*/
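/* The "(0U - count) & (width-1)" form below avoids a shift by the full bit-width
 * when count == 0 (undefined behaviour in C); compilers typically recognize this
 * idiom and emit a single rotate instruction. */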
FORCE_INLINE_TEMPLATE
U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
assert(count < 64);
count &= 0x3F; /* for fickle pattern recognition */
return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
}
FORCE_INLINE_TEMPLATE
U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
assert(count < 32);
count &= 0x1F; /* for fickle pattern recognition */
return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
}
FORCE_INLINE_TEMPLATE
U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
assert(count < 16);
count &= 0x0F; /* for fickle pattern recognition */
return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
}
/* ZSTD_row_nextIndex():
* Returns the next index to insert at within a tagTable row, and updates the "head"
* value to reflect the update. Essentially cycles backwards from [0, {entries per row})
*/
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
U32 const next = (*tagRow - 1) & rowMask;
*tagRow = (BYTE)next;
return next;
}
/* ZSTD_isAligned():
* Checks that a pointer is aligned to "align" bytes which must be a power of 2.
*/
MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
assert((align & (align - 1)) == 0);
return (((size_t)ptr) & (align - 1)) == 0;
}
/* ZSTD_row_prefetch():
* Performs prefetching for the hashTable and tagTable at a given row.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
PREFETCH_L1(hashTable + relRow);
if (rowLog >= 5) {
PREFETCH_L1(hashTable + relRow + 16);
/* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
}
PREFETCH_L1(tagTable + relRow);
if (rowLog == 6) {
PREFETCH_L1(tagTable + relRow + 32);
}
assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
}
/* ZSTD_row_fillHashCache():
* Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
* but not beyond iLimit.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
U32 const rowLog, U32 const mls,
U32 idx, const BYTE* const iLimit)
{
U32 const* const hashTable = ms->hashTable;
U16 const* const tagTable = ms->tagTable;
U32 const hashLog = ms->rowHashLog;
U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
for (; idx < lim; ++idx) {
U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
}
DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
}
/* ZSTD_row_nextCachedHash():
* Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
* base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
*/
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
U16 const* tagTable, BYTE const* base,
U32 idx, U32 const hashLog,
U32 const rowLog, U32 const mls)
{
U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
{ U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
return hash;
}
}
/* ZSTD_row_update_internalImpl():
* Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
U32 updateStartIdx, U32 const updateEndIdx,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{
U32* const hashTable = ms->hashTable;
U16* const tagTable = ms->tagTable;
U32 const hashLog = ms->rowHashLog;
const BYTE* const base = ms->window.base;
DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
: (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
U32* const row = hashTable + relRow;
BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
Explicit cast allows us to get exact desired position within each row */
U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
row[pos] = updateStartIdx;
}
}
/* ZSTD_row_update_internal():
* Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
* Skips sections of long matches as is necessary.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{
U32 idx = ms->nextToUpdate;
const BYTE* const base = ms->window.base;
const U32 target = (U32)(ip - base);
const U32 kSkipThreshold = 384;
const U32 kMaxMatchStartPositionsToUpdate = 96;
const U32 kMaxMatchEndPositionsToUpdate = 32;
if (useCache) {
/* Only skip positions when using hash cache, i.e.
* if we are loading a dict, don't skip anything.
* If we decide to skip, then we only update a set number
* of positions at the beginning and end of the match.
*/
if (UNLIKELY(target - idx > kSkipThreshold)) {
U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
idx = target - kMaxMatchEndPositionsToUpdate;
ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
}
}
assert(target >= idx);
ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
ms->nextToUpdate = target;
}
/* ZSTD_row_update():
* External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
* processing.
*/
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
const U32 rowMask = (1u << rowLog) - 1;
const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */);
}
#if defined(ZSTD_ARCH_X86_SSE2)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
{
const __m128i comparisonMask = _mm_set1_epi8((char)tag);
int matches[4] = {0};
int i;
assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
for (i=0; i<nbChunks; i++) {
const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
matches[i] = _mm_movemask_epi8(equalMask);
}
if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
assert(nbChunks == 4);
return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
}
#endif
/* Returns a ZSTD_VecMask (U64) that has the nth bit set to 1 if the newly-computed "tag" matches
* the hash at the nth position in a row of the tagTable.
* Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
* to match up with the actual layout of the entries within the hashTable */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries)
{
const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
#if defined(ZSTD_ARCH_X86_SSE2)
return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head);
#else /* SW or NEON-LE */
# if defined(ZSTD_ARCH_ARM_NEON)
/* This NEON path only works for little endian - otherwise use SWAR below */
if (MEM_isLittleEndian()) {
if (rowEntries == 16) {
const uint8x16_t chunk = vld1q_u8(src);
const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
const uint16x8_t t0 = vshlq_n_u16(equalMask, 7);
const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14));
const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14));
const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28));
const U16 hi = (U16)vgetq_lane_u8(t3, 8);
const U16 lo = (U16)vgetq_lane_u8(t3, 0);
return ZSTD_rotateRight_U16((hi << 8) | lo, head);
} else if (rowEntries == 32) {
const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src);
const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag));
const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag));
const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0));
const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1));
const uint8x8_t t0 = vreinterpret_u8_s8(pack0);
const uint8x8_t t1 = vreinterpret_u8_s8(pack1);
const uint8x8_t t2 = vsri_n_u8(t1, t0, 2);
const uint8x8x2_t t3 = vuzp_u8(t2, t0);
const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4);
const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0);
return ZSTD_rotateRight_U32(matches, head);
} else { /* rowEntries == 64 */
const uint8x16x4_t chunk = vld4q_u8(src);
const uint8x16_t dup = vdupq_n_u8(tag);
const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
return ZSTD_rotateRight_U64(matches, head);
}
}
# endif /* ZSTD_ARCH_ARM_NEON */
/* SWAR */
{ const size_t chunkSize = sizeof(size_t);
const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
const size_t xFF = ~((size_t)0);
const size_t x01 = xFF / 0xFF;
const size_t x80 = x01 << 7;
const size_t splatChar = tag * x01;
ZSTD_VecMask matches = 0;
int i = rowEntries - chunkSize;
assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
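/* After "chunk ^= splatChar", bytes equal to the tag become zero. The expression
 * "(((chunk | x80) - x01) | chunk) & x80" sets the top bit of every NONzero byte
 * (OR-ing with x80 first prevents borrows between bytes); the multiply by
 * extractMagic gathers those top bits, and the final "matches = ~matches" flips
 * the result so that set bits mark positions whose tag matches. */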
if (MEM_isLittleEndian()) { /* runtime endianness check, hence two separate loops */
const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
do {
size_t chunk = MEM_readST(&src[i]);
chunk ^= splatChar;
chunk = (((chunk | x80) - x01) | chunk) & x80;
matches <<= chunkSize;
matches |= (chunk * extractMagic) >> shiftAmount;
i -= chunkSize;
} while (i >= 0);
} else { /* big endian: reverse bits during extraction */
const size_t msb = xFF ^ (xFF >> 1);
const size_t extractMagic = (msb / 0x1FF) | msb;
do {
size_t chunk = MEM_readST(&src[i]);
chunk ^= splatChar;
chunk = (((chunk | x80) - x01) | chunk) & x80;
matches <<= chunkSize;
matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
i -= chunkSize;
} while (i >= 0);
}
matches = ~matches;
if (rowEntries == 16) {
return ZSTD_rotateRight_U16((U16)matches, head);
} else if (rowEntries == 32) {
return ZSTD_rotateRight_U32((U32)matches, head);
} else {
return ZSTD_rotateRight_U64((U64)matches, head);
}
}
#endif
}
/* The high-level approach of the SIMD row based match finder is as follows:
* - Figure out where to insert the new entry:
* - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
* - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
* which row to insert into.
* - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
* be considered as a circular buffer with a "head" index that resides in the tagTable.
* - Also insert the "tag" into the equivalent row and position in the tagTable.
* - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
* The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
* for alignment/performance reasons, leaving some bytes unused.
* - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
* generate a bitfield that we can cycle through to check the collisions in the hash table.
* - Pick the longest match.
*/
FORCE_INLINE_TEMPLATE
size_t ZSTD_RowFindBestMatch(
ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode,
const U32 rowLog)
{
U32* const hashTable = ms->hashTable;
U16* const tagTable = ms->tagTable;
U32* const hashCache = ms->hashCache;
const U32 hashLog = ms->rowHashLog;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 curr = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowestValid = ms->window.lowLimit;
const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
const U32 isDictionary = (ms->loadedDictEnd != 0);
const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
const U32 rowEntries = (1U << rowLog);
const U32 rowMask = rowEntries - 1;
const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
U32 nbAttempts = 1U << cappedSearchLog;
size_t ml=4-1;
/* DMS/DDS variables that may be referenced later */
const ZSTD_matchState_t* const dms = ms->dictMatchState;
/* Initialize the following variables to satisfy static analyzer */
size_t ddsIdx = 0;
U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
U32 dmsTag = 0;
U32* dmsRow = NULL;
BYTE* dmsTagRow = NULL;
if (dictMode == ZSTD_dedicatedDictSearch) {
const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
{ /* Prefetch DDS hashtable entry */
ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
PREFETCH_L1(&dms->hashTable[ddsIdx]);
}
ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
}
if (dictMode == ZSTD_dictMatchState) {
/* Prefetch DMS rows */
U32* const dmsHashTable = dms->hashTable;
U16* const dmsTagTable = dms->tagTable;
U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
dmsRow = dmsHashTable + dmsRelRow;
ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
}
/* Update the hashTable and tagTable up to (but not including) ip */
ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
{ /* Get the hash for ip, compute the appropriate row */
U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
U32* const row = hashTable + relRow;
BYTE* tagRow = (BYTE*)(tagTable + relRow);
U32 const head = *tagRow & rowMask;
U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
size_t numMatches = 0;
size_t currMatch = 0;
ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);
/* Cycle through the matches and prefetch */
for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
U32 const matchIndex = row[matchPos];
assert(numMatches < rowEntries);
if (matchIndex < lowLimit)
break;
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
PREFETCH_L1(base + matchIndex);
} else {
PREFETCH_L1(dictBase + matchIndex);
}
matchBuffer[numMatches++] = matchIndex;
}
/* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
in ZSTD_row_update_internal() at the next search. */
{
U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
row[pos] = ms->nextToUpdate++;
}
/* Return the longest match */
for (; currMatch < numMatches; ++currMatch) {
U32 const matchIndex = matchBuffer[currMatch];
size_t currentMl=0;
assert(matchIndex < curr);
assert(matchIndex >= lowLimit);
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
if (match[ml] == ip[ml]) /* potentially better */
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
assert(match+4 <= dictEnd);
if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
}
/* Save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = STORE_OFFSET(curr - matchIndex);
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
}
assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dedicatedDictSearch) {
ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
} else if (dictMode == ZSTD_dictMatchState) {
/* TODO: Measure and potentially add prefetching to DMS */
const U32 dmsLowestIndex = dms->window.dictLimit;
const BYTE* const dmsBase = dms->window.base;
const BYTE* const dmsEnd = dms->window.nextSrc;
const U32 dmsSize = (U32)(dmsEnd - dmsBase);
const U32 dmsIndexDelta = dictLimit - dmsSize;
{ U32 const head = *dmsTagRow & rowMask;
U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
size_t numMatches = 0;
size_t currMatch = 0;
ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);
for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
U32 const matchIndex = dmsRow[matchPos];
if (matchIndex < dmsLowestIndex)
break;
PREFETCH_L1(dmsBase + matchIndex);
matchBuffer[numMatches++] = matchIndex;
}
/* Return the longest match */
for (; currMatch < numMatches; ++currMatch) {
U32 const matchIndex = matchBuffer[currMatch];
size_t currentMl=0;
assert(matchIndex >= dmsLowestIndex);
assert(matchIndex < curr);
{ const BYTE* const match = dmsBase + matchIndex;
assert(match+4 <= dmsEnd);
if (MEM_read32(match) == MEM_read32(ip))
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
}
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
*offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
if (ip+currentMl == iLimit) break;
}
}
}
}
return ml;
}
/*
* Generate search functions templated on (dictMode, mls, rowLog).
* These functions are outlined for code size & compilation time.
* ZSTD_searchMax() dispatches to the correct implementation function.
*
* TODO: The start of the search function involves loading and calculating a
* bunch of constants from the ZSTD_matchState_t. These computations could be
* done in an initialization function, and saved somewhere in the match state.
* Then we could pass a pointer to the saved state instead of the match state,
* and avoid duplicate computations.
*
* TODO: Move the match re-winding into searchMax. This improves compression
* ratio, and unlocks further simplifications with the next TODO.
*
* TODO: Try moving the repcode search into searchMax. After the re-winding
* and repcode search are in searchMax, there is no more logic in the match
* finder loop that requires knowledge about the dictMode. So we should be
* able to avoid force inlining it, and we can join the extDict loop with
* the single segment loop. It should go in searchMax instead of its own
* function to avoid having multiple virtual function calls per search.
*/
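/* For illustration, the generators below expand (among others) into an
* outlined wrapper equivalent to:
*
*   FORCE_NOINLINE size_t ZSTD_RowFindBestMatch_noDict_5_5(
*           ZSTD_matchState_t* ms,
*           const BYTE* ip, const BYTE* const iLimit,
*           size_t* offsetPtr)
*   {
*       assert(MAX(4, MIN(6, ms->cParams.minMatch)) == 5);
*       assert(MAX(4, MIN(6, ms->cParams.searchLog)) == 5);
*       return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict, 5);
*   }
*/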
#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
ZSTD_matchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offBasePtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
} \
#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
ZSTD_matchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
} \
#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
ZSTD_matchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
} \
#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
X(dictMode, mls, 4) \
X(dictMode, mls, 5) \
X(dictMode, mls, 6)
#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
#define ZSTD_FOR_EACH_MLS(X, dictMode) \
X(dictMode, 4) \
X(dictMode, 5) \
X(dictMode, 6)
#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
X(__VA_ARGS__, noDict) \
X(__VA_ARGS__, extDict) \
X(__VA_ARGS__, dictMatchState) \
X(__VA_ARGS__, dedicatedDictSearch)
/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
/* Generate binary Tree search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
/* Generate hash chain search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
case mls: \
return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
case mls: \
return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
case rowLog: \
return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
#define ZSTD_SWITCH_MLS(X, dictMode) \
switch (mls) { \
ZSTD_FOR_EACH_MLS(X, dictMode) \
}
#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
case mls: \
switch (rowLog) { \
ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
} \
ZSTD_UNREACHABLE; \
break;
#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
switch (searchMethod) { \
case search_hashChain: \
ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
break; \
case search_binaryTree: \
ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
break; \
case search_rowHash: \
ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
break; \
} \
ZSTD_UNREACHABLE;
/*
* Searches for the longest match at @p ip.
* Dispatches to the correct implementation function based on the
* (searchMethod, dictMode, mls, rowLog). We use switch statements
* here instead of using an indirect function call through a function
* pointer because after Spectre and Meltdown mitigations, indirect
* function calls can be very costly, especially in the kernel.
*
* NOTE: dictMode and searchMethod should be templated, so those switch
* statements should be optimized out. Only the mls & rowLog switches
* should be left.
*
* @param ms The match state.
* @param ip The position to search at.
* @param iend The end of the input data.
* @param[out] offsetPtr Stores the match offset into this pointer.
* @param mls The minimum search length, in the range [4, 6].
* @param rowLog The row log (if applicable), in the range [4, 6].
* @param searchMethod The search method to use (templated).
* @param dictMode The dictMode (templated).
*
* @returns The length of the longest match found, or < mls if no match is found.
* If a match is found its offset is stored in @p offsetPtr.
*/
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
ZSTD_matchState_t* ms,
const BYTE* ip,
const BYTE* iend,
size_t* offsetPtr,
U32 const mls,
U32 const rowLog,
searchMethod_e const searchMethod,
ZSTD_dictMode_e const dictMode)
{
if (dictMode == ZSTD_noDict) {
ZSTD_SWITCH_SEARCH_METHOD(noDict)
} else if (dictMode == ZSTD_extDict) {
ZSTD_SWITCH_SEARCH_METHOD(extDict)
} else if (dictMode == ZSTD_dictMatchState) {
ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
} else if (dictMode == ZSTD_dedicatedDictSearch) {
ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
}
ZSTD_UNREACHABLE;
return 0;
}
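/* Because searchMethod and dictMode are template parameters of the callers,
* the switches above are expected to fold at compile time. For example, a
* caller instantiated with (search_rowHash, ZSTD_noDict) and runtime values
* mls == 5, rowLog == 5 should reduce to a direct call:
*
*   ZSTD_RowFindBestMatch_noDict_5_5(ms, ip, iend, offsetPtr);
*
* leaving only the mls and rowLog switches as real branches, as noted above. */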
/* *******************************
* Common parser - lazy strategy
*********************************/
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_lazy_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth,
ZSTD_dictMode_e const dictMode)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 prefixLowestIndex = ms->window.dictLimit;
const BYTE* const prefixLowest = base + prefixLowestIndex;
const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
const int isDMS = dictMode == ZSTD_dictMatchState;
const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
const int isDxS = isDMS || isDDS;
const ZSTD_matchState_t* const dms = ms->dictMatchState;
const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
const U32 dictIndexDelta = isDxS ?
prefixLowestIndex - (U32)(dictEnd - dictBase) :
0;
const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
ip += (dictAndPrefixLength == 0);
if (dictMode == ZSTD_noDict) {
U32 const curr = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
U32 const maxRep = curr - windowLow;
if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
}
if (isDxS) {
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
}
if (searchMethod == search_rowHash) {
ZSTD_row_fillHashCache(ms, base, rowLog,
MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
ms->nextToUpdate, ilimit);
}
/* Match Loop */
#if defined(__x86_64__)
/* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
* code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
*/
__asm__(".p2align 5");
#endif
while (ip < ilimit) {
size_t matchLength=0;
size_t offcode=STORE_REPCODE_1;
const BYTE* start=ip+1;
DEBUGLOG(7, "search baseline (depth 0)");
/* check repCode */
if (isDxS) {
const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
&& repIndex < prefixLowestIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
if (depth==0) goto _storeSequence;
}
}
if ( dictMode == ZSTD_noDict
&& ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
if (depth==0) goto _storeSequence;
}
/* first search (depth 0) */
{ size_t offsetFound = 999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, dictMode);
if (ml2 > matchLength)
matchLength = ml2, start = ip, offcode=offsetFound;
}
if (matchLength < 4) {
ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
continue;
}
/* let's try to find a better solution */
if (depth>=1)
while (ip<ilimit) {
DEBUGLOG(7, "search depth 1");
ip ++;
if ( (dictMode == ZSTD_noDict)
&& (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 3);
int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 3);
int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
}
}
{ size_t offset2=999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
continue; /* search a better one */
} }
/* let's find an even better one */
if ((depth==2) && (ip<ilimit)) {
DEBUGLOG(7, "search depth 2");
ip ++;
if ( (dictMode == ZSTD_noDict)
&& (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
int const gain2 = (int)(mlRep * 4);
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
}
if (isDxS) {
const U32 repIndex = (U32)(ip - base) - offset_1;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
int const gain2 = (int)(mlRep * 4);
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((mlRep >= 4) && (gain2 > gain1))
matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
}
}
{ size_t offset2=999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
continue;
} } }
break; /* nothing found : store previous solution */
}
/* NOTE:
* Pay attention that `start[-value]` can lead to strange undefined behavior
* notably if `value` is unsigned, resulting in a large positive `-value`.
*/
/* catch up */
if (STORED_IS_OFFSET(offcode)) {
if (dictMode == ZSTD_noDict) {
while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest))
&& (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */
{ start--; matchLength++; }
}
if (isDxS) {
U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
}
offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
}
/* store sequence */
_storeSequence:
{ size_t const litLength = (size_t)(start - anchor);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
anchor = ip = start + matchLength;
}
/* check immediate repcode */
if (isDxS) {
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex = current2 - offset_2;
const BYTE* repMatch = repIndex < prefixLowestIndex ?
dictBase - dictIndexDelta + repIndex :
base + repIndex;
if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
ip += matchLength;
anchor = ip;
continue;
}
break;
}
}
if (dictMode == ZSTD_noDict) {
while ( ((ip <= ilimit) & (offset_2>0))
&& (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
/* store sequence */
matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
} } }
/* Save reps for next block */
rep[0] = offset_1 ? offset_1 : savedOffset;
rep[1] = offset_2 ? offset_2 : savedOffset;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
size_t ZSTD_compressBlock_btlazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
}
/* Row-based matchfinder */
size_t ZSTD_compressBlock_lazy2_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const searchMethod_e searchMethod, const U32 depth)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
const BYTE* const base = ms->window.base;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictBase = ms->window.dictBase;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const dictStart = dictBase + ms->window.lowLimit;
const U32 windowLog = ms->cParams.windowLog;
const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
U32 offset_1 = rep[0], offset_2 = rep[1];
DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
/* init */
ip += (ip == prefixStart);
if (searchMethod == search_rowHash) {
ZSTD_row_fillHashCache(ms, base, rowLog,
MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
ms->nextToUpdate, ilimit);
}
/* Match Loop */
#if defined(__x86_64__)
/* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
* code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
*/
__asm__(".p2align 5");
#endif
while (ip < ilimit) {
size_t matchLength=0;
size_t offcode=STORE_REPCODE_1;
const BYTE* start=ip+1;
U32 curr = (U32)(ip-base);
/* check repCode */
{ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
const U32 repIndex = (U32)(curr+1 - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
& (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
if (depth==0) goto _storeSequence;
} }
/* first search (depth 0) */
{ size_t offsetFound = 999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, ZSTD_extDict);
if (ml2 > matchLength)
matchLength = ml2, start = ip, offcode=offsetFound;
}
if (matchLength < 4) {
ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
continue;
}
/* let's try to find a better solution */
if (depth>=1)
while (ip<ilimit) {
ip ++;
curr++;
/* check repCode */
if (offcode) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 3);
int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((repLength >= 4) && (gain2 > gain1))
matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
} }
/* search match, depth 1 */
{ size_t offset2=999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
continue; /* search a better one */
} }
/* let's find an even better one */
if ((depth==2) && (ip<ilimit)) {
ip ++;
curr++;
/* check repCode */
if (offcode) {
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
const U32 repIndex = (U32)(curr - offset_1);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
int const gain2 = (int)(repLength * 4);
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
if ((repLength >= 4) && (gain2 > gain1))
matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
} }
/* search match, depth 2 */
{ size_t offset2=999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
if ((ml2 >= 4) && (gain2 > gain1)) {
matchLength = ml2, offcode = offset2, start = ip;
continue;
} } }
break; /* nothing found : store previous solution */
}
/* catch up */
if (STORED_IS_OFFSET(offcode)) {
U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
}
/* store sequence */
_storeSequence:
{ size_t const litLength = (size_t)(start - anchor);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
anchor = ip = start + matchLength;
}
/* check immediate repcode */
while (ip <= ilimit) {
const U32 repCurrent = (U32)(ip-base);
const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
const U32 repIndex = repCurrent - offset_2;
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
if (MEM_read32(ip) == MEM_read32(repMatch)) {
/* repcode detected we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */
ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
ip += matchLength;
anchor = ip;
continue; /* faster when present ... (?) */
}
break;
} }
/* Save reps for next block */
rep[0] = offset_1;
rep[1] = offset_2;
/* Return the last literals size */
return (size_t)(iend - anchor);
}
size_t ZSTD_compressBlock_greedy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
}
size_t ZSTD_compressBlock_lazy_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
}
size_t ZSTD_compressBlock_lazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
}
size_t ZSTD_compressBlock_btlazy2_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
}
size_t ZSTD_compressBlock_greedy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
}
size_t ZSTD_compressBlock_lazy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
}
size_t ZSTD_compressBlock_lazy2_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
}
| linux-master | lib/zstd/compress/zstd_lazy.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#include "zstd_compress_literals.h"
size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
switch(flSize)
{
case 1: /* 2 - 1 - 5 */
ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
break;
case 2: /* 2 - 2 - 12 */
MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
break;
case 3: /* 2 - 2 - 20 */
MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
break;
default: /* not necessary : flSize is {1,2,3} */
assert(0);
}
ZSTD_memcpy(ostart + flSize, src, srcSize);
DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
return srcSize + flSize;
}
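/* Worked example (assuming set_basic == 0, as defined in the block types enum):
* for srcSize == 20, flSize == 1, so ostart[0] = (20 << 3) = 0xA0 :
*   bits [0-1] = 0 (raw literals block), bit [2] = 0 (1-byte size format),
*   bits [3-7] = 20 (regenerated size). The 20 literal bytes follow verbatim. */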
size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
BYTE* const ostart = (BYTE*)dst;
U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
(void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
switch(flSize)
{
case 1: /* 2 - 1 - 5 */
ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
break;
case 2: /* 2 - 2 - 12 */
MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
break;
case 3: /* 2 - 2 - 20 */
MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
break;
default: /* not necessary : flSize is {1,2,3} */
assert(0);
}
ostart[flSize] = *(const BYTE*)src;
DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
return flSize+1;
}
size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
const int bmi2,
unsigned suspectUncompressible)
{
size_t const minGain = ZSTD_minGain(srcSize, strategy);
size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
BYTE* const ostart = (BYTE*)dst;
U32 singleStream = srcSize < 256;
symbolEncodingType_e hType = set_compressed;
size_t cLitSize;
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
disableLiteralCompression, (U32)srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (disableLiteralCompression)
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
/* small ? don't even attempt compression (speed opt) */
# define COMPRESS_LITERALS_SIZE_MIN 63
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
{ HUF_repeat repeat = prevHuf->repeatMode;
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
cLitSize = singleStream ?
HUF_compress1X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
HUF_compress4X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
if (repeat != HUF_repeat_none) {
/* reused the existing table */
DEBUGLOG(5, "Reusing previous huffman table");
hType = set_repeat;
}
}
if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
if (cLitSize==1) {
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}
if (hType == set_compressed) {
/* using a newly constructed table */
nextHuf->repeatMode = HUF_repeat_check;
}
/* Build header */
switch(lhSize)
{
case 3: /* 2 - 2 - 10 - 10 */
{ U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
MEM_writeLE24(ostart, lhc);
break;
}
case 4: /* 2 - 2 - 14 - 14 */
{ U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
MEM_writeLE32(ostart, lhc);
break;
}
case 5: /* 2 - 2 - 18 - 18 */
{ U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
MEM_writeLE32(ostart, lhc);
ostart[4] = (BYTE)(cLitSize >> 10);
break;
}
default: /* not possible : lhSize is {3,4,5} */
assert(0);
}
DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
return lhSize+cLitSize;
}
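/* Worked example for the lhSize == 3 case ("2 - 2 - 10 - 10"), values illustrative:
* with hType == set_compressed, singleStream == 0, srcSize == 500 and cLitSize == 200,
*   lhc = set_compressed + (1 << 2) + (500 << 4) + (200 << 14)
* is written as 3 little-endian bytes: 2 bits block type, 2 bits size format,
* 10 bits regenerated size, 10 bits compressed size. */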
| linux-master | lib/zstd/compress/zstd_compress_literals.c |
/* ******************************************************************
* Huffman encoder, part of New Generation Entropy library
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
/* **************************************************************
* Includes
****************************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "../common/fse.h" /* header compression */
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
/* **************************************************************
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
/* **************************************************************
* Utils
****************************************************************/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}
/* *******************************************************
* HUF : Huffman block compression
*********************************************************/
#define HUF_WORKSPACE_MAX_ALIGNMENT 8
static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
{
size_t const mask = align - 1;
size_t const rem = (size_t)workspace & mask;
size_t const add = (align - rem) & mask;
BYTE* const aligned = (BYTE*)workspace + add;
assert((align & (align - 1)) == 0); /* pow 2 */
assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
if (*workspaceSizePtr >= add) {
assert(add < align);
assert(((size_t)aligned & mask) == 0);
*workspaceSizePtr -= add;
return aligned;
} else {
*workspaceSizePtr = 0;
return NULL;
}
}
/* HUF_compressWeights() :
* Same as FSE_compress(), but dedicated to huff0's weights compression.
* The use case needs much less stack memory.
* Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
*/
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
typedef struct {
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
unsigned count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;
static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
unsigned maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
/* init conditions */
if (wtSize <= 1) return 0; /* Not compressible */
/* Scan input and build symbol stats */
{ unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
}
tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
/* Write table description header */
{ CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
op += hSize;
}
/* Compress */
CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
return (size_t)(op-ostart);
}
static size_t HUF_getNbBits(HUF_CElt elt)
{
return elt & 0xFF;
}
static size_t HUF_getNbBitsFast(HUF_CElt elt)
{
return elt;
}
static size_t HUF_getValue(HUF_CElt elt)
{
return elt & ~0xFF;
}
static size_t HUF_getValueFast(HUF_CElt elt)
{
return elt;
}
static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
{
assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
*elt = nbBits;
}
static void HUF_setValue(HUF_CElt* elt, size_t value)
{
size_t const nbBits = HUF_getNbBits(*elt);
if (nbBits > 0) {
assert((value >> nbBits) == 0);
*elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
}
}
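/* A HUF_CElt therefore packs the code length in its low 8 bits and the code
* value left-aligned in the remaining bits. For example (illustrative, with a
* 64-bit HUF_CElt), nbBits == 3 and value == 0b101 give
*   elt = 3 | (0b101 << (64 - 3)) = 0xA000000000000003. */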
typedef struct {
HUF_CompressWeightsWksp wksp;
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
} HUF_WriteCTableWksp;
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
void* workspace, size_t workspaceSize)
{
HUF_CElt const* const ct = CTable + 1;
BYTE* op = (BYTE*)dst;
U32 n;
HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
/* check conditions */
if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
/* convert to weight */
wksp->bitsToWeight[0] = 0;
for (n=1; n<huffLog+1; n++)
wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
for (n=0; n<maxSymbolValue; n++)
wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
/* attempt weights compression by FSE */
if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
return hSize+1;
} }
/* write raw values as 4-bits (max : 15) */
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
for (n=0; n<maxSymbolValue; n+=2)
op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
return ((maxSymbolValue+1)/2) + 1;
}
/*! HUF_writeCTable() :
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
{
HUF_WriteCTableWksp wksp;
return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
}
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
HUF_CElt* const ct = CTable + 1;
/* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
*hasZeroWeights = (rankVal[0] > 0);
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
CTable[0] = tableLog;
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
U32 curr = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = curr;
} }
/* fill nbBits */
{ U32 n; for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
} }
/* fill val */
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
/* determine starting value per rank */
valPerRank[tableLog+1] = 0; /* for w==0 */
{ U16 min = 0;
U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
/* assign value within rank, symbol order */
{ U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
}
*maxSymbolValuePtr = nbSymbols - 1;
return readSize;
}
U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
const HUF_CElt* ct = CTable + 1;
assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
return (U32)HUF_getNbBits(ct[symbolValue]);
}
typedef struct nodeElt_s {
U32 count;
U16 parent;
BYTE byte;
BYTE nbBits;
} nodeElt;
/*
* HUF_setMaxHeight():
* Enforces maxNbBits on the Huffman tree described in huffNode.
*
* It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
* the tree so that it is a valid canonical Huffman tree.
*
* @pre The sum of the ranks of each symbol == 2^largestBits,
* where largestBits == huffNode[lastNonNull].nbBits.
* @post The sum of the ranks of each symbol == 2^largestBits,
* where largestBits is the return value <= maxNbBits.
*
* @param huffNode The Huffman tree modified in place to enforce maxNbBits.
* @param lastNonNull The symbol with the lowest count in the Huffman tree.
* @param maxNbBits The maximum allowed number of bits, which the Huffman tree
* may not respect. After this function the Huffman tree will
* respect maxNbBits.
* @return The maximum number of bits of the Huffman tree after adjustment,
* necessarily no more than maxNbBits.
*/
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
const U32 largestBits = huffNode[lastNonNull].nbBits;
/* early exit : no elt > maxNbBits, so the tree is already valid. */
if (largestBits <= maxNbBits) return largestBits;
/* there are several too large elements (at least >= 2) */
{ int totalCost = 0;
const U32 baseCost = 1 << (largestBits - maxNbBits);
int n = (int)lastNonNull;
/* Adjust any ranks > maxNbBits to maxNbBits.
* Compute totalCost, which is how far the sum of the ranks is
* over 2^largestBits after adjusting the offending ranks.
*/
while (huffNode[n].nbBits > maxNbBits) {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
huffNode[n].nbBits = (BYTE)maxNbBits;
n--;
}
/* n stops at huffNode[n].nbBits <= maxNbBits */
assert(huffNode[n].nbBits <= maxNbBits);
/* n ends at the index of the smallest symbol using < maxNbBits */
while (huffNode[n].nbBits == maxNbBits) --n;
/* renorm totalCost from 2^largestBits to 2^maxNbBits
* note : totalCost is necessarily a multiple of baseCost */
assert((totalCost & (baseCost - 1)) == 0);
totalCost >>= (largestBits - maxNbBits);
assert(totalCost > 0);
/* repay normalized cost */
{ U32 const noSymbol = 0xF0F0F0F0;
U32 rankLast[HUF_TABLELOG_MAX+2];
/* Get pos of last (smallest = lowest cum. count) symbol per rank */
ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
{ U32 currentNbBits = maxNbBits;
int pos;
for (pos=n ; pos >= 0; pos--) {
if (huffNode[pos].nbBits >= currentNbBits) continue;
currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
rankLast[maxNbBits-currentNbBits] = (U32)pos;
} }
while (totalCost > 0) {
/* Try to reduce the next power of 2 above totalCost because we
* gain back half the rank.
*/
U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 const highPos = rankLast[nBitsToDecrease];
U32 const lowPos = rankLast[nBitsToDecrease-1];
if (highPos == noSymbol) continue;
/* Decrease highPos if no symbols of lowPos or if it is
* not cheaper to remove 2 lowPos than highPos.
*/
if (lowPos == noSymbol) break;
{ U32 const highTotal = huffNode[highPos].count;
U32 const lowTotal = 2 * huffNode[lowPos].count;
if (highTotal <= lowTotal) break;
} }
/* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
/* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
nBitsToDecrease++;
assert(rankLast[nBitsToDecrease] != noSymbol);
/* Increase the number of bits to gain back half the rank cost. */
totalCost -= 1 << (nBitsToDecrease-1);
huffNode[rankLast[nBitsToDecrease]].nbBits++;
/* Fix up the new rank.
* If the new rank was empty, this symbol is now its smallest.
* Otherwise, this symbol will be the largest in the new rank so no adjustment.
*/
if (rankLast[nBitsToDecrease-1] == noSymbol)
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
/* Fix up the old rank.
* If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
* it must be the only symbol in its rank, so the old rank now has no symbols.
* Otherwise, since the Huffman nodes are sorted by count, the previous position is now
* the smallest node in the rank. If the previous position belongs to a different rank,
* then the rank is now empty.
*/
if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol;
else {
rankLast[nBitsToDecrease]--;
if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
}
} /* while (totalCost > 0) */
/* If we've removed too much weight, then we have to add it back.
* To avoid overshooting again, we only adjust the smallest rank.
* We take the largest nodes from the lowest rank 0 and move them
* to rank 1. There's guaranteed to be enough rank 0 symbols because
* TODO.
*/
while (totalCost < 0) { /* Sometimes, cost correction overshoot */
/* special case : no rank 1 symbol (using maxNbBits-1);
* let's create one from largest rank 0 (using maxNbBits).
*/
if (rankLast[1] == noSymbol) {
while (huffNode[n].nbBits == maxNbBits) n--;
huffNode[n+1].nbBits--;
assert(n >= 0);
rankLast[1] = (U32)(n+1);
totalCost++;
continue;
}
huffNode[ rankLast[1] + 1 ].nbBits--;
rankLast[1]++;
totalCost ++;
}
} /* repay normalized cost */
} /* there are several too large elements (at least >= 2) */
return maxNbBits;
}
typedef struct {
U16 base;
U16 curr;
} rankPos;
typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
/* Number of buckets available for HUF_sort() */
#define RANK_POSITION_TABLE_SIZE 192
typedef struct {
huffNodeTable huffNodeTbl;
rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;
/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
* Strategy is to use as many buckets as possible for representing distinct
* counts while using the remainder to represent all "large" counts.
*
* To satisfy this requirement for 192 buckets, we can do the following:
* Let buckets 0-166 represent distinct counts of [0, 166]
* Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
*/
#define RANK_POSITION_MAX_COUNT_LOG 32
#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */
/* Return the appropriate bucket index for a given count. See definition of
* RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
*/
static U32 HUF_getIndex(U32 const count) {
return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
? count
: BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
}
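/* Illustrative sketch of the bucketing above (not compiled; the numeric
 * values are examples only): small counts keep a bucket of their own,
 * while large counts collapse into log2 buckets starting at
 * RANK_POSITION_LOG_BUCKETS_BEGIN.
 */
#if 0
static void HUF_getIndex_example(void)
{
    assert(HUF_getIndex(3)    == 3);                                     /* distinct bucket */
    assert(HUF_getIndex(100)  == 100);                                   /* distinct bucket */
    assert(HUF_getIndex(1000) == 9 + RANK_POSITION_LOG_BUCKETS_BEGIN);   /* == 167 */
    assert(HUF_getIndex(4096) == 12 + RANK_POSITION_LOG_BUCKETS_BEGIN);  /* == 170 */
}
#endif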
/* Helper swap function for HUF_quickSortPartition() */
static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
nodeElt tmp = *a;
*a = *b;
*b = tmp;
}
/* Returns 0 if the huffNode array is not sorted by descending count */
MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
U32 i;
for (i = 1; i < maxSymbolValue1; ++i) {
if (huffNode[i].count > huffNode[i-1].count) {
return 0;
}
}
return 1;
}
/* Insertion sort by descending order */
HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
int i;
int const size = high-low+1;
huffNode += low;
for (i = 1; i < size; ++i) {
nodeElt const key = huffNode[i];
int j = i - 1;
while (j >= 0 && huffNode[j].count < key.count) {
huffNode[j + 1] = huffNode[j];
j--;
}
huffNode[j + 1] = key;
}
}
/* Pivot helper function for quicksort. */
static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
/* Simply select rightmost element as pivot. "Better" selectors like
* median-of-three don't experimentally appear to have any benefit.
*/
U32 const pivot = arr[high].count;
int i = low - 1;
int j = low;
for ( ; j < high; j++) {
if (arr[j].count > pivot) {
i++;
HUF_swapNodes(&arr[i], &arr[j]);
}
}
HUF_swapNodes(&arr[i + 1], &arr[high]);
return i + 1;
}
/* Classic quicksort by descending order. Recursing only into the smaller
 * partition and iterating on the larger one bounds the worst-case callstack
 * depth to O(log n).
 */
static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
int const kInsertionSortThreshold = 8;
if (high - low < kInsertionSortThreshold) {
HUF_insertionSort(arr, low, high);
return;
}
while (low < high) {
int const idx = HUF_quickSortPartition(arr, low, high);
if (idx - low < high - idx) {
HUF_simpleQuickSort(arr, low, idx - 1);
low = idx + 1;
} else {
HUF_simpleQuickSort(arr, idx + 1, high);
high = idx - 1;
}
}
}
/*
* HUF_sort():
* Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
* This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
*
* @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
* Must have (maxSymbolValue + 1) entries.
* @param[in] count Histogram of the symbols.
* @param[in] maxSymbolValue Maximum symbol value.
* @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
*/
static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
U32 n;
U32 const maxSymbolValue1 = maxSymbolValue+1;
/* Compute base and set curr to base.
 * For each symbol s, let lowerRank = HUF_getIndex(count[s]) and rank = lowerRank + 1.
* See HUF_getIndex to see bucketing strategy.
* We attribute each symbol to lowerRank's base value, because we want to know where
* each rank begins in the output, so for rank R we want to count ranks R+1 and above.
*/
ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
for (n = 0; n < maxSymbolValue1; ++n) {
U32 lowerRank = HUF_getIndex(count[n]);
assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
rankPosition[lowerRank].base++;
}
assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
/* Set up the rankPosition table */
for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
rankPosition[n-1].base += rankPosition[n].base;
rankPosition[n-1].curr = rankPosition[n-1].base;
}
/* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
for (n = 0; n < maxSymbolValue1; ++n) {
U32 const c = count[n];
U32 const r = HUF_getIndex(c) + 1;
U32 const pos = rankPosition[r].curr++;
assert(pos < maxSymbolValue1);
huffNode[pos].count = c;
huffNode[pos].byte = (BYTE)n;
}
/* Sort each bucket. */
for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base;
U32 const bucketStartIdx = rankPosition[n].base;
if (bucketSize > 1) {
assert(bucketStartIdx < maxSymbolValue1);
HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
}
}
assert(HUF_isSorted(huffNode, maxSymbolValue1));
}
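/* Minimal usage sketch for HUF_sort() (not compiled; the histogram values
 * are hypothetical): the caller supplies a histogram and a scratch
 * rankPosition table, and receives huffNode[] ordered by decreasing count.
 */
#if 0
static void HUF_sort_example(void)
{
    unsigned const count[4] = { 5, 1, 9, 3 };   /* histogram for symbols 0..3 */
    nodeElt nodes[4];                           /* needs maxSymbolValue + 1 entries */
    rankPos scratch[RANK_POSITION_TABLE_SIZE];
    HUF_sort(nodes, count, 3 /* maxSymbolValue */, scratch);
    /* nodes[] is now (count, byte) = (9,2), (5,0), (3,3), (1,1) */
}
#endif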
/* HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
/* HUF_buildTree():
* Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
*
* @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
* @param maxSymbolValue The maximum symbol value.
* @return The smallest node in the Huffman tree (by count).
*/
static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
{
nodeElt* const huffNode0 = huffNode - 1;
int nonNullRank;
int lowS, lowN;
int nodeNb = STARTNODE;
int n, nodeRoot;
/* init for parents */
nonNullRank = (int)maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
nodeNb++; lowS-=2;
for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
/* create parents */
while (nodeNb <= nodeRoot) {
int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
nodeNb++;
}
/* distribute weights (unlimited tree height) */
huffNode[nodeRoot].nbBits = 0;
for (n=nodeRoot-1; n>=STARTNODE; n--)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
for (n=0; n<=nonNullRank; n++)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
return nonNullRank;
}
/*
* HUF_buildCTableFromTree():
* Build the CTable given the Huffman tree in huffNode.
*
* @param[out] CTable The output Huffman CTable.
* @param huffNode The Huffman tree.
* @param nonNullRank The last and smallest node in the Huffman tree.
* @param maxSymbolValue The maximum symbol value.
* @param maxNbBits The exact maximum number of bits used in the Huffman tree.
*/
static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
{
HUF_CElt* const ct = CTable + 1;
/* fill result into ctable (val, nbBits) */
int n;
U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
int const alphabetSize = (int)(maxSymbolValue + 1);
for (n=0; n<=nonNullRank; n++)
nbPerRank[huffNode[n].nbBits]++;
/* determine starting value per rank */
{ U16 min = 0;
for (n=(int)maxNbBits; n>0; n--) {
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
for (n=0; n<alphabetSize; n++)
HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
for (n=0; n<alphabetSize; n++)
HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */
CTable[0] = maxNbBits;
}
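/* Worked example of the valPerRank loop above: with maxNbBits == 3 and
 * nbPerRank[1..3] == { 1, 1, 2 } (one 1-bit, one 2-bit, two 3-bit symbols),
 * scanning from n=3 downwards yields valPerRank[1..3] == { 1, 1, 0 }, i.e.
 * codes 1 (1 bit), 01 (2 bits), 000 and 001 (3 bits) : the canonical
 * prefix-free assignment.
 */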
size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
nodeElt* const huffNode = huffNode0+1;
int nonNullRank;
/* safety checks */
if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
return ERROR(workSpace_tooSmall);
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
return ERROR(maxSymbolValue_tooLarge);
ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
/* sort, decreasing order */
HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
/* build tree */
nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
/* enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
return maxNbBits;
}
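/* Hedged usage sketch for HUF_buildCTable_wksp() (not compiled; the buffer
 * names and the assumption that `count` already holds a real histogram are
 * illustrative only).
 */
#if 0
static void HUF_buildCTable_wksp_example(const unsigned count[HUF_SYMBOLVALUE_MAX + 1])
{
    HUF_CElt ctable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
    HUF_buildCTable_wksp_tables wksp;   /* scratch tables */
    size_t const maxNbBits = HUF_buildCTable_wksp(ctable, count,
                                                  HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                                  &wksp, sizeof(wksp));
    if (!HUF_isError(maxNbBits)) {
        /* ctable[0] holds maxNbBits; ctable[1..] hold one (nbBits, value) per symbol */
    }
}
#endif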
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
HUF_CElt const* ct = CTable + 1;
size_t nbBits = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
nbBits += HUF_getNbBits(ct[s]) * count[s];
}
return nbBits >> 3;
}
int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
HUF_CElt const* ct = CTable + 1;
int bad = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
}
return !bad;
}
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
/* HUF_CStream_t:
* Huffman uses its own BIT_CStream_t implementation.
* There are three major differences from BIT_CStream_t:
* 1. HUF_addBits() takes a HUF_CElt (size_t) which is
 * the pair (nbBits, value) in the format:
* - Bits [0, 4) = nbBits
* - Bits [4, 64 - nbBits) = 0
* - Bits [64 - nbBits, 64) = value
* 2. The bitContainer is built from the upper bits and
* right shifted. E.g. to add a new value of N bits
* you right shift the bitContainer by N, then or in
* the new value into the N upper bits.
* 3. The bitstream has two bit containers. You can add
* bits to the second container and merge them into
* the first container.
*/
#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
typedef struct {
size_t bitContainer[2];
size_t bitPos[2];
BYTE* startPtr;
BYTE* ptr;
BYTE* endPtr;
} HUF_CStream_t;
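/* Worked example of the (nbBits, value) packing described above, assuming
 * the usual case where HUF_CElt is a 64-bit size_t (the code itself is
 * generic over the container width). Not compiled.
 */
#if 0
static void HUF_CElt_format_example(void)
{
    HUF_CElt elt;
    HUF_setNbBits(&elt, 3);
    HUF_setValue(&elt, 0x5);    /* value 0b101, stored in the top 3 bits */
    assert(HUF_getNbBits(elt) == 3);
    assert(HUF_getValue(elt) == ((size_t)0x5 << (HUF_BITS_IN_CONTAINER - 3)));
    /* HUF_addBits() then only needs `container >>= 3; container |= value;`
     * to append these 3 bits at the top of the bit container. */
}
#endif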
/*! HUF_initCStream():
* Initializes the bitstream.
* @returns 0 or an error code.
*/
static size_t HUF_initCStream(HUF_CStream_t* bitC,
void* startPtr, size_t dstCapacity)
{
ZSTD_memset(bitC, 0, sizeof(*bitC));
bitC->startPtr = (BYTE*)startPtr;
bitC->ptr = bitC->startPtr;
bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
return 0;
}
/*! HUF_addBits():
* Adds the symbol stored in HUF_CElt elt to the bitstream.
*
* @param elt The element we're adding. This is a (nbBits, value) pair.
* See the HUF_CStream_t docs for the format.
* @param idx Insert into the bitstream at this idx.
* @param kFast This is a template parameter. If the bitstream is guaranteed
* to have at least 4 unused bits after this call it may be 1,
* otherwise it must be 0. HUF_addBits() is faster when fast is set.
*/
FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
{
assert(idx <= 1);
assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
/* This is efficient on x86-64 with BMI2 because shrx
* only reads the low 6 bits of the register. The compiler
* knows this and elides the mask. When fast is set,
* every operation can use the same value loaded from elt.
*/
bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
/* We only read the low 8 bits of bitC->bitPos[idx] so it
* doesn't matter that the high bits have noise from the value.
*/
bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
/* The last 4-bits of elt are dirty if fast is set,
* so we must not be overwriting bits that have already been
* inserted into the bit container.
*/
#if DEBUGLEVEL >= 1
{
size_t const nbBits = HUF_getNbBits(elt);
size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1;
(void)dirtyBits;
/* Middle bits are 0. */
assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
/* We didn't overwrite any bits in the bit container. */
assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
(void)dirtyBits;
}
#endif
}
FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
{
bitC->bitContainer[1] = 0;
bitC->bitPos[1] = 0;
}
/*! HUF_mergeIndex1() :
* Merges the bit container @ index 1 into the bit container @ index 0
* and zeros the bit container @ index 1.
*/
FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
{
assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
bitC->bitContainer[0] |= bitC->bitContainer[1];
bitC->bitPos[0] += bitC->bitPos[1];
assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
}
/*! HUF_flushBits() :
* Flushes the bits in the bit container @ index 0.
*
* @post bitPos will be < 8.
* @param kFast If kFast is set then we must know a-priori that
* the bit container will not overflow.
*/
FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
{
/* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
size_t const nbBits = bitC->bitPos[0] & 0xFF;
size_t const nbBytes = nbBits >> 3;
/* The top nbBits bits of bitContainer are the ones we need. */
size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
/* Mask bitPos to account for the bytes we consumed. */
bitC->bitPos[0] &= 7;
assert(nbBits > 0);
assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitContainer);
bitC->ptr += nbBytes;
assert(!kFast || bitC->ptr <= bitC->endPtr);
if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
/* bitContainer doesn't need to be modified because the leftover
* bits are already the top bitPos bits. And we don't care about
* noise in the lower values.
*/
}
/*! HUF_endMark()
* @returns The Huffman stream end mark: A 1-bit value = 1.
*/
static HUF_CElt HUF_endMark(void)
{
HUF_CElt endMark;
HUF_setNbBits(&endMark, 1);
HUF_setValue(&endMark, 1);
return endMark;
}
/*! HUF_closeCStream() :
* @return Size of CStream, in bytes,
* or 0 if it could not fit into dstBuffer */
static size_t HUF_closeCStream(HUF_CStream_t* bitC)
{
HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
HUF_flushBits(bitC, /* kFast */ 0);
{
size_t const nbBits = bitC->bitPos[0] & 0xFF;
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
return (bitC->ptr - bitC->startPtr) + (nbBits > 0);
}
}
FORCE_INLINE_TEMPLATE void
HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
{
HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
}
FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
const BYTE* ip, size_t srcSize,
const HUF_CElt* ct,
int kUnroll, int kFastFlush, int kLastFast)
{
/* Join to kUnroll */
int n = (int)srcSize;
int rem = n % kUnroll;
if (rem > 0) {
for (; rem > 0; --rem) {
HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
}
HUF_flushBits(bitC, kFastFlush);
}
assert(n % kUnroll == 0);
/* Join to 2 * kUnroll */
if (n % (2 * kUnroll)) {
int u;
for (u = 1; u < kUnroll; ++u) {
HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
}
HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
HUF_flushBits(bitC, kFastFlush);
n -= kUnroll;
}
assert(n % (2 * kUnroll) == 0);
for (; n>0; n-= 2 * kUnroll) {
/* Encode kUnroll symbols into the bitstream @ index 0. */
int u;
for (u = 1; u < kUnroll; ++u) {
HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
}
HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
HUF_flushBits(bitC, kFastFlush);
/* Encode kUnroll symbols into the bitstream @ index 1.
* This allows us to start filling the bit container
* without any data dependencies.
*/
HUF_zeroIndex1(bitC);
for (u = 1; u < kUnroll; ++u) {
HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
}
HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
/* Merge bitstream @ index 1 into the bitstream @ index 0 */
HUF_mergeIndex1(bitC);
HUF_flushBits(bitC, kFastFlush);
}
assert(n == 0);
}
/*
 * Returns a tight upper bound on the output space needed by Huffman,
 * plus an 8-byte buffer to absorb over-writes. If the output is at least
 * this large we don't need to do bounds checks during Huffman encoding.
 */
static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
{
return ((srcSize * tableLog) >> 3) + 8;
}
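/* e.g. srcSize = 1000 and tableLog = 11 give ((1000 * 11) >> 3) + 8 = 1383 :
 * every symbol is charged the full tableLog bits, plus 8 bytes of slack for
 * the word-sized writes performed by HUF_flushBits().
 */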
FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
{
U32 const tableLog = (U32)CTable[0];
HUF_CElt const* ct = CTable + 1;
const BYTE* ip = (const BYTE*) src;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_CStream_t bitC;
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
{ size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
if (HUF_isError(initErr)) return 0; }
if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
else {
if (MEM_32bits()) {
switch (tableLog) {
case 11:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 10: ZSTD_FALLTHROUGH;
case 9: ZSTD_FALLTHROUGH;
case 8:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
case 7: ZSTD_FALLTHROUGH;
default:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
}
} else {
switch (tableLog) {
case 11:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 10:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
case 9:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 8:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 7:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 6: ZSTD_FALLTHROUGH;
default:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
}
}
}
assert(bitC.ptr <= bitC.endPtr);
return HUF_closeCStream(&bitC);
}
#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
{
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}
static size_t
HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable)
{
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable, const int bmi2)
{
if (bmi2) {
return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
}
return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
}
#else
static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable, const int bmi2)
{
(void)bmi2;
return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}
#endif
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
}
size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
{
return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
}
static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
const void* src, size_t srcSize,
const HUF_CElt* CTable, int bmi2)
{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
if (srcSize < 12) return 0; /* no saving possible : too small input */
op += 6; /* jumpTable */
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
}
ip += segmentSize;
assert(op <= oend);
assert(ip <= iend);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
if (cSize == 0 || cSize > 65535) return 0;
op += cSize;
}
return (size_t)(op-ostart);
}
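/* Sketch of the 4-stream block layout produced above, seen from the reader's
 * side (hypothetical; not compiled). The 6-byte jump table holds the sizes of
 * the first 3 streams; each of those streams encodes segmentSize = (srcSize+3)/4
 * input bytes, and the 4th stream covers the remainder of the block.
 */
#if 0
static void HUF_4streams_layout_example(const void* cBlock)
{
    const BYTE* const istart = (const BYTE*)cBlock;
    size_t const cSize1 = MEM_readLE16(istart);        /* bytes 0-1 : size of stream 1 */
    size_t const cSize2 = MEM_readLE16(istart + 2);    /* bytes 2-3 : size of stream 2 */
    size_t const cSize3 = MEM_readLE16(istart + 4);    /* bytes 4-5 : size of stream 3 */
    const BYTE* const stream1 = istart + 6;
    const BYTE* const stream2 = stream1 + cSize1;
    const BYTE* const stream3 = stream2 + cSize2;
    const BYTE* const stream4 = stream3 + cSize3;      /* runs to the end of the block */
    (void)stream4;
}
#endif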
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
}
size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
{
return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
}
typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
{
size_t const cSize = (nbStreams==HUF_singleStream) ?
HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* uncompressible */
op += cSize;
/* check compressibility */
assert(op >= ostart);
if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
return (size_t)(op-ostart);
}
typedef struct {
unsigned count[HUF_SYMBOLVALUE_MAX + 1];
HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
union {
HUF_buildCTable_wksp_tables buildCTable_wksp;
HUF_WriteCTableWksp writeCTable_wksp;
U32 hist_wksp[HIST_WKSP_SIZE_U32];
} wksps;
} HUF_compress_tables_t;
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
/* HUF_compress_internal() :
 * `workSpace` is re-aligned internally to sizeof(size_t); after alignment it
 * must still provide at least sizeof(HUF_compress_tables_t) bytes
 * (HUF_WORKSPACE_SIZE is always sufficient). */
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
void* workSpace, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
const int bmi2, unsigned suspectUncompressible)
{
HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
/* checks & inits */
if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
/* Heuristic : If old table is valid, use it for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
}
/* If uncompressible data is suspected, do a smaller sampling first */
DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
size_t largestTotal = 0;
{ unsigned maxSymbolValueBegin = maxSymbolValue;
CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
largestTotal += largestBegin;
}
{ unsigned maxSymbolValueEnd = maxSymbolValue;
CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
largestTotal += largestEnd;
}
if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
/* Scan input and build symbol stats */
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
/* Check validity of previous table */
if ( repeat
&& *repeat == HUF_repeat_check
&& !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
}
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
maxSymbolValue, huffLog,
&table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
CHECK_F(maxBits);
huffLog = (U32)maxBits;
}
/* Zero unused symbols in CTable, so we can check it for validity */
{
size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
}
/* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
/* Check if using previous huffman table is beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, oldHufTable, bmi2);
} }
/* Use the new huffman table */
if (hSize + 12ul >= srcSize) { return 0; }
op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable)
ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
}
return HUF_compressCTable_internal(ostart, op, oend,
src, srcSize,
nbStreams, table->CTable, bmi2);
}
size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/, 0);
}
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
int bmi2, unsigned suspectUncompressible)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize, hufTable,
repeat, preferRepeat, bmi2, suspectUncompressible);
}
/* HUF_compress4X_wksp():
 * compress input using 4 streams.
 * provide workspace to generate compression tables */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/, 0);
}
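/* Hypothetical usage sketch for HUF_compress4X_wksp() (not compiled; a real
 * caller would normally pass a pre-allocated workspace rather than burning
 * this much stack).
 */
#if 0
static size_t HUF_compress4X_wksp_example(void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize)
{
    U64 wksp[HUF_WORKSPACE_SIZE / sizeof(U64)];   /* HUF_WORKSPACE_SIZE bytes is always enough */
    size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                                             255 /* maxSymbolValue */, 11 /* huffLog */,
                                             wksp, sizeof(wksp));
    if (HUF_isError(cSize)) return cSize;   /* error code */
    if (cSize == 0) return 0;               /* not compressible : caller stores raw */
    return cSize;                           /* compressed size written into dst */
}
#endif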
/* HUF_compress4X_repeat():
 * compress input using 4 streams.
 * may skip quickly if the input looks incompressible (see suspectUncompressible),
 * and may re-use an existing huffman compression table (see repeat/preferRepeat) */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
}
| linux-master | lib/zstd/compress/huf_compress.c |
/*
* Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"
#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_MAX_PRICE (1<<30)
#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
/*-*************************************
* Price functions for optimal parser
***************************************/
#if 0 /* approximation at bit level (for tests) */
# define BITCOST_ACCURACY 0
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
#elif 0 /* fractional bit accuracy (for tests) */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
#else /* opt==approx, ultra==accurate */
# define BITCOST_ACCURACY 8
# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}
MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
U32 const stat = rawStat + 1;
U32 const hb = ZSTD_highbit32(stat);
U32 const BWeight = hb * BITCOST_MULTIPLIER;
U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
U32 const weight = BWeight + FWeight;
assert(hb + BITCOST_ACCURACY < 31);
return weight;
}
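/* Worked example (BITCOST_ACCURACY == 8, so one bit == 256 units):
 * rawStat = 6 gives stat = 7, hb = 2, BWeight = 512, FWeight = (7<<8)>>2 = 448,
 * weight = 960 ~= 3.75 "bits". This approximates log2(stat) + 1 in fixed point,
 * linearly interpolated between powers of 2 (the exact value here is ~3.81),
 * without paying for a real logarithm.
 */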
#if (DEBUGLEVEL>=2)
/* debugging function,
* @return price in bytes as fractional value
* for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
{
return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif
static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
return optPtr->literalCompressionMode != ZSTD_ps_disable;
}
static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
if (ZSTD_compressedLiterals(optPtr))
optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
}
static U32 sum_u32(const unsigned table[], size_t nbElts)
{
size_t n;
U32 total = 0;
for (n=0; n<nbElts; n++) {
total += table[n];
}
return total;
}
static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
{
U32 s, sum=0;
DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
assert(shift < 30);
for (s=0; s<lastEltIndex+1; s++) {
table[s] = 1 + (table[s] >> shift);
sum += table[s];
}
return sum;
}
/* ZSTD_scaleStats() :
 * reduce all elements in table if their sum is too large
 * return the resulting sum of elements */
static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
{
U32 const prevsum = sum_u32(table, lastEltIndex+1);
U32 const factor = prevsum >> logTarget;
DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
assert(logTarget < 30);
if (factor <= 1) return prevsum;
return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
}
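/* e.g. with logTarget = 11 : if the table currently sums to 40000,
 * factor = 40000 >> 11 = 19 and shift = ZSTD_highbit32(19) = 4, so each entry
 * becomes 1 + (entry >> 4). The sum drops back to the order of 2^logTarget
 * while every symbol keeps a non-zero (still reachable) frequency.
 */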
/* ZSTD_rescaleFreqs() :
* if first block (detected by optPtr->litLengthSum == 0) : init statistics
* take hints from dictionary if there is one
* and init from zero if there is none,
* using src for literals stats, and baseline stats for sequence symbols
* otherwise downscale existing stats, to be used as seed for next block.
*/
static void
ZSTD_rescaleFreqs(optState_t* const optPtr,
const BYTE* const src, size_t const srcSize,
int const optLevel)
{
int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
optPtr->priceType = zop_dynamic;
if (optPtr->litLengthSum == 0) { /* first block : init */
if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
optPtr->priceType = zop_predef;
}
assert(optPtr->symbolCosts != NULL);
if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
/* huffman table presumed generated by dictionary */
optPtr->priceType = zop_dynamic;
if (compressedLiterals) {
unsigned lit;
assert(optPtr->litFreq != NULL);
optPtr->litSum = 0;
for (lit=0; lit<=MaxLit; lit++) {
U32 const scaleLog = 11; /* scale to 2K */
U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
assert(bitCost <= scaleLog);
optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
optPtr->litSum += optPtr->litFreq[lit];
} }
{ unsigned ll;
FSE_CState_t llstate;
FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
optPtr->litLengthSum = 0;
for (ll=0; ll<=MaxLL; ll++) {
U32 const scaleLog = 10; /* scale to 1K */
U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
assert(bitCost < scaleLog);
optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
optPtr->litLengthSum += optPtr->litLengthFreq[ll];
} }
{ unsigned ml;
FSE_CState_t mlstate;
FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
optPtr->matchLengthSum = 0;
for (ml=0; ml<=MaxML; ml++) {
U32 const scaleLog = 10;
U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
assert(bitCost < scaleLog);
optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
} }
{ unsigned of;
FSE_CState_t ofstate;
FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
optPtr->offCodeSum = 0;
for (of=0; of<=MaxOff; of++) {
U32 const scaleLog = 10;
U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
assert(bitCost < scaleLog);
optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
optPtr->offCodeSum += optPtr->offCodeFreq[of];
} }
} else { /* not a dictionary */
assert(optPtr->litFreq != NULL);
if (compressedLiterals) {
unsigned lit = MaxLit;
HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
}
{ unsigned const baseLLfreqs[MaxLL+1] = {
4, 2, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1
};
ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
}
{ unsigned ml;
for (ml=0; ml<=MaxML; ml++)
optPtr->matchLengthFreq[ml] = 1;
}
optPtr->matchLengthSum = MaxML+1;
{ unsigned const baseOFCfreqs[MaxOff+1] = {
6, 2, 1, 1, 2, 3, 4, 4,
4, 3, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1
};
ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
}
}
} else { /* new block : re-use previous statistics, scaled down */
if (compressedLiterals)
optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
}
ZSTD_setBasePrices(optPtr, optLevel);
}
/* ZSTD_rawLiteralsCost() :
 * price of the literals (only) in the specified segment (whose length can be 0).
 * does not include the price of the literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
const optState_t* const optPtr,
int optLevel)
{
if (litLength == 0) return 0;
if (!ZSTD_compressedLiterals(optPtr))
return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
if (optPtr->priceType == zop_predef)
return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
/* dynamic statistics */
{ U32 price = litLength * optPtr->litSumBasePrice;
U32 u;
for (u=0; u < litLength; u++) {
assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
}
return price;
}
}
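/* In the dynamic branch each literal effectively costs
 * WEIGHT(litSum) - WEIGHT(litFreq[lit]) ~= log2(litSum / litFreq[lit])
 * fractional bits, i.e. the entropy cost of that byte under the current
 * statistics; the subtraction is applied per literal against the
 * precomputed litSumBasePrice.
 */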
/* ZSTD_litLengthPrice() :
* cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
assert(litLength <= ZSTD_BLOCKSIZE_MAX);
if (optPtr->priceType == zop_predef)
return WEIGHT(litLength, optLevel);
/* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
* because it isn't representable in the zstd format. So instead just
* call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
* would be all literals.
*/
if (litLength == ZSTD_BLOCKSIZE_MAX)
return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
/* dynamic statistics */
{ U32 const llCode = ZSTD_LLcode(litLength);
return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ optPtr->litLengthSumBasePrice
- WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
}
}
/* ZSTD_getMatchPrice() :
* Provides the cost of the match part (offset + matchLength) of a sequence
* Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
* @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
* @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
*/
FORCE_INLINE_TEMPLATE U32
ZSTD_getMatchPrice(U32 const offcode,
U32 const matchLength,
const optState_t* const optPtr,
int const optLevel)
{
U32 price;
U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
U32 const mlBase = matchLength - MINMATCH;
assert(matchLength >= MINMATCH);
if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
/* dynamic statistics */
price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
if ((optLevel<2) /*static*/ && offCode >= 20)
price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
/* match Length */
{ U32 const mlCode = ZSTD_MLcode(mlBase);
price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
}
price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */
DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
return price;
}
/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
U32 litLength, const BYTE* literals,
U32 offsetCode, U32 matchLength)
{
/* literals */
if (ZSTD_compressedLiterals(optPtr)) {
U32 u;
for (u=0; u < litLength; u++)
optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
}
/* literal Length */
{ U32 const llCode = ZSTD_LLcode(litLength);
optPtr->litLengthFreq[llCode]++;
optPtr->litLengthSum++;
}
/* offset code : expected to follow storeSeq() numeric representation */
{ U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
assert(offCode <= MaxOff);
optPtr->offCodeFreq[offCode]++;
optPtr->offCodeSum++;
}
/* match Length */
{ U32 const mlBase = matchLength - MINMATCH;
U32 const mlCode = ZSTD_MLcode(mlBase);
optPtr->matchLengthFreq[mlCode]++;
optPtr->matchLengthSum++;
}
}
/* ZSTD_readMINMATCH() :
* function safe only for comparisons
* assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
switch (length)
{
default :
case 4 : return MEM_read32(memPtr);
case 3 : if (MEM_isLittleEndian())
return MEM_read32(memPtr)<<8;
else
return MEM_read32(memPtr)>>8;
}
}
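/* For length==3, both branches drop the 4th byte (only the shift direction
 * depends on endianness), so two results compare equal iff the first 3 bytes
 * match; for length==4 it is a plain 32-bit load.
 */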
/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip)
{
U32* const hashTable3 = ms->hashTable3;
U32 const hashLog3 = ms->hashLog3;
const BYTE* const base = ms->window.base;
U32 idx = *nextToUpdate3;
U32 const target = (U32)(ip - base);
size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
assert(hashLog3 > 0);
while(idx < target) {
hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
idx++;
}
*nextToUpdate3 = target;
return hashTable3[hash3];
}
/*-*************************************
* Binary Tree search
***************************************/
/* ZSTD_insertBt1() : add one or multiple positions to tree.
* @param ip assumed <= iend-8 .
* @param target The target of ZSTD_updateTree_internal() - we are filling to this position
* @return : nb of positions added */
static U32 ZSTD_insertBt1(
const ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
U32 const target,
U32 const mls, const int extDict)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hashLog = cParams->hashLog;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask = (1 << btLog) - 1;
U32 matchIndex = hashTable[h];
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* match;
const U32 curr = (U32)(ip-base);
const U32 btLow = btMask >= curr ? 0 : curr - btMask;
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = smallerPtr + 1;
U32 dummy32; /* to be nullified at the end */
/* windowLow is based on target because
* we only need positions that will be in the window at the end of the tree update.
*/
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
U32 matchEndIdx = curr+8+1;
size_t bestLength = 8;
U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
predictedSmall += (predictedSmall>0);
predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */
DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
assert(curr <= target);
assert(ip <= iend-8); /* required for h calculation */
hashTable[h] = curr; /* Update Hash Table */
assert(windowLow > 0);
for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(matchIndex < curr);
#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
if (matchIndex == predictedSmall) {
/* no need to check length, result known */
*smallerPtr = matchIndex;
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
predictedSmall = predictPtr[1] + (predictPtr[1]>0);
continue;
}
if (matchIndex == predictedLarge) {
*largerPtr = matchIndex;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
predictedLarge = predictPtr[0] + (predictPtr[0]>0);
continue;
}
#endif
if (!extDict || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
match = base + matchIndex;
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
} else {
match = dictBase + matchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
}
if (matchLength > bestLength) {
bestLength = matchLength;
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
}
if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
}
if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
/* match is smaller than current */
*smallerPtr = matchIndex; /* update smaller idx */
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
} else {
/* match is larger than current */
*largerPtr = matchIndex;
commonLengthLarger = matchLength;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
} }
*smallerPtr = *largerPtr = 0;
{ U32 positions = 0;
if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
assert(matchEndIdx > curr + 8);
return MAX(positions, matchEndIdx - (curr + 8));
}
}
FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend,
const U32 mls, const ZSTD_dictMode_e dictMode)
{
const BYTE* const base = ms->window.base;
U32 const target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
idx, target, dictMode);
while(idx < target) {
U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
assert(idx < (U32)(idx + forward));
idx += forward;
}
assert((size_t)(ip - base) <= (size_t)(U32)(-1));
assert((size_t)(iend - base) <= (size_t)(U32)(-1));
ms->nextToUpdate = target;
}
void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}
FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
const U32 rep[ZSTD_REP_NUM],
U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
const U32 lengthToBeat,
U32 const mls /* template */)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
const BYTE* const base = ms->window.base;
U32 const curr = (U32)(ip-base);
U32 const hashLog = cParams->hashLog;
U32 const minMatch = (mls==3) ? 3 : 4;
U32* const hashTable = ms->hashTable;
size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
U32 matchIndex = hashTable[h];
U32* const bt = ms->chainTable;
U32 const btLog = cParams->chainLog - 1;
U32 const btMask= (1U << btLog) - 1;
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const dictBase = ms->window.dictBase;
U32 const dictLimit = ms->window.dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
U32 const matchLow = windowLow ? windowLow : 1;
U32* smallerPtr = bt + 2*(curr&btMask);
U32* largerPtr = bt + 2*(curr&btMask) + 1;
U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
U32 dummy32; /* to be nullified at the end */
U32 mnum = 0;
U32 nbCompares = 1U << cParams->searchLog;
const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
const ZSTD_compressionParameters* const dmsCParams =
dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
size_t bestLength = lengthToBeat-1;
DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
/* check repCode */
assert(ll0 <= 1); /* necessarily 1 or 0 */
{ U32 const lastR = ZSTD_REP_NUM + ll0;
U32 repCode;
for (repCode = ll0; repCode < lastR; repCode++) {
U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
U32 const repIndex = curr - repOffset;
U32 repLen = 0;
assert(curr >= dictLimit);
if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
/* We must validate the repcode offset because when we're using a dictionary the
* valid offset range shrinks when the dictionary goes out of bounds.
*/
if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
}
} else { /* repIndex < dictLimit || repIndex >= curr */
const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
dmsBase + repIndex - dmsIndexDelta :
dictBase + repIndex;
assert(curr >= windowLow);
if ( dictMode == ZSTD_extDict
&& ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
& (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
}
if (dictMode == ZSTD_dictMatchState
&& ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
& ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
&& (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
} }
/* save longer solution */
if (repLen > bestLength) {
DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
repCode, ll0, repOffset, repLen);
bestLength = repLen;
matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */
matches[mnum].len = (U32)repLen;
mnum++;
if ( (repLen > sufficient_len)
| (ip+repLen == iLimit) ) { /* best possible */
return mnum;
} } } }
/* HC3 match finder */
if ((mls == 3) /*static*/ && (bestLength < mls)) {
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
if ((matchIndex3 >= matchLow)
& (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
size_t mlen;
if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
const BYTE* const match = base + matchIndex3;
mlen = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex3;
mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
}
/* save best solution */
if (mlen >= mls /* == 3 > bestLength */) {
DEBUGLOG(8, "found small match with hlog3, of length %u",
(U32)mlen);
bestLength = mlen;
assert(curr > matchIndex3);
assert(mnum==0); /* no prior solution */
matches[0].off = STORE_OFFSET(curr - matchIndex3);
matches[0].len = (U32)mlen;
mnum = 1;
if ( (mlen > sufficient_len) |
(ip+mlen == iLimit) ) { /* best possible length */
ms->nextToUpdate = curr+1; /* skip insertion */
return 1;
} } }
/* no dictMatchState lookup: dicts don't have a populated HC3 table */
} /* if (mls == 3) */
hashTable[h] = curr; /* Update Hash Table */
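    /* Descriptive note (editor's addition): candidates hashed to h are kept in a
     * binary tree ordered by suffix. Each candidate owns a pair of links at
     * bt[2*(index & btMask)], one toward lexicographically smaller suffixes and one
     * toward larger ones. The loop below walks down this tree, growing the
     * guaranteed common prefix (commonLengthSmaller / commonLengthLarger) as it
     * descends, records every strictly better match into matches[], and re-links
     * the current position into the tree through smallerPtr / largerPtr. */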
for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
U32* const nextPtr = bt + 2*(matchIndex & btMask);
const BYTE* match;
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
assert(curr > matchIndex);
if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
match = base + matchIndex;
if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
} else {
match = dictBase + matchIndex;
assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* prepare for match[matchLength] read */
}
if (matchLength > bestLength) {
DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
(U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
assert(matchEndIdx > matchIndex);
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
matches[mnum].off = STORE_OFFSET(curr - matchIndex);
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
| (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
break; /* drop, to preserve bt consistency (miss a little bit of compression) */
} }
if (match[matchLength] < ip[matchLength]) {
/* match smaller than current */
*smallerPtr = matchIndex; /* update smaller idx */
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
} else {
*largerPtr = matchIndex;
commonLengthLarger = matchLength;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
} }
*smallerPtr = *largerPtr = 0;
assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dictMatchState && nbCompares) {
size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
U32 dictMatchIndex = dms->hashTable[dmsH];
const U32* const dmsBt = dms->chainTable;
commonLengthSmaller = commonLengthLarger = 0;
for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match = dmsBase + dictMatchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
if (dictMatchIndex+matchLength >= dmsHighLimit)
match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
if (matchLength > bestLength) {
matchIndex = dictMatchIndex + dmsIndexDelta;
DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
(U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
if (matchLength > matchEndIdx - matchIndex)
matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
matches[mnum].off = STORE_OFFSET(curr - matchIndex);
matches[mnum].len = (U32)matchLength;
mnum++;
if ( (matchLength > ZSTD_OPT_NUM)
| (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
break; /* drop, to guarantee consistency (miss a little bit of compression) */
} }
if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
if (match[matchLength] < ip[matchLength]) {
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
} else {
/* match is larger than current */
commonLengthLarger = matchLength;
dictMatchIndex = nextPtr[0];
} } } /* if (dictMode == ZSTD_dictMatchState) */
assert(matchEndIdx > curr+8);
ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
return mnum;
}
typedef U32 (*ZSTD_getAllMatchesFn)(
ZSTD_match_t*,
ZSTD_matchState_t*,
U32*,
const BYTE*,
const BYTE*,
const U32 rep[ZSTD_REP_NUM],
U32 const ll0,
U32 const lengthToBeat);
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
ZSTD_match_t* matches,
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* ip,
const BYTE* const iHighLimit,
const U32 rep[ZSTD_REP_NUM],
U32 const ll0,
U32 const lengthToBeat,
const ZSTD_dictMode_e dictMode,
const U32 mls)
{
assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
if (ip < ms->window.base + ms->nextToUpdate)
return 0; /* skipped area */
ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
}
#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
ZSTD_match_t* matches, \
ZSTD_matchState_t* ms, \
U32* nextToUpdate3, \
const BYTE* ip, \
const BYTE* const iHighLimit, \
const U32 rep[ZSTD_REP_NUM], \
U32 const ll0, \
U32 const lengthToBeat) \
{ \
return ZSTD_btGetAllMatches_internal( \
matches, ms, nextToUpdate3, ip, iHighLimit, \
rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
}
#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
{ \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
}
static ZSTD_getAllMatchesFn
ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
{
ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
};
U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
assert((U32)dictMode < 3);
assert(mls - 3 < 4);
return getAllMatchesFns[(int)dictMode][mls - 3];
}
/* ***********************
* LDM helper functions *
*************************/
/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
rawSeqStore_t seqStore; /* External match candidates store for this block */
U32 startPosInBlock; /* Start position of the current match candidate */
U32 endPosInBlock; /* End position of the current match candidate */
U32 offset; /* Offset of the match candidate */
} ZSTD_optLdm_t;
/* ZSTD_optLdm_skipRawSeqStoreBytes():
* Moves forward in @rawSeqStore by @nbBytes,
* which will update the fields 'pos' and 'posInSequence'.
*/
static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
{
U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
while (currPos && rawSeqStore->pos < rawSeqStore->size) {
rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
if (currPos >= currSeq.litLength + currSeq.matchLength) {
currPos -= currSeq.litLength + currSeq.matchLength;
rawSeqStore->pos++;
} else {
rawSeqStore->posInSequence = currPos;
break;
}
}
if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
rawSeqStore->posInSequence = 0;
}
}
/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
* Calculates the beginning and end of the next match in the current block.
* Updates 'pos' and 'posInSequence' of the ldmSeqStore.
*/
static void
ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
U32 blockBytesRemaining)
{
rawSeq currSeq;
U32 currBlockEndPos;
U32 literalsBytesRemaining;
U32 matchBytesRemaining;
/* Setting match end position to MAX to ensure we never use an LDM during this block */
if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
optLdm->startPosInBlock = UINT_MAX;
optLdm->endPosInBlock = UINT_MAX;
return;
}
/* Calculate appropriate bytes left in matchLength and litLength
* after adjusting based on ldmSeqStore->posInSequence */
currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
currBlockEndPos = currPosInBlock + blockBytesRemaining;
literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
0;
matchBytesRemaining = (literalsBytesRemaining == 0) ?
currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
currSeq.matchLength;
/* If there are more literal bytes than bytes remaining in block, no ldm is possible */
if (literalsBytesRemaining >= blockBytesRemaining) {
optLdm->startPosInBlock = UINT_MAX;
optLdm->endPosInBlock = UINT_MAX;
ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
return;
}
/* Matches may be < MINMATCH by this process. In that case, we will reject them
when we are deciding whether or not to add the ldm */
optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
optLdm->offset = currSeq.offset;
if (optLdm->endPosInBlock > currBlockEndPos) {
/* Match ends after the block ends, we can't use the whole match */
optLdm->endPosInBlock = currBlockEndPos;
ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
} else {
/* Consume nb of bytes equal to size of sequence left */
ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
}
}
/* ZSTD_optLdm_maybeAddMatch():
* Adds a match if it's long enough,
 * based on its 'startPosInBlock' and 'endPosInBlock',
* into 'matches'. Maintains the correct ordering of 'matches'.
*/
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
{
U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
/* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
/* Ensure that current block position is not outside of the match */
if (currPosInBlock < optLdm->startPosInBlock
|| currPosInBlock >= optLdm->endPosInBlock
|| candidateMatchLength < MINMATCH) {
return;
}
if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
candidateOffCode, candidateMatchLength, currPosInBlock);
matches[*nbMatches].len = candidateMatchLength;
matches[*nbMatches].off = candidateOffCode;
(*nbMatches)++;
}
}
/* ZSTD_optLdm_processMatchCandidate():
* Wrapper function to update ldm seq store and call ldm functions as necessary.
*/
static void
ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
ZSTD_match_t* matches, U32* nbMatches,
U32 currPosInBlock, U32 remainingBytes)
{
if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
return;
}
if (currPosInBlock >= optLdm->endPosInBlock) {
if (currPosInBlock > optLdm->endPosInBlock) {
/* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
             * at the end of a match from the ldm seq store, and will often be some bytes
             * beyond 'endPosInBlock'. As such, we need to correct for these "overshoots"
*/
U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
}
ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
}
ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
}
/*-*******************************
* Optimal parser
*********************************/
static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
{
return sol.litlen + sol.mlen;
}
#if 0 /* debug */
static void
listStats(const U32* table, int lastEltID)
{
int const nbElts = lastEltID + 1;
int enb;
for (enb=0; enb < nbElts; enb++) {
(void)table;
/* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
RAWLOG(2, "%4i,", table[enb]);
}
RAWLOG(2, " \n");
}
#endif
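/* Descriptive note (editor's addition, not from the original sources):
 * ZSTD_compressBlock_opt_generic() runs a price-based dynamic program over each
 * block. Forward pass: for every reachable position `cur` relative to `ip`,
 * opt[cur] stores the cheapest known way to reach it (literal vs. match, with
 * prices expressed in fractional bits from the optState statistics). Backward
 * pass (_shortestPath): once a terminating sequence is chosen, the chain of
 * opt[] entries is walked backwards, compacted into opt[storeStart..storeEnd],
 * and then emitted in order through ZSTD_storeSeq(). */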
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize,
const int optLevel,
const ZSTD_dictMode_e dictMode)
{
optState_t* const optStatePtr = &ms->opt;
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ms->window.base;
const BYTE* const prefixStart = base + ms->window.dictLimit;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
U32 nextToUpdate3 = ms->nextToUpdate;
ZSTD_optimal_t* const opt = optStatePtr->priceTable;
ZSTD_match_t* const matches = optStatePtr->matchTable;
ZSTD_optimal_t lastSequence;
ZSTD_optLdm_t optLdm;
optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
/* init */
DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
(U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
assert(optLevel <= 2);
ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
ip += (ip==prefixStart);
/* Match Loop */
while (ip < ilimit) {
U32 cur, last_pos = 0;
/* find first match */
{ U32 const litlen = (U32)(ip - anchor);
U32 const ll0 = !litlen;
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
(U32)(ip-istart), (U32)(iend - ip));
if (!nbMatches) { ip++; continue; }
/* initialize opt[0] */
{ U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
opt[0].mlen = 0; /* means is_a_literal */
opt[0].litlen = litlen;
/* We don't need to include the actual price of the literals because
* it is static for the duration of the forward pass, and is included
* in every price. We include the literal length to avoid negative
* prices when we subtract the previous literal length.
*/
opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
/* large match -> immediate encoding */
{ U32 const maxML = matches[nbMatches-1].len;
U32 const maxOffcode = matches[nbMatches-1].off;
DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
if (maxML > sufficient_len) {
lastSequence.litlen = litlen;
lastSequence.mlen = maxML;
lastSequence.off = maxOffcode;
DEBUGLOG(6, "large match (%u>%u), immediate encoding",
maxML, sufficient_len);
cur = 0;
last_pos = ZSTD_totalLen(lastSequence);
goto _shortestPath;
} }
/* set prices for first matches starting position == 0 */
assert(opt[0].price >= 0);
{ U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
U32 pos;
U32 matchNb;
for (pos = 1; pos < minMatch; pos++) {
opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
}
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
U32 const offcode = matches[matchNb].off;
U32 const end = matches[matchNb].len;
for ( ; pos <= end ; pos++ ) {
U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
U32 const sequencePrice = literalsPrice + matchPrice;
DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
pos, ZSTD_fCost(sequencePrice));
opt[pos].mlen = pos;
opt[pos].off = offcode;
opt[pos].litlen = litlen;
opt[pos].price = (int)sequencePrice;
} }
last_pos = pos-1;
}
}
/* check further positions */
for (cur = 1; cur <= last_pos; cur++) {
const BYTE* const inr = ip + cur;
assert(cur < ZSTD_OPT_NUM);
DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
/* Fix current position with one literal if cheaper */
{ U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
int const price = opt[cur-1].price
+ (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
- (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
assert(price < 1000000000); /* overflow check */
if (price <= opt[cur].price) {
DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
opt[cur].mlen = 0;
opt[cur].off = 0;
opt[cur].litlen = litlen;
opt[cur].price = price;
} else {
DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
}
}
/* Set the repcodes of the current position. We must do it here
* because we rely on the repcodes of the 2nd to last sequence being
* correct to set the next chunks repcodes during the backward
* traversal.
*/
ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
assert(cur >= opt[cur].mlen);
if (opt[cur].mlen != 0) {
U32 const prev = cur - opt[cur].mlen;
repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
} else {
ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
}
/* last match must start at a minimum distance of 8 from oend */
if (inr > ilimit) continue;
if (cur == last_pos) break;
if ( (optLevel==0) /*static_test*/
&& (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
}
assert(opt[cur].price >= 0);
{ U32 const ll0 = (opt[cur].mlen != 0);
U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
U32 const previousPrice = (U32)opt[cur].price;
U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
U32 matchNb;
ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
(U32)(inr-istart), (U32)(iend-inr));
if (!nbMatches) {
DEBUGLOG(7, "rPos:%u : no match found", cur);
continue;
}
{ U32 const maxML = matches[nbMatches-1].len;
DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
inr-istart, cur, nbMatches, maxML);
if ( (maxML > sufficient_len)
|| (cur + maxML >= ZSTD_OPT_NUM) ) {
lastSequence.mlen = maxML;
lastSequence.off = matches[nbMatches-1].off;
lastSequence.litlen = litlen;
cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
last_pos = cur + ZSTD_totalLen(lastSequence);
if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
goto _shortestPath;
} }
/* set prices using matches found at position == cur */
for (matchNb = 0; matchNb < nbMatches; matchNb++) {
U32 const offset = matches[matchNb].off;
U32 const lastML = matches[matchNb].len;
U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
U32 mlen;
DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
matchNb, matches[matchNb].off, lastML, litlen);
for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
U32 const pos = cur + mlen;
int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
if ((pos > last_pos) || (price < opt[pos].price)) {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
opt[pos].mlen = mlen;
opt[pos].off = offset;
opt[pos].litlen = litlen;
opt[pos].price = price;
} else {
DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
}
} } }
} /* for (cur = 1; cur <= last_pos; cur++) */
lastSequence = opt[last_pos];
cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
        assert(cur < ZSTD_OPT_NUM);  /* guard against overflow */
_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
assert(opt[0].mlen == 0);
/* Set the next chunk's repcodes based on the repcodes of the beginning
* of the last match, and the last sequence. This avoids us having to
* update them while traversing the sequences.
*/
if (lastSequence.mlen != 0) {
repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
ZSTD_memcpy(rep, &reps, sizeof(reps));
} else {
ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
}
{ U32 const storeEnd = cur + 1;
U32 storeStart = storeEnd;
U32 seqPos = cur;
DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
last_pos, cur); (void)last_pos;
assert(storeEnd < ZSTD_OPT_NUM);
DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
opt[storeEnd] = lastSequence;
while (seqPos > 0) {
U32 const backDist = ZSTD_totalLen(opt[seqPos]);
storeStart--;
DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
opt[storeStart] = opt[seqPos];
seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
}
/* save sequences */
DEBUGLOG(6, "sending selected sequences into seqStore")
{ U32 storePos;
for (storePos=storeStart; storePos <= storeEnd; storePos++) {
U32 const llen = opt[storePos].litlen;
U32 const mlen = opt[storePos].mlen;
U32 const offCode = opt[storePos].off;
U32 const advance = llen + mlen;
DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
anchor - istart, (unsigned)llen, (unsigned)mlen);
if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
assert(storePos == storeEnd); /* must be last sequence */
ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
continue; /* will finish */
}
assert(anchor + llen <= iend);
ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
anchor += advance;
ip = anchor;
} }
ZSTD_setBasePrices(optStatePtr, optLevel);
}
} /* while (ip < ilimit) */
/* Return the last literals size */
return (size_t)(iend - anchor);
}
static size_t ZSTD_compressBlock_opt0(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}
static size_t ZSTD_compressBlock_opt2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}
size_t ZSTD_compressBlock_btopt(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btopt");
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
/* ZSTD_initStats_ultra():
* make a first compression pass, just to seed stats with more accurate starting values.
* only works on first block, with no dictionary and no ldm.
* this function cannot error, hence its contract must be respected.
*/
static void
ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
assert(ms->opt.litLengthSum == 0); /* first block */
assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */
assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */
ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/
/* invalidate first scan from history */
ZSTD_resetSeqStore(seqStore);
ms->window.base -= srcSize;
ms->window.dictLimit += (U32)srcSize;
ms->window.lowLimit = ms->window.dictLimit;
ms->nextToUpdate = ms->window.dictLimit;
}
size_t ZSTD_compressBlock_btultra(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btultra2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
U32 const curr = (U32)((const BYTE*)src - ms->window.base);
DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
/* 2-pass strategy:
* this strategy makes a first pass over first block to collect statistics
* and seed next round's statistics with it.
* After 1st pass, function forgets everything, and starts a new block.
* Consequently, this can only work if no data has been previously loaded in tables,
* aka, no dictionary, no prefix, no ldm preprocessing.
* The compression ratio gain is generally small (~0.5% on first block),
* the cost is 2x cpu time on first block. */
assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
if ( (ms->opt.litLengthSum==0) /* first block */
&& (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
&& (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
&& (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
&& (srcSize > ZSTD_PREDEF_THRESHOLD)
) {
ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
}
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
}
size_t ZSTD_compressBlock_btopt_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btultra_dictMatchState(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_btopt_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
size_t ZSTD_compressBlock_btultra_extDict(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{
return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
}
/* note : no btultra2 variant for extDict nor dictMatchState,
* because btultra2 is not meant to work with dictionaries
* and is only specific for the first block (no prefix) */
| linux-master | lib/zstd/compress/zstd_opt.c |
/* ******************************************************************
* FSE : Finite State Entropy encoder
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
* Includes
****************************************************************/
#include "../common/compiler.h"
#include "../common/mem.h" /* U32, U16, etc. */
#include "../common/debug.h" /* assert, DEBUGLOG */
#include "hist.h" /* HIST_count_wksp */
#include "../common/bitstream.h"
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#define ZSTD_DEPS_NEED_MATH64
#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
/* **************************************************************
* Error Management
****************************************************************/
#define FSE_isError ERR_isError
/* **************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
/* Function templates */
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
* wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
*/
size_t FSE_buildCTable_wksp(FSE_CTable* ct,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize)
{
U32 const tableSize = 1 << tableLog;
U32 const tableMask = tableSize - 1;
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
U32 const step = FSE_TABLESTEP(tableSize);
U32 const maxSV1 = maxSymbolValue+1;
U16* cumul = (U16*)workSpace; /* size = maxSV1 */
FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
U32 highThreshold = tableSize-1;
assert(((size_t)workSpace & 1) == 0); /* Must be 2 bytes-aligned */
if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
/* CTable header */
tableU16[-2] = (U16) tableLog;
tableU16[-1] = (U16) maxSymbolValue;
assert(tableLog < 16); /* required for threshold strategy to work */
/* For explanations on how to distribute symbol values over the table :
* http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
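    /* Worked example (editor's addition, assuming FSE_TABLESTEP(ts) == (ts>>1)+(ts>>3)+3):
     * for tableLog==5, tableSize==32, step == 16+4+3 == 23. Because 23 is odd it is
     * coprime with the power-of-two table size, so repeatedly adding `step` modulo
     * tableMask visits every slot exactly once, scattering each symbol's states
     * roughly evenly across the table. */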
#ifdef __clang_analyzer__
ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
#endif
/* symbol start positions */
{ U32 u;
cumul[0] = 0;
for (u=1; u <= maxSV1; u++) {
if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
cumul[u] = cumul[u-1] + 1;
tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
} else {
assert(normalizedCounter[u-1] >= 0);
cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
assert(cumul[u] >= cumul[u-1]); /* no overflow */
} }
cumul[maxSV1] = (U16)(tableSize+1);
}
/* Spread symbols */
if (highThreshold == tableSize - 1) {
/* Case for no low prob count symbols. Lay down 8 bytes at a time
* to reduce branch misses since we are operating on a small block
*/
BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
{ U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
for (s=0; s<maxSV1; ++s, sv += add) {
int i;
int const n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
assert(n>=0);
pos += (size_t)n;
}
}
/* Spread symbols across the table. Lack of lowprob symbols means that
* we don't need variable sized inner loop, so we can unroll the loop and
* reduce branch misses.
*/
{ size_t position = 0;
size_t s;
size_t const unroll = 2; /* Experimentally determined optimal unroll */
assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
for (s = 0; s < (size_t)tableSize; s += unroll) {
size_t u;
for (u = 0; u < unroll; ++u) {
size_t const uPosition = (position + (u * step)) & tableMask;
tableSymbol[uPosition] = spread[s + u];
}
position = (position + (unroll * step)) & tableMask;
}
assert(position == 0); /* Must have initialized all positions */
}
} else {
U32 position = 0;
U32 symbol;
for (symbol=0; symbol<maxSV1; symbol++) {
int nbOccurrences;
int const freq = normalizedCounter[symbol];
for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
position = (position + step) & tableMask;
while (position > highThreshold)
position = (position + step) & tableMask; /* Low proba area */
} }
assert(position==0); /* Must have initialized all positions */
}
/* Build table */
{ U32 u; for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
} }
/* Build Symbol Transformation Table */
{ unsigned total = 0;
unsigned s;
for (s=0; s<=maxSymbolValue; s++) {
switch (normalizedCounter[s])
{
case 0:
/* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
break;
case -1:
case 1:
symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
assert(total <= INT_MAX);
symbolTT[s].deltaFindState = (int)(total - 1);
total ++;
break;
default :
assert(normalizedCounter[s] > 1);
{ U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
total += (unsigned)normalizedCounter[s];
} } } }
#if 0 /* debug : symbol costs */
DEBUGLOG(5, "\n --- table statistics : ");
{ U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
symbol, normalizedCounter[symbol],
FSE_getMaxNbBits(symbolTT, symbol),
(double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
} }
#endif
return 0;
}
#ifndef FSE_COMMONDEFS_ONLY
/*-**************************************************************
* FSE NCount encoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ 4 /* bitCount initialized at 4 */
+ 2 /* first two symbols may use one additional bit each */) / 8)
+ 1 /* round up to whole nb bytes */
+ 2 /* additional two bytes for bitstream flush */;
return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
}
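/* Descriptive note (editor's addition): the normalized counts are serialized as a
 * compact bitstream. The first 4 bits store (tableLog - FSE_MIN_TABLELOG). Each
 * count is then written as (count + 1), using a number of bits derived from the
 * probability still left to distribute ("remaining"); the +1 shift lets the -1
 * low-probability marker be encoded as 0. A count of exactly 0 sets previousIs0,
 * after which 2-bit repeat fields describe the run of zero counts (a field value
 * of 3 meaning "three more zeros, then another repeat field"); runs of 24 zeros
 * are emitted as a full 0xFFFF field. The bit accumulator is flushed to the
 * output buffer 16 bits at a time. */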
static size_t
FSE_writeNCount_generic (void* header, size_t headerBufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
unsigned writeIsSafe)
{
BYTE* const ostart = (BYTE*) header;
BYTE* out = ostart;
BYTE* const oend = ostart + headerBufferSize;
int nbBits;
const int tableSize = 1 << tableLog;
int remaining;
int threshold;
U32 bitStream = 0;
int bitCount = 0;
unsigned symbol = 0;
unsigned const alphabetSize = maxSymbolValue + 1;
int previousIs0 = 0;
/* Table Size */
bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
bitCount += 4;
/* Init */
remaining = tableSize+1; /* +1 for extra accuracy */
threshold = tableSize;
nbBits = tableLog+1;
while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
if (previousIs0) {
unsigned start = symbol;
while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
if (symbol == alphabetSize) break; /* incorrect distribution */
while (symbol >= start+24) {
start+=24;
bitStream += 0xFFFFU << bitCount;
if ((!writeIsSafe) && (out > oend-2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE) bitStream;
out[1] = (BYTE)(bitStream>>8);
out+=2;
bitStream>>=16;
}
while (symbol >= start+3) {
start+=3;
bitStream += 3 << bitCount;
bitCount += 2;
}
bitStream += (symbol-start) << bitCount;
bitCount += 2;
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
{ int count = normalizedCounter[symbol++];
int const max = (2*threshold-1) - remaining;
remaining -= count < 0 ? -count : count;
count++; /* +1 for extra accuracy */
if (count>=threshold)
count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
bitStream += count << bitCount;
bitCount += nbBits;
bitCount -= (count<max);
previousIs0 = (count==1);
if (remaining<1) return ERROR(GENERIC);
while (remaining<threshold) { nbBits--; threshold>>=1; }
}
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
if (remaining != 1)
return ERROR(GENERIC); /* incorrect normalized distribution */
assert(symbol <= alphabetSize);
/* flush remaining bitStream */
if ((!writeIsSafe) && (out > oend - 2))
return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out+= (bitCount+7) /8;
return (out-ostart);
}
size_t FSE_writeNCount (void* buffer, size_t bufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
}
/*-**************************************************************
* FSE Compression Code
****************************************************************/
FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
{
size_t size;
if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
return (FSE_CTable*)ZSTD_malloc(size);
}
void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
assert(srcSize > 1); /* Not supported, RLE should be used instead */
return minBits;
}
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
assert(srcSize > 1); /* Not supported, RLE should be used instead */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
return tableLog;
}
unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
/* Secondary normalization method.
To be used when primary method fails. */
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
{
short const NOT_YET_ASSIGNED = -2;
U32 s;
U32 distributed = 0;
U32 ToDistribute;
/* Init */
U32 const lowThreshold = (U32)(total >> tableLog);
U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
for (s=0; s<=maxSymbolValue; s++) {
if (count[s] == 0) {
norm[s]=0;
continue;
}
if (count[s] <= lowThreshold) {
norm[s] = lowProbCount;
distributed++;
total -= count[s];
continue;
}
if (count[s] <= lowOne) {
norm[s] = 1;
distributed++;
total -= count[s];
continue;
}
norm[s]=NOT_YET_ASSIGNED;
}
ToDistribute = (1 << tableLog) - distributed;
if (ToDistribute == 0)
return 0;
if ((total / ToDistribute) > lowOne) {
/* risk of rounding to zero */
lowOne = (U32)((total * 3) / (ToDistribute * 2));
for (s=0; s<=maxSymbolValue; s++) {
if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
norm[s] = 1;
distributed++;
total -= count[s];
continue;
} }
ToDistribute = (1 << tableLog) - distributed;
}
if (distributed == maxSymbolValue+1) {
/* all values are pretty poor;
probably incompressible data (should have already been detected);
find max, then give all remaining points to max */
U32 maxV = 0, maxC = 0;
for (s=0; s<=maxSymbolValue; s++)
if (count[s] > maxC) { maxV=s; maxC=count[s]; }
norm[maxV] += (short)ToDistribute;
return 0;
}
if (total == 0) {
/* all of the symbols were low enough for the lowOne or lowThreshold */
for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
if (norm[s] > 0) { ToDistribute--; norm[s]++; }
return 0;
}
{ U64 const vStepLog = 62 - tableLog;
U64 const mid = (1ULL << (vStepLog-1)) - 1;
U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
U64 tmpTotal = mid;
for (s=0; s<=maxSymbolValue; s++) {
if (norm[s]==NOT_YET_ASSIGNED) {
U64 const end = tmpTotal + (count[s] * rStep);
U32 const sStart = (U32)(tmpTotal >> vStepLog);
U32 const sEnd = (U32)(end >> vStepLog);
U32 const weight = sEnd - sStart;
if (weight < 1)
return ERROR(GENERIC);
norm[s] = (short)weight;
tmpTotal = end;
} } }
return 0;
}
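/* Descriptive note (editor's addition): FSE_normalizeCount() rescales raw symbol
 * counts so they sum exactly to 1<<tableLog. The primary method uses a single
 * division to derive a fixed-point step, multiplies each count by it, and applies
 * a rounding correction driven by rtbTable; any leftover probability is credited
 * to (or taken from) the most frequent symbol. Symbols that are present but very
 * rare receive lowProbCount (-1 or 1). When the correction on the largest symbol
 * would be too large, the slower FSE_normalizeM2() fallback above redistributes
 * the remainder more carefully. */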
size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t total,
unsigned maxSymbolValue, unsigned useLowProbCount)
{
/* Sanity checks */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
{ static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
short const lowProbCount = useLowProbCount ? -1 : 1;
U64 const scale = 62 - tableLog;
U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
int stillToDistribute = 1<<tableLog;
unsigned s;
unsigned largest=0;
short largestP=0;
U32 lowThreshold = (U32)(total >> tableLog);
for (s=0; s<=maxSymbolValue; s++) {
if (count[s] == total) return 0; /* rle special case */
if (count[s] == 0) { normalizedCounter[s]=0; continue; }
if (count[s] <= lowThreshold) {
normalizedCounter[s] = lowProbCount;
stillToDistribute--;
} else {
short proba = (short)((count[s]*step) >> scale);
if (proba<8) {
U64 restToBeat = vStep * rtbTable[proba];
proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
}
if (proba > largestP) { largestP=proba; largest=s; }
normalizedCounter[s] = proba;
stillToDistribute -= proba;
} }
if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
/* corner case, need another normalization method */
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
if (FSE_isError(errorCode)) return errorCode;
}
else normalizedCounter[largest] += (short)stillToDistribute;
}
#if 0
{ /* Print Table (debug) */
U32 s;
U32 nTotal = 0;
for (s=0; s<=maxSymbolValue; s++)
RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
for (s=0; s<=maxSymbolValue; s++)
nTotal += abs(normalizedCounter[s]);
if (nTotal != (1U<<tableLog))
RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
getchar();
}
#endif
return tableLog;
}
/* fake FSE_CTable, for raw (uncompressed) input */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* header */
tableU16[-2] = (U16) nbBits;
tableU16[-1] = (U16) maxSymbolValue;
/* Build table */
for (s=0; s<tableSize; s++)
tableU16[s] = (U16)(tableSize + s);
/* Build Symbol Transformation Table */
{ const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
for (s=0; s<=maxSymbolValue; s++) {
symbolTT[s].deltaNbBits = deltaNbBits;
symbolTT[s].deltaFindState = s-1;
} }
return 0;
}
/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
void* ptr = ct;
U16* tableU16 = ( (U16*) ptr) + 2;
void* FSCTptr = (U32*)ptr + 2;
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
/* header */
tableU16[-2] = (U16) 0;
tableU16[-1] = (U16) symbolValue;
/* Build table */
tableU16[0] = 0;
tableU16[1] = 0; /* just in case */
/* Build Symbol Transformation Table */
symbolTT[symbolValue].deltaNbBits = 0;
symbolTT[symbolValue].deltaFindState = 0;
return 0;
}
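/* Descriptive note (editor's addition): FSE encoding proceeds backwards, from the
 * end of the input toward its beginning, so the decoder can regenerate symbols in
 * forward order. Two FSE states (CState1/CState2) encode alternating symbols to
 * expose instruction-level parallelism, and the bit container is flushed whenever
 * the static capacity tests below show it could otherwise overflow. The `fast`
 * variant skips bounds checks because the caller has guaranteed enough output
 * space (dstSize >= FSE_BLOCKBOUND(srcSize)). */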
static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
const void* src, size_t srcSize,
const FSE_CTable* ct, const unsigned fast)
{
const BYTE* const istart = (const BYTE*) src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip=iend;
BIT_CStream_t bitC;
FSE_CState_t CState1, CState2;
/* init */
if (srcSize <= 2) return 0;
{ size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
if (srcSize & 1) {
FSE_initCState2(&CState1, ct, *--ip);
FSE_initCState2(&CState2, ct, *--ip);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
FSE_FLUSHBITS(&bitC);
} else {
FSE_initCState2(&CState2, ct, *--ip);
FSE_initCState2(&CState1, ct, *--ip);
}
/* join to mod 4 */
srcSize -= 2;
if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
FSE_encodeSymbol(&bitC, &CState2, *--ip);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
FSE_FLUSHBITS(&bitC);
}
/* 2 or 4 encoding per loop */
while ( ip>istart ) {
FSE_encodeSymbol(&bitC, &CState2, *--ip);
if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
FSE_FLUSHBITS(&bitC);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
FSE_encodeSymbol(&bitC, &CState2, *--ip);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
}
FSE_FLUSHBITS(&bitC);
}
FSE_flushCState(&bitC, &CState2);
FSE_flushCState(&bitC, &CState1);
return BIT_closeCStream(&bitC);
}
size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
const void* src, size_t srcSize,
const FSE_CTable* ct)
{
unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
if (fast)
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
else
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
#endif /* FSE_COMMONDEFS_ONLY */
| linux-master | lib/zstd/compress/fse_compress.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/* zstd_ddict.c :
* concentrates all logic that needs to know the internals of ZSTD_DDict object */
/*-*******************************************************
* Dependencies
*********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"
/*-*******************************************************
* Types
*********************************************************/
struct ZSTD_DDict_s {
void* dictBuffer;
const void* dictContent;
size_t dictSize;
ZSTD_entropyDTables_t entropy;
U32 dictID;
U32 entropyPresent;
ZSTD_customMem cMem;
}; /* typedef'd to ZSTD_DDict within "zstd.h" */
const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
{
assert(ddict != NULL);
return ddict->dictContent;
}
size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
{
assert(ddict != NULL);
return ddict->dictSize;
}
void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
DEBUGLOG(4, "ZSTD_copyDDictParameters");
assert(dctx != NULL);
assert(ddict != NULL);
dctx->dictID = ddict->dictID;
dctx->prefixStart = ddict->dictContent;
dctx->virtualStart = ddict->dictContent;
dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
dctx->previousDstEnd = dctx->dictEnd;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentBeginForFuzzing = dctx->prefixStart;
dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
if (ddict->entropyPresent) {
dctx->litEntropy = 1;
dctx->fseEntropy = 1;
dctx->LLTptr = ddict->entropy.LLTable;
dctx->MLTptr = ddict->entropy.MLTable;
dctx->OFTptr = ddict->entropy.OFTable;
dctx->HUFptr = ddict->entropy.hufTable;
dctx->entropy.rep[0] = ddict->entropy.rep[0];
dctx->entropy.rep[1] = ddict->entropy.rep[1];
dctx->entropy.rep[2] = ddict->entropy.rep[2];
} else {
dctx->litEntropy = 0;
dctx->fseEntropy = 0;
}
}
static size_t
ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
ZSTD_dictContentType_e dictContentType)
{
ddict->dictID = 0;
ddict->entropyPresent = 0;
if (dictContentType == ZSTD_dct_rawContent) return 0;
if (ddict->dictSize < 8) {
if (dictContentType == ZSTD_dct_fullDict)
return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
return 0; /* pure content mode */
}
{ U32 const magic = MEM_readLE32(ddict->dictContent);
if (magic != ZSTD_MAGIC_DICTIONARY) {
if (dictContentType == ZSTD_dct_fullDict)
return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
return 0; /* pure content mode */
}
}
ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
/* load entropy tables */
RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
&ddict->entropy, ddict->dictContent, ddict->dictSize)),
dictionary_corrupted, "");
ddict->entropyPresent = 1;
return 0;
}
static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
ddict->dictBuffer = NULL;
ddict->dictContent = dict;
if (!dict) dictSize = 0;
} else {
void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
ddict->dictBuffer = internalBuffer;
ddict->dictContent = internalBuffer;
if (!internalBuffer) return ERROR(memory_allocation);
ZSTD_memcpy(internalBuffer, dict, dictSize);
}
ddict->dictSize = dictSize;
ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
/* parse dictionary content */
FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
return 0;
}
ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_customMem customMem)
{
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
if (ddict == NULL) return NULL;
ddict->cMem = customMem;
{ size_t const initResult = ZSTD_initDDict_internal(ddict,
dict, dictSize,
dictLoadMethod, dictContentType);
if (ZSTD_isError(initResult)) {
ZSTD_freeDDict(ddict);
return NULL;
} }
return ddict;
}
}
/*! ZSTD_createDDict() :
* Create a digested dictionary, to start decompression without startup delay.
* `dict` content is copied inside DDict.
* Consequently, `dict` can be released after `ZSTD_DDict` creation */
ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
{
ZSTD_customMem const allocator = { NULL, NULL, NULL };
return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
}
/*! ZSTD_createDDict_byReference() :
* Create a digested dictionary, to start decompression without startup delay.
* Dictionary content is simply referenced, it will be accessed during decompression.
* Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
{
ZSTD_customMem const allocator = { NULL, NULL, NULL };
return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
}
const ZSTD_DDict* ZSTD_initStaticDDict(
void* sBuffer, size_t sBufferSize,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
size_t const neededSpace = sizeof(ZSTD_DDict)
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
assert(sBuffer != NULL);
assert(dict != NULL);
if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
if (sBufferSize < neededSpace) return NULL;
if (dictLoadMethod == ZSTD_dlm_byCopy) {
ZSTD_memcpy(ddict+1, dict, dictSize); /* local copy */
dict = ddict+1;
}
if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
dict, dictSize,
ZSTD_dlm_byRef, dictContentType) ))
return NULL;
return ddict;
}
size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0; /* support free on NULL */
{ ZSTD_customMem const cMem = ddict->cMem;
ZSTD_customFree(ddict->dictBuffer, cMem);
ZSTD_customFree(ddict, cMem);
return 0;
}
}
/*! ZSTD_estimateDDictSize() :
* Estimate amount of memory that will be needed to create a dictionary for decompression.
* Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
{
return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}
size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0; /* support sizeof on NULL */
return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
}
/*! ZSTD_getDictID_fromDDict() :
* Provides the dictID of the dictionary loaded into `ddict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
{
if (ddict==NULL) return 0;
return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
}
| linux-master | lib/zstd/decompress/zstd_ddict.c |
/* ******************************************************************
* huff0 huffman decoder,
* part of Finite State Entropy library
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
* Dependencies
****************************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h" /* BIT_* */
#include "../common/fse.h" /* to compress headers */
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
#include "../common/zstd_internal.h"
/* **************************************************************
* Constants
****************************************************************/
#define HUF_DECODER_FAST_TABLELOG 11
/* **************************************************************
* Macros
****************************************************************/
/* These two optional macros force the use of one or the other of the two
 * Huffman decompression implementations. You can't force in both directions
* at the same time.
*/
#if defined(HUF_FORCE_DECOMPRESS_X1) && \
defined(HUF_FORCE_DECOMPRESS_X2)
#error "Cannot force the use of the X1 and X2 decoders at the same time!"
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2
# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
#else
# define HUF_ASM_X86_64_BMI2_ATTRS
#endif
#define HUF_EXTERN_C
#define HUF_ASM_DECL HUF_EXTERN_C
#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
# define HUF_NEED_BMI2_FUNCTION 1
#else
# define HUF_NEED_BMI2_FUNCTION 0
#endif
#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
# define HUF_NEED_DEFAULT_FUNCTION 1
#else
# define HUF_NEED_DEFAULT_FUNCTION 0
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define HUF_isError ERR_isError
/* **************************************************************
* Byte alignment for workSpace management
****************************************************************/
#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
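/* Worked example (illustrative): HUF_ALIGN(13, 8) == (13 + 7) & ~7 == 16,
 * i.e. the smallest multiple of 8 that is >= 13. */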
/* **************************************************************
* BMI2 Variant Wrappers
****************************************************************/
#if DYNAMIC_BMI2
#define HUF_DGEN(fn) \
\
static size_t fn##_default( \
void* dst, size_t dstSize, \
const void* cSrc, size_t cSrcSize, \
const HUF_DTable* DTable) \
{ \
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
} \
\
static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \
void* dst, size_t dstSize, \
const void* cSrc, size_t cSrcSize, \
const HUF_DTable* DTable) \
{ \
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
} \
\
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
{ \
if (bmi2) { \
return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
} \
return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
}
#else
#define HUF_DGEN(fn) \
static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
{ \
(void)bmi2; \
return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
}
#endif
/*-***************************/
/* generic DTableDesc */
/*-***************************/
typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
{
DTableDesc dtd;
ZSTD_memcpy(&dtd, table, sizeof(dtd));
return dtd;
}
#if ZSTD_ENABLE_ASM_X86_64_BMI2
static size_t HUF_initDStream(BYTE const* ip) {
BYTE const lastByte = ip[7];
size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
size_t const value = MEM_readLEST(ip) | 1;
assert(bitsConsumed <= 8);
return value << bitsConsumed;
}
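/* Illustrative note (not part of the original source): suppose the last
 * byte of a stream is 0x05 (binary 0000_0101). Its highest set bit, at
 * position 2, is the closing sentinel, so bitsConsumed = 8 - 2 = 6: the
 * five zero padding bits plus the sentinel are already "used". After
 * `| 1` and `<< 6`, bits 0..5 of the container are zero and bit 6 is set,
 * so CountTrailingZeros(bits) == 6 recovers the consumed-bit count, and
 * the first real payload bit sits at the MSB, exactly as described for
 * bits[] in HUF_DecompressAsmArgs_init() below. */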
typedef struct {
BYTE const* ip[4];
BYTE* op[4];
U64 bits[4];
void const* dt;
BYTE const* ilimit;
BYTE* oend;
BYTE const* iend[4];
} HUF_DecompressAsmArgs;
/*
* Initializes args for the asm decoding loop.
* @returns 0 on success
* 1 if the fallback implementation should be used.
* Or an error code on failure.
*/
static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
{
void const* dt = DTable + 1;
U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
BYTE* const oend = (BYTE*)dst + dstSize;
/* The following condition is false on x32 platform,
* but HUF_asm is not compatible with this ABI */
if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1;
/* strict minimum : jump table + 1 byte per stream */
if (srcSize < 10)
return ERROR(corruption_detected);
/* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
* If table log is not correct at this point, fallback to the old decoder.
* On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
*/
if (dtLog != HUF_DECODER_FAST_TABLELOG)
return 1;
/* Read the jump table. */
{
const BYTE* const istart = (const BYTE*)src;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
args->iend[0] = istart + 6; /* jumpTable */
args->iend[1] = args->iend[0] + length1;
args->iend[2] = args->iend[1] + length2;
args->iend[3] = args->iend[2] + length3;
/* HUF_initDStream() requires this, and an input this small
 * won't benefit from the ASM loop anyway.
* length1 must be >= 16 so that ip[0] >= ilimit before the loop
* starts.
*/
if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
return 1;
if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
}
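    /* Illustrative example of the 4-stream header parsed above (not part
     * of the original source): if the first three streams occupy 100, 80
     * and 80 compressed bytes, the 6-byte jump table is the little-endian
     * U16 sequence 64 00 50 00 50 00; stream 1 then starts at offset 6,
     * stream 2 at 6+100, stream 3 at 6+180, and stream 4's length is
     * whatever remains of srcSize. */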
/* ip[] contains the position that is currently loaded into bits[]. */
args->ip[0] = args->iend[1] - sizeof(U64);
args->ip[1] = args->iend[2] - sizeof(U64);
args->ip[2] = args->iend[3] - sizeof(U64);
args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);
/* op[] contains the output pointers. */
args->op[0] = (BYTE*)dst;
args->op[1] = args->op[0] + (dstSize+3)/4;
args->op[2] = args->op[1] + (dstSize+3)/4;
args->op[3] = args->op[2] + (dstSize+3)/4;
/* No point to call the ASM loop for tiny outputs. */
if (args->op[3] >= oend)
return 1;
/* bits[] is the bit container.
* It is read from the MSB down to the LSB.
* It is shifted left as it is read, and zeros are
* shifted in. After the lowest valid bit a 1 is
* set, so that CountTrailingZeros(bits[]) can be used
* to count how many bits we've consumed.
*/
args->bits[0] = HUF_initDStream(args->ip[0]);
args->bits[1] = HUF_initDStream(args->ip[1]);
args->bits[2] = HUF_initDStream(args->ip[2]);
args->bits[3] = HUF_initDStream(args->ip[3]);
/* If ip[] >= ilimit, it is guaranteed to be safe to
* reload bits[]. It may be beyond its section, but is
* guaranteed to be valid (>= istart).
*/
args->ilimit = ilimit;
args->oend = oend;
args->dt = dt;
return 0;
}
static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd)
{
/* Validate that we haven't overwritten. */
if (args->op[stream] > segmentEnd)
return ERROR(corruption_detected);
/* Validate that we haven't read beyond iend[].
* Note that ip[] may be < iend[] because the MSB is
* the next bit to read, and we may have consumed 100%
* of the stream, so down to iend[i] - 8 is valid.
*/
if (args->ip[stream] < args->iend[stream] - 8)
return ERROR(corruption_detected);
/* Construct the BIT_DStream_t. */
bit->bitContainer = MEM_readLE64(args->ip[stream]);
bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]);
bit->start = (const char*)args->iend[0];
bit->limitPtr = bit->start + sizeof(size_t);
bit->ptr = (const char*)args->ip[stream];
return 0;
}
#endif
#ifndef HUF_FORCE_DECOMPRESS_X2
/*-***************************/
/* single-symbol decoding */
/*-***************************/
typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */
/*
* Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
* a time.
*/
static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
U64 D4;
if (MEM_isLittleEndian()) {
D4 = (symbol << 8) + nbBits;
} else {
D4 = symbol + (nbBits << 8);
}
D4 *= 0x0001000100010001ULL;
return D4;
}
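/* Worked example (illustrative, little-endian): symbol 0x41 with nbBits 3
 * gives a 16-bit entry of (0x41 << 8) + 3 = 0x4103, and D4 =
 * 0x4103410341034103. Stored with MEM_write64(), each of the four
 * consecutive HUF_DEltX1 slots then reads back as { nbBits = 3, byte = 0x41 }. */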
/*
 * Increases the tableLog to targetTableLog and rescales the stats.
* If tableLog > targetTableLog this is a no-op.
* @returns New tableLog
*/
static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
{
if (tableLog > targetTableLog)
return tableLog;
if (tableLog < targetTableLog) {
U32 const scale = targetTableLog - tableLog;
U32 s;
/* Increase the weight for all non-zero probability symbols by scale. */
for (s = 0; s < nbSymbols; ++s) {
huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
}
/* Update rankVal to reflect the new weights.
* All weights except 0 get moved to weight + scale.
* Weights [1, scale] are empty.
*/
for (s = targetTableLog; s > scale; --s) {
rankVal[s] = rankVal[s - scale];
}
for (s = scale; s > 0; --s) {
rankVal[s] = 0;
}
}
return targetTableLog;
}
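/* Worked example (illustrative): with tableLog 6 and targetTableLog 11,
 * scale is 5, so a symbol of weight 2 becomes weight 7, rankVal[w] moves
 * to rankVal[w+5] for every populated weight, and rankVal[1..5] are
 * cleared, matching the loops above. */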
typedef struct {
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
} HUF_ReadDTableX1_Workspace;
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
}
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
{
U32 tableLog = 0;
U32 nbSymbols = 0;
size_t iSize;
void* const dtPtr = DTable + 1;
HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
if (HUF_isError(iSize)) return iSize;
/* Table header */
{ DTableDesc dtd = HUF_getDTableDesc(DTable);
U32 const maxTableLog = dtd.maxTableLog + 1;
U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
}
/* Compute symbols and rankStart given rankVal:
*
* rankVal already contains the number of values of each weight.
*
* symbols contains the symbols ordered by weight. First are the rankVal[0]
* weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
* symbols[0] is filled (but unused) to avoid a branch.
*
* rankStart contains the offset where each rank belongs in the DTable.
* rankStart[0] is not filled because there are no entries in the table for
* weight 0.
*/
{
int n;
int nextRankStart = 0;
int const unroll = 4;
int const nLimit = (int)nbSymbols - unroll + 1;
for (n=0; n<(int)tableLog+1; n++) {
U32 const curr = nextRankStart;
nextRankStart += wksp->rankVal[n];
wksp->rankStart[n] = curr;
}
for (n=0; n < nLimit; n += unroll) {
int u;
for (u=0; u < unroll; ++u) {
size_t const w = wksp->huffWeight[n+u];
wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
}
}
for (; n < (int)nbSymbols; ++n) {
size_t const w = wksp->huffWeight[n];
wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
}
}
/* fill DTable
* We fill all entries of each weight in order.
* That way length is a constant for each iteration of the outer loop.
* We can switch based on the length to a different inner loop which is
* optimized for that particular case.
*/
{
U32 w;
int symbol=wksp->rankVal[0];
int rankStart=0;
for (w=1; w<tableLog+1; ++w) {
int const symbolCount = wksp->rankVal[w];
int const length = (1 << w) >> 1;
int uStart = rankStart;
BYTE const nbBits = (BYTE)(tableLog + 1 - w);
int s;
int u;
switch (length) {
case 1:
for (s=0; s<symbolCount; ++s) {
HUF_DEltX1 D;
D.byte = wksp->symbols[symbol + s];
D.nbBits = nbBits;
dt[uStart] = D;
uStart += 1;
}
break;
case 2:
for (s=0; s<symbolCount; ++s) {
HUF_DEltX1 D;
D.byte = wksp->symbols[symbol + s];
D.nbBits = nbBits;
dt[uStart+0] = D;
dt[uStart+1] = D;
uStart += 2;
}
break;
case 4:
for (s=0; s<symbolCount; ++s) {
U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
MEM_write64(dt + uStart, D4);
uStart += 4;
}
break;
case 8:
for (s=0; s<symbolCount; ++s) {
U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
MEM_write64(dt + uStart, D4);
MEM_write64(dt + uStart + 4, D4);
uStart += 8;
}
break;
default:
for (s=0; s<symbolCount; ++s) {
U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
for (u=0; u < length; u += 16) {
MEM_write64(dt + uStart + u + 0, D4);
MEM_write64(dt + uStart + u + 4, D4);
MEM_write64(dt + uStart + u + 8, D4);
MEM_write64(dt + uStart + u + 12, D4);
}
assert(u == length);
uStart += length;
}
break;
}
symbol += symbolCount;
rankStart += symbolCount * length;
}
}
return iSize;
}
FORCE_INLINE_TEMPLATE BYTE
HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
BYTE const c = dt[val].byte;
BIT_skipBits(Dstream, dt[val].nbBits);
return c;
}
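/* Illustrative note (not part of the original source): the table has
 * 2^dtLog entries, and a code of length nbBits fills 2^(dtLog - nbBits)
 * consecutive slots, so whichever dtLog-bit value is peeked while that
 * code is at the front of the stream, dt[val] yields the same
 * { byte, nbBits } pair; only nbBits bits are then skipped. */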
#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
*ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
HINT_INLINE size_t
HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
if ((pEnd - p) > 3) {
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
}
} else {
BIT_reloadDStream(bitDPtr);
}
/* [0-3] symbols remaining */
if (MEM_32bits())
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
/* no more data to retrieve from bitstream, no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
return pEnd-pStart;
}
FORCE_INLINE_TEMPLATE size_t
HUF_decompress1X1_usingDTable_internal_body(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + dstSize;
const void* dtPtr = DTable + 1;
const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
BIT_DStream_t bitD;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
return dstSize;
}
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X1_usingDTable_internal_body(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* const olimit = oend - 3;
const void* const dtPtr = DTable + 1;
const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
U32 endSignal = 1;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
/* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
if ((size_t)(oend - op4) >= sizeof(size_t)) {
for ( ; (endSignal) & (op4 < olimit) ; ) {
HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
}
}
/* check corruption */
/* note : should not be necessary : op# advance in lock step, and we control op4.
 * but curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when >= 1 test is present */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
/* check */
{ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endCheck) return ERROR(corruption_detected); }
/* decoded size */
return dstSize;
}
}
#if HUF_NEED_BMI2_FUNCTION
static BMI2_TARGET_ATTRIBUTE
size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
#if HUF_NEED_DEFAULT_FUNCTION
static
size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
static HUF_ASM_X86_64_BMI2_ATTRS
size_t
HUF_decompress4X1_usingDTable_internal_bmi2_asm(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
void const* dt = DTable + 1;
const BYTE* const iend = (const BYTE*)cSrc + 6;
BYTE* const oend = (BYTE*)dst + dstSize;
HUF_DecompressAsmArgs args;
{
size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
FORWARD_IF_ERROR(ret, "Failed to init asm args");
if (ret != 0)
return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
}
assert(args.ip[0] >= args.ilimit);
HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args);
/* Our loop guarantees that ip[] >= ilimit and that we haven't
* overwritten any op[].
*/
assert(args.ip[0] >= iend);
assert(args.ip[1] >= iend);
assert(args.ip[2] >= iend);
assert(args.ip[3] >= iend);
assert(args.op[3] <= oend);
(void)iend;
/* finish bit streams one by one. */
{
size_t const segmentSize = (dstSize+3) / 4;
BYTE* segmentEnd = (BYTE*)dst;
int i;
for (i = 0; i < 4; ++i) {
BIT_DStream_t bit;
if (segmentSize <= (size_t)(oend - segmentEnd))
segmentEnd += segmentSize;
else
segmentEnd = oend;
FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
/* Decompress and validate that we've produced exactly the expected length. */
args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
}
}
/* decoded size */
return dstSize;
}
#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
const void *cSrc,
size_t cSrcSize,
const HUF_DTable *DTable);
HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
# if ZSTD_ENABLE_ASM_X86_64_BMI2
return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
# else
return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
# endif
}
#else
(void)bmi2;
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
#else
return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
#endif
}
size_t HUF_decompress1X1_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
}
size_t HUF_decompress4X1_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}
static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize, int bmi2)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
}
size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
}
#endif /* HUF_FORCE_DECOMPRESS_X2 */
#ifndef HUF_FORCE_DECOMPRESS_X1
/* *************************/
/* double-symbols decoding */
/* *************************/
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
typedef struct { BYTE symbol; } sortedSymbol_t;
typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
/*
* Constructs a HUF_DEltX2 in a U32.
*/
static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
{
U32 seq;
DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
if (MEM_isLittleEndian()) {
seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
return seq + (nbBits << 16) + ((U32)level << 24);
} else {
seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
return (seq << 16) + (nbBits << 8) + (U32)level;
}
}
/*
* Constructs a HUF_DEltX2.
*/
static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
{
HUF_DEltX2 DElt;
U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
ZSTD_memcpy(&DElt, &val, sizeof(val));
return DElt;
}
/*
* Constructs 2 HUF_DEltX2s and packs them into a U64.
*/
static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
{
U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
return (U64)DElt + ((U64)DElt << 32);
}
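/* Worked example (illustrative, little-endian): a level-2 entry built
 * from baseSeq 'A' (0x41) and symbol 'B' (0x42) with nbBits 5 stores
 * sequence = 0x4241, nbBits = 5, length = 2, so HUF_decodeSymbolX2()
 * below copies the two bytes "AB" to the output in a single lookup. */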
/*
* Fills the DTable rank with all the symbols from [begin, end) that are each
* nbBits long.
*
* @param DTableRank The start of the rank in the DTable.
* @param begin The first symbol to fill (inclusive).
* @param end The last symbol to fill (exclusive).
* @param nbBits Each symbol is nbBits long.
* @param tableLog The table log.
* @param baseSeq If level == 1 { 0 } else { the first level symbol }
* @param level The level in the table. Must be 1 or 2.
*/
static void HUF_fillDTableX2ForWeight(
HUF_DEltX2* DTableRank,
sortedSymbol_t const* begin, sortedSymbol_t const* end,
U32 nbBits, U32 tableLog,
U16 baseSeq, int const level)
{
U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
const sortedSymbol_t* ptr;
assert(level >= 1 && level <= 2);
switch (length) {
case 1:
for (ptr = begin; ptr != end; ++ptr) {
HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
*DTableRank++ = DElt;
}
break;
case 2:
for (ptr = begin; ptr != end; ++ptr) {
HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
DTableRank[0] = DElt;
DTableRank[1] = DElt;
DTableRank += 2;
}
break;
case 4:
for (ptr = begin; ptr != end; ++ptr) {
U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
DTableRank += 4;
}
break;
case 8:
for (ptr = begin; ptr != end; ++ptr) {
U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
DTableRank += 8;
}
break;
default:
for (ptr = begin; ptr != end; ++ptr) {
U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
HUF_DEltX2* const DTableRankEnd = DTableRank + length;
for (; DTableRank != DTableRankEnd; DTableRank += 8) {
ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
}
}
break;
}
}
/* HUF_fillDTableX2Level2() :
* `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
const U32* rankVal, const int minWeight, const int maxWeight1,
const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
U32 nbBitsBaseline, U16 baseSeq)
{
/* Fill skipped values (all positions up to rankVal[minWeight]).
 * These positions only get a single symbol because the combined weight
* is too large.
*/
if (minWeight>1) {
U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
int const skipSize = rankVal[minWeight];
assert(length > 1);
assert((U32)skipSize < length);
switch (length) {
case 2:
assert(skipSize == 1);
ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
break;
case 4:
assert(skipSize <= 4);
ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
break;
default:
{
int i;
for (i = 0; i < skipSize; i += 8) {
ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
}
}
}
}
/* Fill each of the second level symbols by weight. */
{
int w;
for (w = minWeight; w < maxWeight1; ++w) {
int const begin = rankStart[w];
int const end = rankStart[w+1];
U32 const nbBits = nbBitsBaseline - w;
U32 const totalBits = nbBits + consumedBits;
HUF_fillDTableX2ForWeight(
DTable + rankVal[w],
sortedSymbols + begin, sortedSymbols + end,
totalBits, targetLog,
baseSeq, /* level */ 2);
}
}
}
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList,
const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32* const rankVal = rankValOrigin[0];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
int w;
int const wEnd = (int)maxWeight + 1;
/* Fill DTable in order of weight. */
for (w = 1; w < wEnd; ++w) {
int const begin = (int)rankStart[w];
int const end = (int)rankStart[w+1];
U32 const nbBits = nbBitsBaseline - w;
if (targetLog-nbBits >= minBits) {
/* Enough room for a second symbol. */
int start = rankVal[w];
U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
int minWeight = nbBits + scaleLog;
int s;
if (minWeight < 1) minWeight = 1;
/* Fill the DTable for every symbol of weight w.
* These symbols get at least 1 second symbol.
*/
for (s = begin; s != end; ++s) {
HUF_fillDTableX2Level2(
DTable + start, targetLog, nbBits,
rankValOrigin[nbBits], minWeight, wEnd,
sortedList, rankStart,
nbBitsBaseline, sortedList[s].symbol);
start += length;
}
} else {
/* Only a single symbol. */
HUF_fillDTableX2ForWeight(
DTable + rankVal[w],
sortedList + begin, sortedList + end,
nbBits, targetLog,
/* baseSeq */ 0, /* level */ 1);
}
}
}
typedef struct {
rankValCol_t rankVal[HUF_TABLELOG_MAX];
U32 rankStats[HUF_TABLELOG_MAX + 1];
U32 rankStart0[HUF_TABLELOG_MAX + 3];
sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
} HUF_ReadDTableX2_Workspace;
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
{
return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
}
size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize, int bmi2)
{
U32 tableLog, maxW, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
U32 maxTableLog = dtd.maxTableLog;
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
U32 *rankStart;
HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
rankStart = wksp->rankStart0 + 1;
ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG;
/* find maxWeight */
for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{ U32 w, nextRankStart = 0;
for (w=1; w<maxW+1; w++) {
U32 curr = nextRankStart;
nextRankStart += wksp->rankStats[w];
rankStart[w] = curr;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of the sorted list */
rankStart[maxW+1] = nextRankStart;
}
/* sort symbols by weight */
{ U32 s;
for (s=0; s<nbSymbols; s++) {
U32 const w = wksp->weightList[s];
U32 const r = rankStart[w]++;
wksp->sortedSymbol[r].symbol = (BYTE)s;
}
rankStart[0] = 0; /* forget 0w symbols; this is the beginning of weight(1) */
}
/* Build rankVal */
{ U32* const rankVal0 = wksp->rankVal[0];
{ int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
U32 nextRankVal = 0;
U32 w;
for (w=1; w<maxW+1; w++) {
U32 curr = nextRankVal;
nextRankVal += wksp->rankStats[w] << (w+rescale);
rankVal0[w] = curr;
} }
{ U32 const minBits = tableLog+1 - maxW;
U32 consumed;
for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
U32* const rankValPtr = wksp->rankVal[consumed];
U32 w;
for (w = 1; w < maxW+1; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } } }
HUF_fillDTableX2(dt, maxTableLog,
wksp->sortedSymbol,
wksp->rankStart0, wksp->rankVal, maxW,
tableLog+1);
dtd.tableLog = (BYTE)maxTableLog;
dtd.tableType = 1;
ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
return iSize;
}
FORCE_INLINE_TEMPLATE U32
HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
ZSTD_memcpy(op, &dt[val].sequence, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
FORCE_INLINE_TEMPLATE U32
HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
ZSTD_memcpy(op, &dt[val].sequence, 1);
if (dt[val].length==1) {
BIT_skipBits(DStream, dt[val].nbBits);
} else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
/* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
}
}
return 1;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
HINT_INLINE size_t
HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
if (dtLog <= 11 && MEM_64bits()) {
/* up to 10 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
} else {
/* up to 8 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
}
} else {
BIT_reloadDStream(bitDPtr);
}
/* closer to end : up to 2 symbols at a time */
if ((size_t)(pEnd - p) >= 2) {
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
while (p <= pEnd-2)
HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
}
if (p < pEnd)
p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
return p-pStart;
}
FORCE_INLINE_TEMPLATE size_t
HUF_decompress1X2_usingDTable_internal_body(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
BIT_DStream_t bitD;
/* Init */
CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
/* decode */
{ BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
}
/* check */
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X2_usingDTable_internal_body(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* const olimit = oend - (sizeof(size_t)-1);
const void* const dtPtr = DTable+1;
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
size_t const segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal = 1;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
/* 16-32 symbols per loop (4-8 symbols per stream) */
if ((size_t)(oend - op4) >= sizeof(size_t)) {
for ( ; (endSignal) & (op4 < olimit); ) {
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
#else
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = (U32)LIKELY((U32)
(BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
#endif
}
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 already verified within main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
{ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endCheck) return ERROR(corruption_detected); }
/* decoded size */
return dstSize;
}
}
#if HUF_NEED_BMI2_FUNCTION
static BMI2_TARGET_ATTRIBUTE
size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
#if HUF_NEED_DEFAULT_FUNCTION
static
size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2
HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
static HUF_ASM_X86_64_BMI2_ATTRS size_t
HUF_decompress4X2_usingDTable_internal_bmi2_asm(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable) {
void const* dt = DTable + 1;
const BYTE* const iend = (const BYTE*)cSrc + 6;
BYTE* const oend = (BYTE*)dst + dstSize;
HUF_DecompressAsmArgs args;
{
size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
FORWARD_IF_ERROR(ret, "Failed to init asm args");
if (ret != 0)
return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
}
assert(args.ip[0] >= args.ilimit);
HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args);
/* note : op4 already verified within main loop */
assert(args.ip[0] >= iend);
assert(args.ip[1] >= iend);
assert(args.ip[2] >= iend);
assert(args.ip[3] >= iend);
assert(args.op[3] <= oend);
(void)iend;
/* finish bitStreams one by one */
{
size_t const segmentSize = (dstSize+3) / 4;
BYTE* segmentEnd = (BYTE*)dst;
int i;
for (i = 0; i < 4; ++i) {
BIT_DStream_t bit;
if (segmentSize <= (size_t)(oend - segmentEnd))
segmentEnd += segmentSize;
else
segmentEnd = oend;
FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
if (args.op[i] != segmentEnd)
return ERROR(corruption_detected);
}
}
/* decoded size */
return dstSize;
}
#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
# if ZSTD_ENABLE_ASM_X86_64_BMI2
return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
# else
return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
# endif
}
#else
(void)bmi2;
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
#else
return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
#endif
}
HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
size_t HUF_decompress1X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
}
size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}
static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize, int bmi2)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
}
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
}
#endif /* HUF_FORCE_DECOMPRESS_X1 */
/* ***********************************/
/* Universal decompression selectors */
/* ***********************************/
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#else
return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#endif
}
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#else
return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#endif
}
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] =
{
/* single, double */
{{0,0}, {1,1}}, /* Q==0 : impossible */
{{0,0}, {1,1}}, /* Q==1 : impossible */
{{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */
{{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */
{{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */
{{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */
{{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */
{{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */
{{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */
{{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */
{{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */
{{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */
{{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */
{{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */
{{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */
{{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */
};
#endif
/* HUF_selectDecoder() :
* Tells which decoder is likely to decode faster,
* based on a set of pre-computed metrics.
* @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
* Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
{
assert(dstSize > 0);
assert(dstSize <= 128*1024);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dstSize;
(void)cSrcSize;
return 0;
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dstSize;
(void)cSrcSize;
return 1;
#else
/* decoder timing evaluation */
{ U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
U32 const D256 = (U32)(dstSize >> 8);
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */
return DTime1 < DTime0;
}
#endif
}
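/* Worked example (illustrative): dstSize = 65536 and cSrcSize = 32768
 * give Q = 8 and D256 = 256, so DTime0 = 359 + 188*256 = 48487 and
 * DTime1 = 1167 + 109*256 = 29071, bumped by 1/32 of itself to 29979;
 * 29979 < 48487, so the double-symbol decoder (return value 1) is
 * selected at that compression ratio. */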
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
size_t dstSize, const void* cSrc,
size_t cSrcSize, void* workSpace,
size_t wkspSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize == 0) return ERROR(corruption_detected);
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#else
return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize):
HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#endif
}
}
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize);
#else
return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize):
HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize);
#endif
}
}
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#else
return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
}
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
}
#endif
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#else
return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
}
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize == 0) return ERROR(corruption_detected);
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#else
return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#endif
}
}
| linux-master | lib/zstd/decompress/huf_decompress.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* HEAPMODE :
* Select how default decompression function ZSTD_decompress() allocates its context,
* on stack (0), or into heap (1, default; requires malloc()).
* Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
*/
#ifndef ZSTD_HEAPMODE
# define ZSTD_HEAPMODE 1
#endif
/*!
* LEGACY_SUPPORT :
* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
*/
/*!
* MAXWINDOWSIZE_DEFAULT :
* maximum window size accepted by DStream __by default__.
* Frames requiring more memory will be rejected.
* It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
*/
#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
#endif
/*!
* NO_FORWARD_PROGRESS_MAX :
* maximum allowed nb of calls to ZSTD_decompressStream()
* without any forward progress
* (defined as: no byte read from input, and no byte flushed to output)
* before triggering an error.
*/
#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
# define ZSTD_NO_FORWARD_PROGRESS_MAX 16
#endif
/*-*******************************************************
* Dependencies
*********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
#include "../common/zstd_internal.h" /* blockProperties_t */
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
/* ***********************************
* Multiple DDicts Hashset internals *
*************************************/
#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
* Currently, that means a 0.75 load factor.
* So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
* the load factor of the ddict hash set.
*/
#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2
/* Hash function to determine starting position of dict insertion within the table
 * Returns an index within [0, hashSet->ddictPtrTableSize)
*/
static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
const U64 hash = xxh64(&dictID, sizeof(U32), 0);
    /* DDict ptr table size is a power of 2, use size - 1 as mask to get an index within [0, hashSet->ddictPtrTableSize) */
return hash & (hashSet->ddictPtrTableSize - 1);
}
/* Adds DDict to a hashset without resizing it.
* If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
* Returns 0 if successful, or a zstd error code if something went wrong.
*/
static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
while (hashSet->ddictPtrTable[idx] != NULL) {
/* Replace existing ddict if inserting ddict with same dictID */
if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
DEBUGLOG(4, "DictID already exists, replacing rather than adding");
hashSet->ddictPtrTable[idx] = ddict;
return 0;
}
idx &= idxRangeMask;
idx++;
}
DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
hashSet->ddictPtrTable[idx] = ddict;
hashSet->ddictPtrCount++;
return 0;
}
/* Expands the hash table by a factor of DDICT_HASHSET_RESIZE_FACTOR:
 * allocates a new table, rehashes all values into it, then frees the old table.
* Returns 0 on success, otherwise a zstd error code.
*/
static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
size_t oldTableSize = hashSet->ddictPtrTableSize;
size_t i;
DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
hashSet->ddictPtrTable = newTable;
hashSet->ddictPtrTableSize = newTableSize;
hashSet->ddictPtrCount = 0;
for (i = 0; i < oldTableSize; ++i) {
if (oldTable[i] != NULL) {
FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
}
}
ZSTD_customFree((void*)oldTable, customMem);
DEBUGLOG(4, "Finished re-hash");
return 0;
}
/* Fetches a DDict with the given dictID
* Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
*/
static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
for (;;) {
size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
if (currDictID == dictID || currDictID == 0) {
/* currDictID == 0 implies a NULL ddict entry */
break;
} else {
idx &= idxRangeMask; /* Goes to start of table when we reach the end */
idx++;
}
}
DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
return hashSet->ddictPtrTable[idx];
}
/* Allocates space for and returns a ddict hash set
* The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
* Returns NULL if allocation failed.
*/
static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
DEBUGLOG(4, "Allocating new hash set");
if (!ret)
return NULL;
ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
if (!ret->ddictPtrTable) {
ZSTD_customFree(ret, customMem);
return NULL;
}
ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
ret->ddictPtrCount = 0;
return ret;
}
/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
* Note: The ZSTD_DDict* within the table are NOT freed.
*/
static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
DEBUGLOG(4, "Freeing ddict hash set");
if (hashSet && hashSet->ddictPtrTable) {
ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
}
if (hashSet) {
ZSTD_customFree(hashSet, customMem);
}
}
/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
* Returns 0 on success, or a ZSTD error.
*/
static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
}
FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
return 0;
}
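/*
 * Illustrative sketch (not part of the upstream zstd sources): how the hash
 * set above is exercised through the public API. `ddict1` and `ddict2` are
 * assumed to be valid ZSTD_DDict* owned by the caller. Once
 * ZSTD_d_refMultipleDDicts is enabled, every DDict referenced with
 * ZSTD_DCtx_refDDict() is also stored in the set, and each frame later picks
 * the DDict whose dictID matches its frame header.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   if (dctx != NULL) {
 *       size_t err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts,
 *                                           ZSTD_rmd_refMultipleDDicts);
 *       if (!ZSTD_isError(err)) err = ZSTD_DCtx_refDDict(dctx, ddict1);
 *       if (!ZSTD_isError(err)) err = ZSTD_DCtx_refDDict(dctx, ddict2);
 *       ... decompress frames with dctx; each one selects its DDict by dictID ...
 *       ZSTD_freeDCtx(dctx);
 *   }
 */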
/*-*************************************************************
* Context management
***************************************************************/
size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
{
if (dctx==NULL) return 0; /* support sizeof NULL */
return sizeof(*dctx)
+ ZSTD_sizeof_DDict(dctx->ddictLocal)
+ dctx->inBuffSize + dctx->outBuffSize;
}
size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
static size_t ZSTD_startingInputLength(ZSTD_format_e format)
{
size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
/* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
return startingInputLength;
}
static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
{
assert(dctx->streamStage == zdss_init);
dctx->format = ZSTD_f_zstd1;
dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
dctx->outBufferMode = ZSTD_bm_buffered;
dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
}
static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
{
dctx->staticSize = 0;
dctx->ddict = NULL;
dctx->ddictLocal = NULL;
dctx->dictEnd = NULL;
dctx->ddictIsCold = 0;
dctx->dictUses = ZSTD_dont_use;
dctx->inBuff = NULL;
dctx->inBuffSize = 0;
dctx->outBuffSize = 0;
dctx->streamStage = zdss_init;
dctx->noForwardProgress = 0;
dctx->oversizedDuration = 0;
#if DYNAMIC_BMI2
dctx->bmi2 = ZSTD_cpuSupportsBmi2();
#endif
dctx->ddictSet = NULL;
ZSTD_DCtx_resetParameters(dctx);
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentEndForFuzzing = NULL;
#endif
}
ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
{
ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
if ((size_t)workspace & 7) return NULL; /* 8-aligned */
if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
ZSTD_initDCtx_internal(dctx);
dctx->staticSize = workspaceSize;
dctx->inBuff = (char*)(dctx+1);
return dctx;
}
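/*
 * Illustrative sketch (not part of the upstream zstd sources): placing a DCtx
 * into a caller-provided workspace. `workspace` and `workspaceSize` are
 * assumptions; the buffer must be 8-byte aligned and at least
 * ZSTD_estimateDCtxSize() bytes. A static DCtx performs no further allocation
 * and must not be passed to ZSTD_freeDCtx().
 *
 *   ZSTD_DCtx* dctx = NULL;
 *   if (workspaceSize >= ZSTD_estimateDCtxSize())
 *       dctx = ZSTD_initStaticDCtx(workspace, workspaceSize);
 *   if (dctx == NULL) {
 *       ... workspace too small or misaligned ...
 *   }
 */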
static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) {
if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
{ ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
if (!dctx) return NULL;
dctx->customMem = customMem;
ZSTD_initDCtx_internal(dctx);
return dctx;
}
}
ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
{
return ZSTD_createDCtx_internal(customMem);
}
ZSTD_DCtx* ZSTD_createDCtx(void)
{
DEBUGLOG(3, "ZSTD_createDCtx");
return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
}
static void ZSTD_clearDict(ZSTD_DCtx* dctx)
{
ZSTD_freeDDict(dctx->ddictLocal);
dctx->ddictLocal = NULL;
dctx->ddict = NULL;
dctx->dictUses = ZSTD_dont_use;
}
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
if (dctx==NULL) return 0; /* support free on NULL */
RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
{ ZSTD_customMem const cMem = dctx->customMem;
ZSTD_clearDict(dctx);
ZSTD_customFree(dctx->inBuff, cMem);
dctx->inBuff = NULL;
if (dctx->ddictSet) {
ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
dctx->ddictSet = NULL;
}
ZSTD_customFree(dctx, cMem);
return 0;
}
}
/* no longer useful */
void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
{
size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
}
/* Given a dctx with digested frame params, re-selects the correct ZSTD_DDict based on
* the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
* accordingly sets the ddict to be used to decompress the frame.
*
* If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
*
* ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
*/
static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
assert(dctx->refMultipleDDicts && dctx->ddictSet);
DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
if (dctx->ddict) {
const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
if (frameDDict) {
DEBUGLOG(4, "DDict found!");
ZSTD_clearDict(dctx);
dctx->dictID = dctx->fParams.dictID;
dctx->ddict = frameDDict;
dctx->dictUses = ZSTD_use_indefinitely;
}
}
}
/*-*************************************************************
* Frame header decoding
***************************************************************/
/*! ZSTD_isFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier.
* Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
* Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
* Note 3 : Skippable Frame Identifiers are considered valid. */
unsigned ZSTD_isFrame(const void* buffer, size_t size)
{
if (size < ZSTD_FRAMEIDSIZE) return 0;
{ U32 const magic = MEM_readLE32(buffer);
if (magic == ZSTD_MAGICNUMBER) return 1;
if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
}
return 0;
}
/*! ZSTD_isSkippableFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
* Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
*/
unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
{
if (size < ZSTD_FRAMEIDSIZE) return 0;
{ U32 const magic = MEM_readLE32(buffer);
if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
}
return 0;
}
/* ZSTD_frameHeaderSize_internal() :
* srcSize must be large enough to reach header size fields.
* note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
* @return : size of the Frame Header
* or an error code, which can be tested with ZSTD_isError() */
static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
{
size_t const minInputSize = ZSTD_startingInputLength(format);
RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
{ BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
U32 const dictID= fhd & 3;
U32 const singleSegment = (fhd >> 5) & 1;
U32 const fcsId = fhd >> 6;
return minInputSize + !singleSegment
+ ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ (singleSegment && !fcsId);
}
}
/* ZSTD_frameHeaderSize() :
* srcSize must be >= ZSTD_frameHeaderSize_prefix.
* @return : size of the Frame Header,
* or an error code (if srcSize is too small) */
size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
{
return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
}
/* ZSTD_getFrameHeader_advanced() :
* decode Frame Header, or require larger `srcSize`.
* note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
* @return : 0, `zfhPtr` is correctly filled,
 * >0, `srcSize` is too small, return value is the wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
{
const BYTE* ip = (const BYTE*)src;
size_t const minInputSize = ZSTD_startingInputLength(format);
    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since they are 2 different signals */
if (srcSize < minInputSize) return minInputSize;
RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
if ( (format != ZSTD_f_zstd1_magicless)
&& (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
/* skippable frame */
if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
zfhPtr->frameType = ZSTD_skippableFrame;
return 0;
}
RETURN_ERROR(prefix_unknown, "");
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
{ size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
if (srcSize < fhsize) return fhsize;
zfhPtr->headerSize = (U32)fhsize;
}
{ BYTE const fhdByte = ip[minInputSize-1];
size_t pos = minInputSize;
U32 const dictIDSizeCode = fhdByte&3;
U32 const checksumFlag = (fhdByte>>2)&1;
U32 const singleSegment = (fhdByte>>5)&1;
U32 const fcsID = fhdByte>>6;
U64 windowSize = 0;
U32 dictID = 0;
U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
"reserved bits, must be zero");
if (!singleSegment) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
windowSize = (1ULL << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
switch(dictIDSizeCode)
{
default:
assert(0); /* impossible */
ZSTD_FALLTHROUGH;
case 0 : break;
case 1 : dictID = ip[pos]; pos++; break;
case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
}
switch(fcsID)
{
default:
assert(0); /* impossible */
ZSTD_FALLTHROUGH;
case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
}
if (singleSegment) windowSize = frameContentSize;
zfhPtr->frameType = ZSTD_frame;
zfhPtr->frameContentSize = frameContentSize;
zfhPtr->windowSize = windowSize;
zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
zfhPtr->dictID = dictID;
zfhPtr->checksumFlag = checksumFlag;
}
return 0;
}
/* ZSTD_getFrameHeader() :
* decode Frame Header, or require larger `srcSize`.
* note : this function does not consume input, it only reads it.
* @return : 0, `zfhPtr` is correctly filled,
 * >0, `srcSize` is too small, return value is the wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
{
return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}
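/*
 * Illustrative sketch (not part of the upstream zstd sources): typical use of
 * ZSTD_getFrameHeader(). `buf` and `bufSize` stand for whatever prefix of the
 * frame the caller has gathered so far.
 *
 *   ZSTD_frameHeader zfh;
 *   size_t const r = ZSTD_getFrameHeader(&zfh, buf, bufSize);
 *   if (ZSTD_isError(r)) {
 *       ... not a valid zstd (or skippable) frame prefix ...
 *   } else if (r > 0) {
 *       ... gather at least r bytes of input, then retry ...
 *   } else {
 *       ... zfh.frameType, zfh.windowSize, zfh.frameContentSize and zfh.dictID
 *           are now valid ...
 *   }
 */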
/* ZSTD_getFrameContentSize() :
* compatible with legacy mode
 * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
{
{ ZSTD_frameHeader zfh;
if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
return ZSTD_CONTENTSIZE_ERROR;
if (zfh.frameType == ZSTD_skippableFrame) {
return 0;
} else {
return zfh.frameContentSize;
} }
}
static size_t readSkippableFrameSize(void const* src, size_t srcSize)
{
size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
U32 sizeU32;
RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
frameParameter_unsupported, "");
{
size_t const skippableSize = skippableHeaderSize + sizeU32;
RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
return skippableSize;
}
}
/*! ZSTD_readSkippableFrame() :
* Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
*
* The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
* i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
* in the magicVariant.
*
* Returns an error if destination buffer is not large enough, or if the frame is not skippable.
*
* @return : number of bytes written or a ZSTD error.
*/
ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
const void* src, size_t srcSize)
{
U32 const magicNumber = MEM_readLE32(src);
size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
/* check input validity */
RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
/* deliver payload */
if (skippableContentSize > 0 && dst != NULL)
ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
if (magicVariant != NULL)
*magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
return skippableContentSize;
}
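/*
 * Illustrative sketch (not part of the upstream zstd sources): extracting the
 * payload of a skippable frame. `payload` and `payloadCapacity` are a
 * caller-provided destination; `src`/`srcSize` must cover the entire frame.
 *
 *   if (ZSTD_isSkippableFrame(src, srcSize)) {
 *       unsigned magicVariant;
 *       size_t const written = ZSTD_readSkippableFrame(payload, payloadCapacity,
 *                                                      &magicVariant, src, srcSize);
 *       if (!ZSTD_isError(written)) {
 *           ... written payload bytes copied; magicVariant is in [0, 15] ...
 *       }
 *   }
 */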
/* ZSTD_findDecompressedSize() :
* compatible with legacy mode
* `srcSize` must be the exact length of some number of ZSTD compressed and/or
* skippable frames
* @return : decompressed size of the frames contained */
unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long totalDstSize = 0;
while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
U32 const magicNumber = MEM_readLE32(src);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
if (ZSTD_isError(skippableSize)) {
return ZSTD_CONTENTSIZE_ERROR;
}
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
continue;
}
{ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
/* check for overflow */
if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
totalDstSize += ret;
}
{ size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
if (ZSTD_isError(frameSrcSize)) {
return ZSTD_CONTENTSIZE_ERROR;
}
src = (const BYTE *)src + frameSrcSize;
srcSize -= frameSrcSize;
}
} /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
return totalDstSize;
}
/* ZSTD_getDecompressedSize() :
* compatible with legacy mode
* @return : decompressed size if known, 0 otherwise
note : 0 can mean any of the following :
- frame content is empty
- decompressed size field is not present in frame header
- frame header unknown / not supported
- frame header not complete (`srcSize` too small) */
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
{
unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
}
/* ZSTD_decodeFrameHeader() :
* `headerSize` must be the size provided by ZSTD_frameHeaderSize().
* If multiple DDict references are enabled, also will choose the correct DDict to use.
* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
{
size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
if (ZSTD_isError(result)) return result; /* invalid header */
RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
/* Reference DDict requested by frame if dctx references multiple ddicts */
if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
ZSTD_DCtx_selectFrameDDict(dctx);
}
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
/* Skip the dictID check in fuzzing mode, because it makes the search
* harder.
*/
RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
dictionary_wrong, "");
#endif
dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
dctx->processedCSize += headerSize;
return 0;
}
static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
{
ZSTD_frameSizeInfo frameSizeInfo;
frameSizeInfo.compressedSize = ret;
frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
return frameSizeInfo;
}
static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
{
ZSTD_frameSizeInfo frameSizeInfo;
ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
&& (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
frameSizeInfo.compressedSize <= srcSize);
return frameSizeInfo;
} else {
const BYTE* ip = (const BYTE*)src;
const BYTE* const ipstart = ip;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
ZSTD_frameHeader zfh;
/* Extract Frame Header */
{ size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(ret))
return ZSTD_errorFrameSizeInfo(ret);
if (ret > 0)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
}
ip += zfh.headerSize;
remainingSize -= zfh.headerSize;
/* Iterate over each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize))
return ZSTD_errorFrameSizeInfo(cBlockSize);
if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
ip += ZSTD_blockHeaderSize + cBlockSize;
remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
nbBlocks++;
if (blockProperties.lastBlock) break;
}
/* Final frame content checksum */
if (zfh.checksumFlag) {
if (remainingSize < 4)
return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
ip += 4;
}
frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
? zfh.frameContentSize
: nbBlocks * zfh.blockSizeMax;
return frameSizeInfo;
}
}
/* ZSTD_findFrameCompressedSize() :
* compatible with legacy mode
* `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the compressed size of the frame starting at `src` */
size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
{
ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
return frameSizeInfo.compressedSize;
}
/* ZSTD_decompressBound() :
* compatible with legacy mode
 * `src` must point to the start of a ZSTD frame or a skippable frame
* `srcSize` must be at least as large as the frame contained
* @return : the maximum decompressed size of the compressed source
*/
unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
{
unsigned long long bound = 0;
/* Iterate over each frame */
while (srcSize > 0) {
ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
size_t const compressedSize = frameSizeInfo.compressedSize;
unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
return ZSTD_CONTENTSIZE_ERROR;
assert(srcSize >= compressedSize);
src = (const BYTE*)src + compressedSize;
srcSize -= compressedSize;
bound += decompressedBound;
}
return bound;
}
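/*
 * Illustrative sketch (not part of the upstream zstd sources): sizing a
 * destination buffer when the frame header may not carry a content size.
 * `alloc_buffer()` stands in for whatever allocator the caller uses.
 *
 *   unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
 *   if (bound != ZSTD_CONTENTSIZE_ERROR && bound <= (unsigned long long)(size_t)-1) {
 *       void* const dst = alloc_buffer((size_t)bound);
 *       if (dst != NULL) {
 *           size_t const dSize = ZSTD_decompress(dst, (size_t)bound, src, srcSize);
 *           ... check ZSTD_isError(dSize) ...
 *       }
 *   }
 */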
/*-*************************************************************
* Frame decoding
***************************************************************/
/* ZSTD_insertBlock() :
* insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
{
DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
ZSTD_checkContinuity(dctx, blockStart, blockSize);
dctx->previousDstEnd = (const char*)blockStart + blockSize;
return blockSize;
}
static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_copyRawBlock");
RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (srcSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
ZSTD_memmove(dst, src, srcSize);
return srcSize;
}
static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
BYTE b,
size_t regenSize)
{
RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
if (dst == NULL) {
if (regenSize == 0) return 0;
RETURN_ERROR(dstBuffer_null, "");
}
ZSTD_memset(dst, b, regenSize);
return regenSize;
}
static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
{
(void)dctx;
(void)uncompressedSize;
(void)compressedSize;
(void)streaming;
}
/*! ZSTD_decompressFrame() :
* @dctx must be properly initialized
* will update *srcPtr and *srcSizePtr,
* to make *srcPtr progress by one frame. */
static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void** srcPtr, size_t *srcSizePtr)
{
const BYTE* const istart = (const BYTE*)(*srcPtr);
const BYTE* ip = istart;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
BYTE* op = ostart;
size_t remainingSrcSize = *srcSizePtr;
DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
/* check */
RETURN_ERROR_IF(
remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
srcSize_wrong, "");
/* Frame Header */
{ size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
srcSize_wrong, "");
FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
}
/* Loop on each block */
while (1) {
BYTE* oBlockEnd = oend;
size_t decodedSize;
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
ip += ZSTD_blockHeaderSize;
remainingSrcSize -= ZSTD_blockHeaderSize;
RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
if (ip >= op && ip < oBlockEnd) {
/* We are decompressing in-place. Limit the output pointer so that we
* don't overwrite the block that we are currently reading. This will
* fail decompression if the input & output pointers aren't spaced
* far enough apart.
*
* This is important to set, even when the pointers are far enough
* apart, because ZSTD_decompressBlock_internal() can decide to store
* literals in the output buffer, after the block it is decompressing.
* Since we don't want anything to overwrite our input, we have to tell
* ZSTD_decompressBlock_internal to never write past ip.
*
* See ZSTD_allocateLiteralsBuffer() for reference.
*/
oBlockEnd = op + (ip - op);
}
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
break;
case bt_raw :
/* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
break;
case bt_rle :
decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
break;
case bt_reserved :
default:
RETURN_ERROR(corruption_detected, "invalid block type");
}
if (ZSTD_isError(decodedSize)) return decodedSize;
if (dctx->validateChecksum)
xxh64_update(&dctx->xxhState, op, decodedSize);
if (decodedSize != 0)
op += decodedSize;
assert(ip != NULL);
ip += cBlockSize;
remainingSrcSize -= cBlockSize;
if (blockProperties.lastBlock) break;
}
if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
corruption_detected, "");
}
if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
if (!dctx->forceIgnoreChecksum) {
U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
U32 checkRead;
checkRead = MEM_readLE32(ip);
RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
}
ip += 4;
remainingSrcSize -= 4;
}
ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
/* Allow caller to get size read */
*srcPtr = ip;
*srcSizePtr = remainingSrcSize;
return (size_t)(op-ostart);
}
static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
const ZSTD_DDict* ddict)
{
void* const dststart = dst;
int moreThan1Frame = 0;
DEBUGLOG(5, "ZSTD_decompressMultiFrame");
assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
if (ddict) {
dict = ZSTD_DDict_dictContent(ddict);
dictSize = ZSTD_DDict_dictSize(ddict);
}
while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
{ U32 const magicNumber = MEM_readLE32(src);
DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
(unsigned)magicNumber, ZSTD_MAGICNUMBER);
if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
size_t const skippableSize = readSkippableFrameSize(src, srcSize);
FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
assert(skippableSize <= srcSize);
src = (const BYTE *)src + skippableSize;
srcSize -= skippableSize;
continue;
} }
if (ddict) {
/* we were called from ZSTD_decompress_usingDDict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
} else {
/* this will initialize correctly with no dict if dict == NULL, so
* use this in all cases but ddict */
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
}
ZSTD_checkContinuity(dctx, dst, dstCapacity);
{ const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
&src, &srcSize);
RETURN_ERROR_IF(
(ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
&& (moreThan1Frame==1),
srcSize_wrong,
"At least one frame successfully completed, "
"but following bytes are garbage: "
"it's more likely to be a srcSize error, "
"specifying more input bytes than size of frame(s). "
"Note: one could be unlucky, it might be a corruption error instead, "
"happening right at the place where we expect zstd magic bytes. "
"But this is _much_ less likely than a srcSize field error.");
if (ZSTD_isError(res)) return res;
assert(res <= dstCapacity);
if (res != 0)
dst = (BYTE*)dst + res;
dstCapacity -= res;
}
moreThan1Frame = 1;
} /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
return (size_t)((BYTE*)dst - (BYTE*)dststart);
}
size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
}
static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
{
switch (dctx->dictUses) {
default:
assert(0 /* Impossible */);
ZSTD_FALLTHROUGH;
case ZSTD_dont_use:
ZSTD_clearDict(dctx);
return NULL;
case ZSTD_use_indefinitely:
return dctx->ddict;
case ZSTD_use_once:
dctx->dictUses = ZSTD_dont_use;
return dctx->ddict;
}
}
size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
}
size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
size_t regenSize;
ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem);
RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTD_freeDCtx(dctx);
return regenSize;
#else /* stack mode */
ZSTD_DCtx dctx;
ZSTD_initDCtx_internal(&dctx);
return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
#endif
}
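/*
 * Illustrative sketch (not part of the upstream zstd sources): the same
 * one-shot decompression as above, but with an explicit context that can be
 * reused across many frames instead of being re-created on every call.
 *
 *   ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *   if (dctx != NULL) {
 *       size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
 *       if (ZSTD_isError(dSize)) {
 *           ... handle error; dctx remains usable for further frames ...
 *       }
 *       ZSTD_freeDCtx(dctx);
 *   }
 */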
/*-**************************************
* Advanced Streaming Decompression API
* Bufferless and synchronous
****************************************/
size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
/*
* Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
* we allow taking a partial block as the input. Currently only raw uncompressed blocks can
* be streamed.
*
* For blocks that can be streamed, this allows us to reduce the latency until we produce
* output, and avoid copying the input.
*
* @param inputSize - The total amount of input that the caller currently has.
*/
static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
return dctx->expected;
if (dctx->bType != bt_raw)
return dctx->expected;
return BOUNDED(1, inputSize, dctx->expected);
}
ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
switch(dctx->stage)
{
default: /* should not happen */
assert(0);
ZSTD_FALLTHROUGH;
case ZSTDds_getFrameHeaderSize:
ZSTD_FALLTHROUGH;
case ZSTDds_decodeFrameHeader:
return ZSTDnit_frameHeader;
case ZSTDds_decodeBlockHeader:
return ZSTDnit_blockHeader;
case ZSTDds_decompressBlock:
return ZSTDnit_block;
case ZSTDds_decompressLastBlock:
return ZSTDnit_lastBlock;
case ZSTDds_checkChecksum:
return ZSTDnit_checksum;
case ZSTDds_decodeSkippableHeader:
ZSTD_FALLTHROUGH;
case ZSTDds_skipFrame:
return ZSTDnit_skippableFrame;
}
}
static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
/* ZSTD_decompressContinue() :
* srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
 * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
* or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
/* Sanity check */
RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
ZSTD_checkContinuity(dctx, dst, dstCapacity);
dctx->processedCSize += srcSize;
switch (dctx->stage)
{
case ZSTDds_getFrameHeaderSize :
assert(src != NULL);
if (dctx->format == ZSTD_f_zstd1) { /* allows header */
assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
dctx->stage = ZSTDds_decodeSkippableHeader;
return 0;
} }
dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
dctx->expected = dctx->headerSize - srcSize;
dctx->stage = ZSTDds_decodeFrameHeader;
return 0;
case ZSTDds_decodeFrameHeader:
assert(src != NULL);
ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
dctx->expected = ZSTD_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;
case ZSTDds_decodeBlockHeader:
{ blockProperties_t bp;
size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
dctx->expected = cBlockSize;
dctx->bType = bp.blockType;
dctx->rleSize = bp.origSize;
if (cBlockSize) {
dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
return 0;
}
/* empty block */
if (bp.lastBlock) {
if (dctx->fParams.checksumFlag) {
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
} else {
dctx->expected = 0; /* end of frame */
dctx->stage = ZSTDds_getFrameHeaderSize;
}
} else {
dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
dctx->stage = ZSTDds_decodeBlockHeader;
}
return 0;
}
case ZSTDds_decompressLastBlock:
case ZSTDds_decompressBlock:
DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
{ size_t rSize;
switch(dctx->bType)
{
case bt_compressed:
DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
dctx->expected = 0; /* Streaming not supported */
break;
case bt_raw :
assert(srcSize <= dctx->expected);
rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
assert(rSize == srcSize);
dctx->expected -= rSize;
break;
case bt_rle :
rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
dctx->expected = 0; /* Streaming not supported */
break;
case bt_reserved : /* should never happen */
default:
RETURN_ERROR(corruption_detected, "invalid block type");
}
FORWARD_IF_ERROR(rSize, "");
RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
dctx->decodedSize += rSize;
if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
dctx->previousDstEnd = (char*)dst + rSize;
/* Stay on the same stage until we are finished streaming the block. */
if (dctx->expected > 0) {
return rSize;
}
if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
RETURN_ERROR_IF(
dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& dctx->decodedSize != dctx->fParams.frameContentSize,
corruption_detected, "");
if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
dctx->expected = 4;
dctx->stage = ZSTDds_checkChecksum;
} else {
ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
dctx->expected = 0; /* ends here */
dctx->stage = ZSTDds_getFrameHeaderSize;
}
} else {
dctx->stage = ZSTDds_decodeBlockHeader;
dctx->expected = ZSTD_blockHeaderSize;
}
return rSize;
}
case ZSTDds_checkChecksum:
assert(srcSize == 4); /* guaranteed by dctx->expected */
{
if (dctx->validateChecksum) {
U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
U32 const check32 = MEM_readLE32(src);
DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
}
ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
}
case ZSTDds_decodeSkippableHeader:
assert(src != NULL);
assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
dctx->stage = ZSTDds_skipFrame;
return 0;
case ZSTDds_skipFrame:
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
default:
assert(0); /* impossible */
        RETURN_ERROR(GENERIC, "impossible to reach");   /* some compilers require default to do something */
}
}
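/*
 * Illustrative sketch (not part of the upstream zstd sources): the bufferless
 * streaming loop that drives ZSTD_decompressContinue(). `read_input()`,
 * `inBuf`, `outChunk` and `outChunkCapacity` are caller-side assumptions;
 * every call must supply exactly the number of bytes announced by
 * ZSTD_nextSrcSizeToDecompress(), a returned value of 0 from that function
 * marks the end of the frame, and `outChunk` must be able to hold a full
 * block (dctx->fParams.blockSizeMax, at most ZSTD_BLOCKSIZE_MAX).
 *
 *   size_t err = ZSTD_decompressBegin(dctx);
 *   while (!ZSTD_isError(err)) {
 *       size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
 *       if (toRead == 0) break;
 *       read_input(inBuf, toRead);
 *       err = ZSTD_decompressContinue(dctx, outChunk, outChunkCapacity, inBuf, toRead);
 *       if (!ZSTD_isError(err) && err > 0) {
 *           ... err bytes of regenerated data are available in outChunk ...
 *       }
 *   }
 */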
static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
dctx->dictEnd = dctx->previousDstEnd;
dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
dctx->prefixStart = dict;
dctx->previousDstEnd = (const char*)dict + dictSize;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
dctx->dictContentBeginForFuzzing = dctx->prefixStart;
dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
return 0;
}
/*! ZSTD_loadDEntropy() :
* dict : must point at beginning of a valid zstd dictionary.
* @return : size of entropy tables read */
size_t
ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
const void* const dict, size_t const dictSize)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
dictPtr += 8; /* skip header = magic + dictID */
ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
{ void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
#ifdef HUF_FORCE_DECOMPRESS_X1
/* in minimal huffman, we always use X1 variants */
size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
dictPtr, dictEnd - dictPtr,
workspace, workspaceSize);
#else
size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
dictPtr, (size_t)(dictEnd - dictPtr),
workspace, workspaceSize);
#endif
RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
dictPtr += hSize;
}
{ short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue = MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->OFTable,
offcodeNCount, offcodeMaxValue,
OF_base, OF_bits,
offcodeLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */0);
dictPtr += offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->MLTable,
matchlengthNCount, matchlengthMaxValue,
ML_base, ML_bits,
matchlengthLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */ 0);
dictPtr += matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
ZSTD_buildFSETable( entropy->LLTable,
litlengthNCount, litlengthMaxValue,
LL_base, LL_bits,
litlengthLog,
entropy->workspace, sizeof(entropy->workspace),
/* bmi2 */ 0);
dictPtr += litlengthHeaderSize;
}
RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
{ int i;
size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
for (i=0; i<3; i++) {
U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
dictionary_corrupted, "");
entropy->rep[i] = rep;
} }
return (size_t)(dictPtr - (const BYTE*)dict);
}
static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
{ U32 const magic = MEM_readLE32(dict);
if (magic != ZSTD_MAGIC_DICTIONARY) {
return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
} }
dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
/* load entropy tables */
{ size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
dctx->litEntropy = dctx->fseEntropy = 1;
/* reference dictionary content */
return ZSTD_refDictContent(dctx, dict, dictSize);
}
size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
{
assert(dctx != NULL);
dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
dctx->stage = ZSTDds_getFrameHeaderSize;
dctx->processedCSize = 0;
dctx->decodedSize = 0;
dctx->previousDstEnd = NULL;
dctx->prefixStart = NULL;
dctx->virtualStart = NULL;
dctx->dictEnd = NULL;
dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
dctx->litEntropy = dctx->fseEntropy = 0;
dctx->dictID = 0;
dctx->bType = bt_reserved;
ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
dctx->LLTptr = dctx->entropy.LLTable;
dctx->MLTptr = dctx->entropy.MLTable;
dctx->OFTptr = dctx->entropy.OFTable;
dctx->HUFptr = dctx->entropy.hufTable;
return 0;
}
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
if (dict && dictSize)
RETURN_ERROR_IF(
ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
dictionary_corrupted, "");
return 0;
}
/* ====== ZSTD_DDict ====== */
size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
assert(dctx != NULL);
if (ddict) {
const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
size_t const dictSize = ZSTD_DDict_dictSize(ddict);
const void* const dictEnd = dictStart + dictSize;
dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
DEBUGLOG(4, "DDict is %s",
dctx->ddictIsCold ? "~cold~" : "hot!");
}
FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
if (ddict) { /* NULL ddict is equivalent to no dictionary */
ZSTD_copyDDictParameters(dctx, ddict);
}
return 0;
}
/*! ZSTD_getDictID_fromDict() :
 * Provides the dictID stored within the dictionary.
 * if @return == 0, the dictionary is not conformant with the Zstandard specification.
* It can still be loaded, but as a content-only dictionary. */
unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
{
if (dictSize < 8) return 0;
if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
}
/*! ZSTD_getDictID_fromFrame() :
* Provides the dictID required to decompress frame stored within `src`.
* If @return == 0, the dictID could not be decoded.
 * This could be for one of the following reasons :
* - The frame does not require a dictionary (most common case).
* - The frame was built with dictID intentionally removed.
 *   The needed dictionary is hidden information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, frame header could not be decoded.
* Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
* - This is not a Zstandard frame.
* When identifying the exact failure cause, it's possible to use
* ZSTD_getFrameHeader(), which will provide a more precise error code. */
unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
{
ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
if (ZSTD_isError(hError)) return 0;
return zfp.dictID;
}
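/*
 * Illustrative sketch (not part of the upstream zstd sources): picking the
 * matching raw dictionary for a frame by dictID before decompressing.
 * `dicts[]`, `dictSizes[]` and `nbDicts` are caller-side assumptions.
 *
 *   unsigned const wanted = ZSTD_getDictID_fromFrame(src, srcSize);
 *   size_t i;
 *   for (i = 0; wanted != 0 && i < nbDicts; i++) {
 *       if (ZSTD_getDictID_fromDict(dicts[i], dictSizes[i]) == wanted)
 *           return ZSTD_decompress_usingDict(dctx, dst, dstCapacity,
 *                                            src, srcSize, dicts[i], dictSizes[i]);
 *   }
 *   ... fall back to dictionary-less decompression, or report an error ...
 */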
/*! ZSTD_decompress_usingDDict() :
* Decompression using a pre-digested Dictionary
* Use dictionary without significant overhead. */
size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_DDict* ddict)
{
/* pass content and size in case legacy frames are encountered */
return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
NULL, 0,
ddict);
}
/*=====================================
* Streaming decompression
*====================================*/
ZSTD_DStream* ZSTD_createDStream(void)
{
DEBUGLOG(3, "ZSTD_createDStream");
return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
}
ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
{
return ZSTD_initStaticDCtx(workspace, workspaceSize);
}
ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
{
return ZSTD_createDCtx_internal(customMem);
}
size_t ZSTD_freeDStream(ZSTD_DStream* zds)
{
return ZSTD_freeDCtx(zds);
}
/* *** Initialization *** */
size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
if (dict && dictSize != 0) {
dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
dctx->ddict = dctx->ddictLocal;
dctx->dictUses = ZSTD_use_indefinitely;
}
return 0;
}
size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
}
size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
{
return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
}
size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
{
FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
dctx->dictUses = ZSTD_use_once;
return 0;
}
size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
{
return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
}
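/*
 * Illustrative sketch (not part of the upstream zstd sources): decompressing
 * one frame against a raw prefix. The prefix is referenced (not copied) and,
 * per ZSTD_use_once above, is dropped after a single decompression, so it must
 * be referenced again before the next prefix-dependent frame.
 *
 *   size_t err = ZSTD_DCtx_refPrefix(dctx, prefix, prefixSize);
 *   if (!ZSTD_isError(err))
 *       err = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
 *   ... check ZSTD_isError(err); the prefix buffer must outlive the call ...
 */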
/* ZSTD_initDStream_usingDict() :
* return : expected size, aka ZSTD_startingInputLength().
* this function cannot fail */
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
{
DEBUGLOG(4, "ZSTD_initDStream_usingDict");
FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
return ZSTD_startingInputLength(zds->format);
}
/* note : this variant can't fail */
size_t ZSTD_initDStream(ZSTD_DStream* zds)
{
DEBUGLOG(4, "ZSTD_initDStream");
return ZSTD_initDStream_usingDDict(zds, NULL);
}
/* ZSTD_initDStream_usingDDict() :
* ddict will just be referenced, and must outlive decompression session
* this function cannot fail */
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
{
FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
return ZSTD_startingInputLength(dctx->format);
}
/* ZSTD_resetDStream() :
* return : expected size, aka ZSTD_startingInputLength().
* this function cannot fail */
size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
{
FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
return ZSTD_startingInputLength(dctx->format);
}
size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
if (ddict) {
dctx->ddict = ddict;
dctx->dictUses = ZSTD_use_indefinitely;
if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
if (dctx->ddictSet == NULL) {
dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
if (!dctx->ddictSet) {
RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
}
}
assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */
FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
}
}
return 0;
}
/* ZSTD_DCtx_setMaxWindowSize() :
* note : no direct equivalence in ZSTD_DCtx_setParameter,
* since this version sets windowSize, and the other sets windowLog */
size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
{
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
size_t const min = (size_t)1 << bounds.lowerBound;
size_t const max = (size_t)1 << bounds.upperBound;
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
dctx->maxWindowSize = maxWindowSize;
return 0;
}
size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
{
return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
}
ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
{
ZSTD_bounds bounds = { 0, 0, 0 };
switch(dParam) {
case ZSTD_d_windowLogMax:
bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
bounds.upperBound = ZSTD_WINDOWLOG_MAX;
return bounds;
case ZSTD_d_format:
bounds.lowerBound = (int)ZSTD_f_zstd1;
bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
return bounds;
case ZSTD_d_stableOutBuffer:
bounds.lowerBound = (int)ZSTD_bm_buffered;
bounds.upperBound = (int)ZSTD_bm_stable;
return bounds;
case ZSTD_d_forceIgnoreChecksum:
bounds.lowerBound = (int)ZSTD_d_validateChecksum;
bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
return bounds;
case ZSTD_d_refMultipleDDicts:
bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
return bounds;
default:;
}
bounds.error = ERROR(parameter_unsupported);
return bounds;
}
/* ZSTD_dParam_withinBounds:
* @return 1 if value is within dParam bounds,
* 0 otherwise */
static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
{
ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
if (ZSTD_isError(bounds.error)) return 0;
if (value < bounds.lowerBound) return 0;
if (value > bounds.upperBound) return 0;
return 1;
}
#define CHECK_DBOUNDS(p,v) { \
RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
}
size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
{
switch (param) {
case ZSTD_d_windowLogMax:
*value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
return 0;
case ZSTD_d_format:
*value = (int)dctx->format;
return 0;
case ZSTD_d_stableOutBuffer:
*value = (int)dctx->outBufferMode;
return 0;
case ZSTD_d_forceIgnoreChecksum:
*value = (int)dctx->forceIgnoreChecksum;
return 0;
case ZSTD_d_refMultipleDDicts:
*value = (int)dctx->refMultipleDDicts;
return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
}
size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
{
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
switch(dParam) {
case ZSTD_d_windowLogMax:
if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
dctx->maxWindowSize = ((size_t)1) << value;
return 0;
case ZSTD_d_format:
CHECK_DBOUNDS(ZSTD_d_format, value);
dctx->format = (ZSTD_format_e)value;
return 0;
case ZSTD_d_stableOutBuffer:
CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
dctx->outBufferMode = (ZSTD_bufferMode_e)value;
return 0;
case ZSTD_d_forceIgnoreChecksum:
CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
return 0;
case ZSTD_d_refMultipleDDicts:
CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
if (dctx->staticSize != 0) {
RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
}
dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
return 0;
default:;
}
RETURN_ERROR(parameter_unsupported, "");
}
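/* Illustrative usage sketch (editor's addition) : typical advanced-parameter setup
 * before a streaming session; `dctx` is a hypothetical caller-owned context and the
 * values shown are examples, not recommendations.
 *
 *   ZSTD_DCtx_reset(dctx, ZSTD_reset_session_and_parameters);
 *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);            sets maxWindowSize = 1 << 27
 *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
 *
 * Note that the getter is not a strict inverse for windowLogMax : it reports
 * ZSTD_highbit32(maxWindowSize), i.e. the log2 of the stored size.
 */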
size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
{
if ( (reset == ZSTD_reset_session_only)
|| (reset == ZSTD_reset_session_and_parameters) ) {
dctx->streamStage = zdss_init;
dctx->noForwardProgress = 0;
}
if ( (reset == ZSTD_reset_parameters)
|| (reset == ZSTD_reset_session_and_parameters) ) {
RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
ZSTD_clearDict(dctx);
ZSTD_DCtx_resetParameters(dctx);
}
return 0;
}
size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
{
return ZSTD_sizeof_DCtx(dctx);
}
size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
{
size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
    /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy */
unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
size_t const minRBSize = (size_t) neededSize;
RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
frameParameter_windowTooLarge, "");
return minRBSize;
}
size_t ZSTD_estimateDStreamSize(size_t windowSize)
{
size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
size_t const inBuffSize = blockSize; /* no block can be larger */
size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
}
size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
{
U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
ZSTD_frameHeader zfh;
size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
if (ZSTD_isError(err)) return err;
RETURN_ERROR_IF(err>0, srcSize_wrong, "");
RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
frameParameter_windowTooLarge, "");
return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
}
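/* Illustrative usage sketch (editor's addition) : sizing a caller-provided workspace
 * from the frame header. It assumes the first bytes of the frame are available in
 * `src`/`srcSize` and relies on ZSTD_initStaticDStream(), declared elsewhere in the
 * zstd API; `workspace` is a hypothetical caller-owned buffer.
 *
 *   size_t const need = ZSTD_estimateDStreamSize_fromFrame(src, srcSize);
 *   if (ZSTD_isError(need)) return need;
 *   ... allocate `workspace` of `need` bytes, suitably aligned ...
 *   ZSTD_DStream* const zds = ZSTD_initStaticDStream(workspace, need);
 *   if (zds == NULL) ... workspace too small or misaligned ...
 */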
/* ***** Decompression ***** */
static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
{
return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
}
static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
{
if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
zds->oversizedDuration++;
else
zds->oversizedDuration = 0;
}
static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
{
return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}
/* Checks that the output buffer hasn't changed if ZSTD_bm_stable is used. */
static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
{
ZSTD_outBuffer const expect = zds->expectedOutBuffer;
    /* No requirement when ZSTD_bm_stable is not enabled. */
if (zds->outBufferMode != ZSTD_bm_stable)
return 0;
    /* Any buffer is allowed in zdss_init; after that, the same buffer must be provided for every call until
* the context is reset.
*/
if (zds->streamStage == zdss_init)
return 0;
/* The buffer must match our expectation exactly. */
if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
return 0;
RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
}
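/* Illustrative usage sketch (editor's addition) of the contract checked above :
 * with ZSTD_d_stableOutBuffer set, the caller hands the *same* ZSTD_outBuffer
 * (same dst, size and pos) to every ZSTD_decompressStream() call of the session
 * and lets the library advance output.pos. `bigDst` and `chunk` are hypothetical
 * caller buffers.
 *
 *   ZSTD_DCtx_setParameter(dctx, ZSTD_d_stableOutBuffer, 1);
 *   ZSTD_outBuffer out = { bigDst, bigDstCapacity, 0 };     never replaced or rewound
 *   while (moreInput) {
 *       ZSTD_inBuffer in = { chunk, chunkSize, 0 };
 *       size_t const ret = ZSTD_decompressStream(dctx, &out, &in);
 *       if (ZSTD_isError(ret)) ... handle error ...;
 *   }
 */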
/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
* and updates the stage and the output buffer state. This call is extracted so it can be
* used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
* NOTE: You must break after calling this function since the streamStage is modified.
*/
static size_t ZSTD_decompressContinueStream(
ZSTD_DStream* zds, char** op, char* oend,
void const* src, size_t srcSize) {
int const isSkipFrame = ZSTD_isSkipFrame(zds);
if (zds->outBufferMode == ZSTD_bm_buffered) {
size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
size_t const decodedSize = ZSTD_decompressContinue(zds,
zds->outBuff + zds->outStart, dstSize, src, srcSize);
FORWARD_IF_ERROR(decodedSize, "");
if (!decodedSize && !isSkipFrame) {
zds->streamStage = zdss_read;
} else {
zds->outEnd = zds->outStart + decodedSize;
zds->streamStage = zdss_flush;
}
} else {
/* Write directly into the output buffer */
size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
FORWARD_IF_ERROR(decodedSize, "");
*op += decodedSize;
/* Flushing is not needed. */
zds->streamStage = zdss_read;
assert(*op <= oend);
assert(zds->outBufferMode == ZSTD_bm_stable);
}
return 0;
}
size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
const char* const src = (const char*)input->src;
const char* const istart = input->pos != 0 ? src + input->pos : src;
const char* const iend = input->size != 0 ? src + input->size : src;
const char* ip = istart;
char* const dst = (char*)output->dst;
char* const ostart = output->pos != 0 ? dst + output->pos : dst;
char* const oend = output->size != 0 ? dst + output->size : dst;
char* op = ostart;
U32 someMoreWork = 1;
DEBUGLOG(5, "ZSTD_decompressStream");
RETURN_ERROR_IF(
input->pos > input->size,
srcSize_wrong,
"forbidden. in: pos: %u vs size: %u",
(U32)input->pos, (U32)input->size);
RETURN_ERROR_IF(
output->pos > output->size,
dstSize_tooSmall,
"forbidden. out: pos: %u vs size: %u",
(U32)output->pos, (U32)output->size);
DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
while (someMoreWork) {
switch(zds->streamStage)
{
case zdss_init :
DEBUGLOG(5, "stage zdss_init => transparent reset ");
zds->streamStage = zdss_loadHeader;
zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
zds->hostageByte = 0;
zds->expectedOutBuffer = *output;
ZSTD_FALLTHROUGH;
case zdss_loadHeader :
DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
{ size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
if (zds->refMultipleDDicts && zds->ddictSet) {
ZSTD_DCtx_selectFrameDDict(zds);
}
DEBUGLOG(5, "header size : %u", (U32)hSize);
if (ZSTD_isError(hSize)) {
return hSize; /* error */
}
if (hSize != 0) { /* need more input */
size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
size_t const remainingInput = (size_t)(iend-ip);
assert(iend >= ip);
if (toLoad > remainingInput) { /* not enough input to load full header */
if (remainingInput > 0) {
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
zds->lhSize += remainingInput;
}
input->pos = input->size;
return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
}
assert(ip != NULL);
ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
break;
} }
/* check for single-pass mode opportunity */
if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& zds->fParams.frameType != ZSTD_skippableFrame
&& (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
if (cSize <= (size_t)(iend-istart)) {
/* shortcut : using single-pass mode */
size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
if (ZSTD_isError(decompressedSize)) return decompressedSize;
DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
ip = istart + cSize;
op += decompressedSize;
zds->expected = 0;
zds->streamStage = zdss_init;
someMoreWork = 0;
break;
} }
            /* Check output buffer is large enough for ZSTD_bm_stable. */
if (zds->outBufferMode == ZSTD_bm_stable
&& zds->fParams.frameType != ZSTD_skippableFrame
&& zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
&& (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
}
/* Consume header (see ZSTDds_decodeFrameHeader) */
DEBUGLOG(4, "Consume header");
FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
zds->stage = ZSTDds_skipFrame;
} else {
FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
zds->expected = ZSTD_blockHeaderSize;
zds->stage = ZSTDds_decodeBlockHeader;
}
/* control buffer memory usage */
DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
(U32)(zds->fParams.windowSize >>10),
(U32)(zds->maxWindowSize >> 10) );
zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
frameParameter_windowTooLarge, "");
/* Adapt buffer sizes to frame header instructions */
{ size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
: 0;
ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
{ int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
if (tooSmall || tooLarge) {
size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
DEBUGLOG(4, "inBuff : from %u to %u",
(U32)zds->inBuffSize, (U32)neededInBuffSize);
DEBUGLOG(4, "outBuff : from %u to %u",
(U32)zds->outBuffSize, (U32)neededOutBuffSize);
if (zds->staticSize) { /* static DCtx */
DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
RETURN_ERROR_IF(
bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
memory_allocation, "");
} else {
ZSTD_customFree(zds->inBuff, zds->customMem);
zds->inBuffSize = 0;
zds->outBuffSize = 0;
zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
}
zds->inBuffSize = neededInBuffSize;
zds->outBuff = zds->inBuff + zds->inBuffSize;
zds->outBuffSize = neededOutBuffSize;
} } }
zds->streamStage = zdss_read;
ZSTD_FALLTHROUGH;
case zdss_read:
DEBUGLOG(5, "stage zdss_read");
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
if (neededInSize==0) { /* end of frame */
zds->streamStage = zdss_init;
someMoreWork = 0;
break;
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
ip += neededInSize;
/* Function modifies the stage so we must break */
break;
} }
if (ip==iend) { someMoreWork = 0; break; } /* no more input */
zds->streamStage = zdss_load;
ZSTD_FALLTHROUGH;
case zdss_load:
{ size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
size_t const toLoad = neededInSize - zds->inPos;
int const isSkipFrame = ZSTD_isSkipFrame(zds);
size_t loadedSize;
/* At this point we shouldn't be decompressing a block that we can stream. */
assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
if (isSkipFrame) {
loadedSize = MIN(toLoad, (size_t)(iend-ip));
} else {
RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
corruption_detected,
"should never happen");
loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
}
ip += loadedSize;
zds->inPos += loadedSize;
if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
zds->inPos = 0; /* input is consumed */
FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
/* Function modifies the stage so we must break */
break;
}
case zdss_flush:
{ size_t const toFlushSize = zds->outEnd - zds->outStart;
size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
op += flushedSize;
zds->outStart += flushedSize;
if (flushedSize == toFlushSize) { /* flush completed */
zds->streamStage = zdss_read;
if ( (zds->outBuffSize < zds->fParams.frameContentSize)
&& (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
(int)(zds->outBuffSize - zds->outStart),
(U32)zds->fParams.blockSizeMax);
zds->outStart = zds->outEnd = 0;
}
break;
} }
/* cannot complete flush */
someMoreWork = 0;
break;
default:
assert(0); /* impossible */
            RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require the default case to do something */
} }
/* result */
input->pos = (size_t)(ip - (const char*)(input->src));
output->pos = (size_t)(op - (char*)(output->dst));
    /* Update the expected output buffer for ZSTD_bm_stable. */
zds->expectedOutBuffer = *output;
if ((ip==istart) && (op==ostart)) { /* no forward progress */
zds->noForwardProgress ++;
if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
assert(0);
}
} else {
zds->noForwardProgress = 0;
}
{ size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
if (!nextSrcSizeHint) { /* frame fully decoded */
if (zds->outEnd == zds->outStart) { /* output fully flushed */
if (zds->hostageByte) {
if (input->pos >= input->size) {
/* can't release hostage (not present) */
zds->streamStage = zdss_read;
return 1;
}
input->pos++; /* release hostage */
} /* zds->hostageByte */
return 0;
} /* zds->outEnd == zds->outStart */
if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
zds->hostageByte=1;
}
return 1;
} /* nextSrcSizeHint==0 */
nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
assert(zds->inPos <= nextSrcSizeHint);
nextSrcSizeHint -= zds->inPos; /* part already loaded*/
return nextSrcSizeHint;
}
}
size_t ZSTD_decompressStream_simpleArgs (
ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity, size_t* dstPos,
const void* src, size_t srcSize, size_t* srcPos)
{
ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
ZSTD_inBuffer input = { src, srcSize, *srcPos };
    /* ZSTD_decompressStream() will check validity of dstPos and srcPos */
size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
}
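/* Illustrative usage sketch (editor's addition, not part of the upstream source) :
 * a minimal driver around ZSTD_decompressStream() for a single frame. `zds`, `src`
 * and `dst` are hypothetical caller-side names; the sketch assumes the whole
 * compressed frame is already in `src` and that `dst` can hold the decoded data,
 * with error handling reduced to the essentials.
 *
 *   ZSTD_inBuffer  in  = { src, srcSize, 0 };
 *   ZSTD_outBuffer out = { dst, dstCapacity, 0 };
 *   ZSTD_initDStream(zds);
 *   while (in.pos < in.size) {
 *       size_t const hint = ZSTD_decompressStream(zds, &out, &in);
 *       if (ZSTD_isError(hint)) return hint;                 decoding error
 *       if (hint == 0) break;                                frame fully decoded and flushed
 *       if (out.pos == out.size) return ERROR(dstSize_tooSmall);
 *   }
 *   return out.pos;                                          number of decoded bytes
 */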
| linux-master | lib/zstd/decompress/zstd_decompress.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/* zstd_decompress_block :
 * this module takes care of decompressing _compressed_ blocks */
/*-*******************************************************
* Dependencies
*********************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/compiler.h" /* prefetch */
#include "../common/cpu.h" /* bmi2 */
#include "../common/mem.h" /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/zstd_internal.h"
#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
#include "zstd_decompress_block.h"
/*_*******************************************************
* Macros
**********************************************************/
/* These two optional macros each force the use of one of the two
 * ZSTD_decompressSequences implementations. They cannot both be defined
 * at the same time.
*/
#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
#endif
/*_*******************************************************
* Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
/*-*************************************************************
* Block decoding
***************************************************************/
/*! ZSTD_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
blockProperties_t* bpPtr)
{
RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
{ U32 const cBlockHeader = MEM_readLE24(src);
U32 const cSize = cBlockHeader >> 3;
bpPtr->lastBlock = cBlockHeader & 1;
bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
bpPtr->origSize = cSize; /* only useful for RLE */
if (bpPtr->blockType == bt_rle) return 1;
RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
return cSize;
}
}
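/* Worked example (editor's addition) of the 3-byte block header decoded above.
 * Little-endian bytes { 0x21, 0x00, 0x00 } give cBlockHeader = 0x000021 :
 *   lastBlock = 0x21 & 1        = 1   (last block of the frame)
 *   blockType = (0x21 >> 1) & 3 = 0   (bt_raw)
 *   cSize     =  0x21 >> 3      = 4   (4 bytes of block content follow)
 * For a bt_rle block the function returns 1 instead, since only the repeated
 * byte is stored, while origSize keeps the regenerated size.
 */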
/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */
static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
{
if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH)
{
/* room for litbuffer to fit without read faulting */
dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize;
dctx->litBufferLocation = ZSTD_in_dst;
}
else if (litSize > ZSTD_LITBUFFEREXTRASIZE)
{
/* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
if (splitImmediately) {
            /* split immediately : the last ZSTD_LITBUFFEREXTRASIZE bytes of the literals will be copied into litExtraBuffer */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
}
else {
            /* initially this will be stored entirely in dst during huffman decoding; it will be partially shifted to litExtraBuffer afterwards */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
}
dctx->litBufferLocation = ZSTD_split;
}
else
{
/* fits entirely within litExtraBuffer, so no split is necessary */
dctx->litBuffer = dctx->litExtraBuffer;
dctx->litBufferEnd = dctx->litBuffer + litSize;
dctx->litBufferLocation = ZSTD_not_in_dst;
}
}
/* Hidden declaration for fullbench */
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize,
void* dst, size_t dstCapacity, const streaming_operation streaming);
/*! ZSTD_decodeLiteralsBlock() :
* Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
* in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
* block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being
* stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
*
* @return : nb of bytes read from src (< srcSize )
* note : symbol not declared but exposed for fullbench */
size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
void* dst, size_t dstCapacity, const streaming_operation streaming)
{
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
{ const BYTE* const istart = (const BYTE*) src;
symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
switch(litEncType)
{
case set_repeat:
DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
ZSTD_FALLTHROUGH;
case set_compressed:
RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
{ size_t lhSize, litSize, litCSize;
U32 singleStream=0;
U32 const lhlCode = (istart[0] >> 2) & 3;
U32 const lhc = MEM_readLE32(istart);
size_t hufSuccess;
size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
switch(lhlCode)
{
            case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
/* 2 - 2 - 10 - 10 */
singleStream = !lhlCode;
lhSize = 3;
litSize = (lhc >> 4) & 0x3FF;
litCSize = (lhc >> 14) & 0x3FF;
break;
case 2:
/* 2 - 2 - 14 - 14 */
lhSize = 4;
litSize = (lhc >> 4) & 0x3FFF;
litCSize = lhc >> 18;
break;
case 3:
/* 2 - 2 - 18 - 18 */
lhSize = 5;
litSize = (lhc >> 4) & 0x3FFFF;
litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0);
/* prefetch huffman table if cold */
if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
}
if (litEncType==set_repeat) {
if (singleStream) {
hufSuccess = HUF_decompress1X_usingDTable_bmi2(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
} else {
hufSuccess = HUF_decompress4X_usingDTable_bmi2(
dctx->litBuffer, litSize, istart+lhSize, litCSize,
dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
}
} else {
if (singleStream) {
#if defined(HUF_FORCE_DECOMPRESS_X2)
hufSuccess = HUF_decompress1X_DCtx_wksp(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
sizeof(dctx->workspace));
#else
hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
#endif
} else {
hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
dctx->entropy.hufTable, dctx->litBuffer, litSize,
istart+lhSize, litCSize, dctx->workspace,
sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
}
}
if (dctx->litBufferLocation == ZSTD_split)
{
ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
}
RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
dctx->litEntropy = 1;
if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
return litCSize + lhSize;
}
case set_basic:
{ size_t litSize, lhSize;
U32 const lhlCode = ((istart[0]) >> 2) & 3;
size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
switch(lhlCode)
{
            case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
lhSize = 1;
litSize = istart[0] >> 3;
break;
case 1:
lhSize = 2;
litSize = MEM_readLE16(istart) >> 4;
break;
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
if (dctx->litBufferLocation == ZSTD_split)
{
ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
}
else
{
ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize);
}
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litSize = litSize;
dctx->litBufferEnd = dctx->litPtr + litSize;
dctx->litBufferLocation = ZSTD_not_in_dst;
return lhSize+litSize;
}
case set_rle:
{ U32 const lhlCode = ((istart[0]) >> 2) & 3;
size_t litSize, lhSize;
size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
switch(lhlCode)
{
            case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
lhSize = 1;
litSize = istart[0] >> 3;
break;
case 1:
lhSize = 2;
litSize = MEM_readLE16(istart) >> 4;
break;
case 3:
lhSize = 3;
litSize = MEM_readLE24(istart) >> 4;
RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
break;
}
RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
if (dctx->litBufferLocation == ZSTD_split)
{
ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE);
}
else
{
ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize);
}
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
RETURN_ERROR(corruption_detected, "impossible");
}
}
}
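/* Worked example (editor's addition) of the smallest literals-section header
 * handled above. A single header byte 0x28 = 0b00101000 decodes as :
 *   litEncType = 0x28 & 3        = 0   (set_basic, raw literals)
 *   lhlCode    = (0x28 >> 2) & 3 = 2   -> 1-byte header format
 *   litSize    =  0x28 >> 3      = 5
 * so 5 raw literal bytes follow immediately after this 1-byte header.
 */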
/* Default FSE distribution tables.
* These are pre-calculated FSE decoding tables using default distributions as defined in specification :
* https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
 * They were generated programmatically with the following method :
* - start from default distributions, present in /lib/common/zstd_internal.h
* - generate tables normally, using ZSTD_buildFSETable()
 * - print out the content of the tables
 * - prettify the output, reproduced below, and test with a fuzzer to ensure it is correct */
/* Default FSE distribution table for Literal Lengths */
static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
{ 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
/* nextState, nbAddBits, nbBits, baseVal */
{ 0, 0, 4, 0}, { 16, 0, 4, 0},
{ 32, 0, 5, 1}, { 0, 0, 5, 3},
{ 0, 0, 5, 4}, { 0, 0, 5, 6},
{ 0, 0, 5, 7}, { 0, 0, 5, 9},
{ 0, 0, 5, 10}, { 0, 0, 5, 12},
{ 0, 0, 6, 14}, { 0, 1, 5, 16},
{ 0, 1, 5, 20}, { 0, 1, 5, 22},
{ 0, 2, 5, 28}, { 0, 3, 5, 32},
{ 0, 4, 5, 48}, { 32, 6, 5, 64},
{ 0, 7, 5, 128}, { 0, 8, 6, 256},
{ 0, 10, 6, 1024}, { 0, 12, 6, 4096},
{ 32, 0, 4, 0}, { 0, 0, 4, 1},
{ 0, 0, 5, 2}, { 32, 0, 5, 4},
{ 0, 0, 5, 5}, { 32, 0, 5, 7},
{ 0, 0, 5, 8}, { 32, 0, 5, 10},
{ 0, 0, 5, 11}, { 0, 0, 6, 13},
{ 32, 1, 5, 16}, { 0, 1, 5, 18},
{ 32, 1, 5, 22}, { 0, 2, 5, 24},
{ 32, 3, 5, 32}, { 0, 3, 5, 40},
{ 0, 6, 4, 64}, { 16, 6, 4, 64},
{ 32, 7, 5, 128}, { 0, 9, 6, 512},
{ 0, 11, 6, 2048}, { 48, 0, 4, 0},
{ 16, 0, 4, 1}, { 32, 0, 5, 2},
{ 32, 0, 5, 3}, { 32, 0, 5, 5},
{ 32, 0, 5, 6}, { 32, 0, 5, 8},
{ 32, 0, 5, 9}, { 32, 0, 5, 11},
{ 32, 0, 5, 12}, { 0, 0, 6, 15},
{ 32, 1, 5, 18}, { 32, 1, 5, 20},
{ 32, 2, 5, 24}, { 32, 2, 5, 28},
{ 32, 3, 5, 40}, { 32, 4, 5, 48},
{ 0, 16, 6,65536}, { 0, 15, 6,32768},
{ 0, 14, 6,16384}, { 0, 13, 6, 8192},
}; /* LL_defaultDTable */
/* Default FSE distribution table for Offset Codes */
static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
{ 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
/* nextState, nbAddBits, nbBits, baseVal */
{ 0, 0, 5, 0}, { 0, 6, 4, 61},
{ 0, 9, 5, 509}, { 0, 15, 5,32765},
{ 0, 21, 5,2097149}, { 0, 3, 5, 5},
{ 0, 7, 4, 125}, { 0, 12, 5, 4093},
{ 0, 18, 5,262141}, { 0, 23, 5,8388605},
{ 0, 5, 5, 29}, { 0, 8, 4, 253},
{ 0, 14, 5,16381}, { 0, 20, 5,1048573},
{ 0, 2, 5, 1}, { 16, 7, 4, 125},
{ 0, 11, 5, 2045}, { 0, 17, 5,131069},
{ 0, 22, 5,4194301}, { 0, 4, 5, 13},
{ 16, 8, 4, 253}, { 0, 13, 5, 8189},
{ 0, 19, 5,524285}, { 0, 1, 5, 1},
{ 16, 6, 4, 61}, { 0, 10, 5, 1021},
{ 0, 16, 5,65533}, { 0, 28, 5,268435453},
{ 0, 27, 5,134217725}, { 0, 26, 5,67108861},
{ 0, 25, 5,33554429}, { 0, 24, 5,16777213},
}; /* OF_defaultDTable */
/* Default FSE distribution table for Match Lengths */
static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
{ 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
/* nextState, nbAddBits, nbBits, baseVal */
{ 0, 0, 6, 3}, { 0, 0, 4, 4},
{ 32, 0, 5, 5}, { 0, 0, 5, 6},
{ 0, 0, 5, 8}, { 0, 0, 5, 9},
{ 0, 0, 5, 11}, { 0, 0, 6, 13},
{ 0, 0, 6, 16}, { 0, 0, 6, 19},
{ 0, 0, 6, 22}, { 0, 0, 6, 25},
{ 0, 0, 6, 28}, { 0, 0, 6, 31},
{ 0, 0, 6, 34}, { 0, 1, 6, 37},
{ 0, 1, 6, 41}, { 0, 2, 6, 47},
{ 0, 3, 6, 59}, { 0, 4, 6, 83},
{ 0, 7, 6, 131}, { 0, 9, 6, 515},
{ 16, 0, 4, 4}, { 0, 0, 4, 5},
{ 32, 0, 5, 6}, { 0, 0, 5, 7},
{ 32, 0, 5, 9}, { 0, 0, 5, 10},
{ 0, 0, 6, 12}, { 0, 0, 6, 15},
{ 0, 0, 6, 18}, { 0, 0, 6, 21},
{ 0, 0, 6, 24}, { 0, 0, 6, 27},
{ 0, 0, 6, 30}, { 0, 0, 6, 33},
{ 0, 1, 6, 35}, { 0, 1, 6, 39},
{ 0, 2, 6, 43}, { 0, 3, 6, 51},
{ 0, 4, 6, 67}, { 0, 5, 6, 99},
{ 0, 8, 6, 259}, { 32, 0, 4, 4},
{ 48, 0, 4, 4}, { 16, 0, 4, 5},
{ 32, 0, 5, 7}, { 32, 0, 5, 8},
{ 32, 0, 5, 10}, { 32, 0, 5, 11},
{ 0, 0, 6, 14}, { 0, 0, 6, 17},
{ 0, 0, 6, 20}, { 0, 0, 6, 23},
{ 0, 0, 6, 26}, { 0, 0, 6, 29},
{ 0, 0, 6, 32}, { 0, 16, 6,65539},
{ 0, 15, 6,32771}, { 0, 14, 6,16387},
{ 0, 13, 6, 8195}, { 0, 12, 6, 4099},
{ 0, 11, 6, 2051}, { 0, 10, 6, 1027},
}; /* ML_defaultDTable */
static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U8 nbAddBits)
{
void* ptr = dt;
ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
ZSTD_seqSymbol* const cell = dt + 1;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->nbBits = 0;
cell->nextState = 0;
assert(nbAddBits < 255);
cell->nbAdditionalBits = nbAddBits;
cell->baseValue = baseValue;
}
/* ZSTD_buildFSETable() :
* generate FSE decoding table for one symbol (ll, ml or off)
* cannot fail if input is valid =>
* all inputs are presumed validated at this stage */
FORCE_INLINE_TEMPLATE
void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U8* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_seqSymbol* const tableDecode = dt+1;
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U16* symbolNext = (U16*)wksp;
BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
U32 highThreshold = tableSize - 1;
/* Sanity Checks */
assert(maxSymbolValue <= MaxSeq);
assert(tableLog <= MaxFSELog);
assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
(void)wkspSize;
/* Init, lay down lowprob symbols */
{ ZSTD_seqSymbol_header DTableH;
DTableH.tableLog = tableLog;
DTableH.fastMode = 1;
{ S16 const largeLimit= (S16)(1 << (tableLog-1));
U32 s;
for (s=0; s<maxSV1; s++) {
if (normalizedCounter[s]==-1) {
tableDecode[highThreshold--].baseValue = s;
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
assert(normalizedCounter[s]>=0);
symbolNext[s] = (U16)normalizedCounter[s];
} } }
ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
assert(tableSize <= 512);
/* Specialized symbol spreading for the case when there are
* no low probability (-1 count) symbols. When compressing
* small blocks we avoid low probability symbols to hit this
* case, since header decoding speed matters more.
*/
if (highThreshold == tableSize - 1) {
size_t const tableMask = tableSize-1;
size_t const step = FSE_TABLESTEP(tableSize);
/* First lay down the symbols in order.
* We use a uint64_t to lay down 8 bytes at a time. This reduces branch
* misses since small blocks generally have small table logs, so nearly
* all symbols have counts <= 8. We ensure we have 8 bytes at the end of
* our buffer to handle the over-write.
*/
{
U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
for (s=0; s<maxSV1; ++s, sv += add) {
int i;
int const n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
pos += n;
}
}
/* Now we spread those positions across the table.
         * The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
         * We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
size_t s;
size_t const unroll = 2;
assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
for (s = 0; s < (size_t)tableSize; s += unroll) {
size_t u;
for (u = 0; u < unroll; ++u) {
size_t const uPosition = (position + (u * step)) & tableMask;
tableDecode[uPosition].baseValue = spread[s + u];
}
position = (position + (unroll * step)) & tableMask;
}
assert(position == 0);
}
} else {
U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
int const n = normalizedCounter[s];
for (i=0; i<n; i++) {
tableDecode[position].baseValue = s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
} }
assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
/* Build Decoding table */
{
U32 u;
for (u=0; u<tableSize; u++) {
U32 const symbol = tableDecode[u].baseValue;
U32 const nextState = symbolNext[symbol]++;
tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
assert(nbAdditionalBits[symbol] < 255);
tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
tableDecode[u].baseValue = baseValue[symbol];
}
}
}
/* Avoids the FORCE_INLINE of the _body() function. */
static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U8* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U8* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize)
{
ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
#endif
void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
const short* normalizedCounter, unsigned maxSymbolValue,
const U32* baseValue, const U8* nbAdditionalBits,
unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
return;
}
#endif
(void)bmi2;
ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
}
/*! ZSTD_buildSeqTable() :
* @return : nb bytes read from src,
* or an error code if it fails */
static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
symbolEncodingType_e type, unsigned max, U32 maxLog,
const void* src, size_t srcSize,
const U32* baseValue, const U8* nbAdditionalBits,
const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
int bmi2)
{
switch(type)
{
case set_rle :
RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
{ U32 const symbol = *(const BYTE*)src;
U32 const baseline = baseValue[symbol];
U8 const nbBits = nbAdditionalBits[symbol];
ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
}
*DTablePtr = DTableSpace;
return 1;
case set_basic :
*DTablePtr = defaultTable;
return 0;
case set_repeat:
RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
/* prefetch FSE table if used */
if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
const void* const pStart = *DTablePtr;
size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
PREFETCH_AREA(pStart, pSize);
}
return 0;
case set_compressed :
{ unsigned tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
*DTablePtr = DTableSpace;
return headerSize;
}
default :
assert(0);
RETURN_ERROR(GENERIC, "impossible");
}
}
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip = istart;
int nbSeq;
DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
/* check */
RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
/* SeqHead */
nbSeq = *ip++;
if (!nbSeq) {
*nbSeqPtr=0;
RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
return 1;
}
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
ip+=2;
} else {
RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
*nbSeqPtr = nbSeq;
/* FSE table descriptors */
RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
{ symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
ip++;
/* Build DTables */
{ size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
LLtype, MaxLL, LLFSELog,
ip, iend-ip,
LL_base, LL_bits,
LL_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
ZSTD_DCtx_get_bmi2(dctx));
RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += llhSize;
}
{ size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
OFtype, MaxOff, OffFSELog,
ip, iend-ip,
OF_base, OF_bits,
OF_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
ZSTD_DCtx_get_bmi2(dctx));
RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += ofhSize;
}
{ size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
MLtype, MaxML, MLFSELog,
ip, iend-ip,
ML_base, ML_bits,
ML_defaultDTable, dctx->fseEntropy,
dctx->ddictIsCold, nbSeq,
dctx->workspace, sizeof(dctx->workspace),
ZSTD_DCtx_get_bmi2(dctx));
RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
ip += mlhSize;
}
}
return ip-istart;
}
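/* Worked example (editor's addition) of the sequences-section header parsed above.
 * The sequence count occupies 1, 2 or 3 bytes :
 *   first byte 0x24               -> nbSeq = 0x24 = 36
 *   first byte 0x81, then 0x05    -> nbSeq = ((0x81 - 0x80) << 8) + 0x05 = 261
 *   first byte 0xFF, then LE16 n  -> nbSeq = n + LONGNBSEQ
 * The following descriptor byte packs the three table modes :
 *   bits 7-6 = Literals_Lengths_Mode, bits 5-4 = Offsets_Mode,
 *   bits 3-2 = Match_Lengths_Mode, bits 1-0 reserved.
 */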
typedef struct {
size_t litLength;
size_t matchLength;
size_t offset;
} seq_t;
typedef struct {
size_t state;
const ZSTD_seqSymbol* table;
} ZSTD_fseState;
typedef struct {
BIT_DStream_t DStream;
ZSTD_fseState stateLL;
ZSTD_fseState stateOffb;
ZSTD_fseState stateML;
size_t prevOffset[ZSTD_REP_NUM];
} seqState_t;
/*! ZSTD_overlapCopy8() :
* Copies 8 bytes from ip to op and updates op and ip where ip <= op.
* If the offset is < 8 then the offset is spread to at least 8 bytes.
*
* Precondition: *ip <= *op
 * Postcondition: *op - *ip >= 8
*/
HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
assert(*ip <= *op);
if (offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[offset];
(*op)[0] = (*ip)[0];
(*op)[1] = (*ip)[1];
(*op)[2] = (*ip)[2];
(*op)[3] = (*ip)[3];
*ip += dec32table[offset];
ZSTD_copy4(*op+4, *ip);
*ip -= sub2;
} else {
ZSTD_copy8(*op, *ip);
}
*ip += 8;
*op += 8;
assert(*op - *ip >= 8);
}
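/* Worked example (editor's addition) for offset == 2 in the copy above, with the
 * two bytes 'a','b' sitting just before op :
 *   the four byte-by-byte copies write          a b a b
 *   *ip += dec32table[2] (== 2), so ip now points at those freshly written bytes
 *   ZSTD_copy4(op+4, ip) appends                a b a b
 *   *ip -= dec64table[2] (== 8), then both pointers advance by 8
 * Net effect : 8 bytes of the repeating pattern are emitted and op - ip == 8,
 * so the caller can continue with plain 8-byte copies.
 */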
/*! ZSTD_safecopy() :
* Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
* and write up to 16 bytes past oend_w (op >= oend_w is allowed).
* This function is only called in the uncommon case where the sequence is near the end of the block. It
* should be fast for a single long sequence, but can be slow for several short sequences.
*
* @param ovtype controls the overlap detection
* - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
* - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
* The src buffer must be before the dst buffer.
*/
static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
(ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
if (length < 8) {
/* Handle short lengths. */
while (op < oend) *op++ = *ip++;
return;
}
if (ovtype == ZSTD_overlap_src_before_dst) {
/* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
assert(length >= 8);
ZSTD_overlapCopy8(&op, &ip, diff);
length -= 8;
assert(op - ip >= 8);
assert(op <= oend);
}
if (oend <= oend_w) {
/* No risk of overwrite. */
ZSTD_wildcopy(op, ip, length, ovtype);
return;
}
if (op <= oend_w) {
/* Wildcopy until we get close to the end. */
assert(oend > oend_w);
ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
ip += oend_w - op;
op += oend_w - op;
}
/* Handle the leftovers. */
while (op < oend) *op++ = *ip++;
}
/* ZSTD_safecopyDstBeforeSrc():
* This version allows overlap with dst before src, or handles the non-overlap case with dst after src
 * Kept separate from the more common ZSTD_safecopy case to avoid a performance impact in the common case */
static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) {
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
if (length < 8 || diff > -8) {
/* Handle short lengths, close overlaps, and dst not before src. */
while (op < oend) *op++ = *ip++;
return;
}
if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap);
ip += oend - WILDCOPY_OVERLENGTH - op;
op += oend - WILDCOPY_OVERLENGTH - op;
}
/* Handle the leftovers. */
while (op < oend) *op++ = *ip++;
}
/* ZSTD_execSequenceEnd():
* This version handles cases that are near the end of the output buffer. It requires
* more careful checks to make sure there is no overflow. By separating out these hard
* and unlikely cases, we can speed up the common cases.
*
* NOTE: This function needs to be fast for a single long sequence, but doesn't need
* to be optimized for many small sequences, since those fall into ZSTD_execSequence().
*/
FORCE_NOINLINE
size_t ZSTD_execSequenceEnd(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
/* bounds checks : careful of address space overflow in 32-bit mode */
RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
assert(op < op + sequenceLength);
assert(oLitEnd < op + sequenceLength);
/* copy literals */
ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
op = oLitEnd;
*litPtr = iLitEnd;
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
match = dictEnd - (prefixStart - match);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
}
}
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
/* ZSTD_execSequenceEndSplitLitBuffer():
 * This version is intended for cases where the litBuffer is still split. It is kept separate to avoid a performance impact in the common case.
*/
FORCE_NOINLINE
size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* bounds checks : careful of address space overflow in 32-bit mode */
RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
assert(op < op + sequenceLength);
assert(oLitEnd < op + sequenceLength);
/* copy literals */
RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer");
ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength);
op = oLitEnd;
*litPtr = iLitEnd;
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
match = dictEnd - (prefixStart - match);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
}
}
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
HINT_INLINE
size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
/* Handle edge cases in a slow path:
* - Read beyond end of literals
     * - Match end is within WILDCOPY_OVERLENGTH of oend
* - 32-bit mode and the match length overflows
*/
if (UNLIKELY(
iLitEnd > litLimit ||
oMatchEnd > oend_w ||
(MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
assert(op <= oLitEnd /* No overflow */);
assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
assert(oMatchEnd <= oend /* No underflow */);
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
/* Copy Literals:
* Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
* We likely don't need the full 32-byte wildcopy.
*/
assert(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(op, (*litPtr));
if (UNLIKELY(sequence.litLength > 16)) {
ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap);
}
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* Copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
}
}
/* Match within prefix of 1 or more bytes */
assert(op <= oMatchEnd);
assert(oMatchEnd <= oend_w);
assert(match >= prefixStart);
assert(sequence.matchLength >= 1);
/* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
* without overlap checking.
*/
if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
/* We bet on a full wildcopy for matches, since we expect matches to be
* longer than literals (in general). In silesia, ~10% of matches are longer
* than 16 bytes.
*/
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
return sequenceLength;
}
assert(sequence.offset < WILDCOPY_VECLEN);
/* Copy 8 bytes and spread the offset to be >= 8. */
ZSTD_overlapCopy8(&op, &match, sequence.offset);
/* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) {
assert(op < oMatchEnd);
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
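/* Worked example (editor's addition) of one sequence handled above :
 * (litLength = 3, matchLength = 4, offset = 1) first copies 3 bytes from the
 * literals buffer, then copies 4 bytes starting 1 byte before the current output
 * position, i.e. it repeats the last written byte 4 times (run-length behaviour).
 * Since offset < WILDCOPY_VECLEN, the match copy goes through ZSTD_overlapCopy8()
 * so the overlapping copy stays correct.
 */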
HINT_INLINE
size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
/* Handle edge cases in a slow path:
* - Read beyond end of literals
     * - Match end is within WILDCOPY_OVERLENGTH of oend
* - 32-bit mode and the match length overflows
*/
if (UNLIKELY(
iLitEnd > litLimit ||
oMatchEnd > oend_w ||
(MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
    /* Assumptions (everything else goes into ZSTD_execSequenceEndSplitLitBuffer()) */
assert(op <= oLitEnd /* No overflow */);
assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
assert(oMatchEnd <= oend /* No underflow */);
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
/* Copy Literals:
* Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
* We likely don't need the full 32-byte wildcopy.
*/
assert(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(op, (*litPtr));
if (UNLIKELY(sequence.litLength > 16)) {
ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
}
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* Copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
} }
/* Match within prefix of 1 or more bytes */
assert(op <= oMatchEnd);
assert(oMatchEnd <= oend_w);
assert(match >= prefixStart);
assert(sequence.matchLength >= 1);
/* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
* without overlap checking.
*/
if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
/* We bet on a full wildcopy for matches, since we expect matches to be
* longer than literals (in general). In the Silesia corpus, ~10% of matches are longer
* than 16 bytes.
*/
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
return sequenceLength;
}
assert(sequence.offset < WILDCOPY_VECLEN);
/* Copy 8 bytes and spread the offset to be >= 8. */
ZSTD_overlapCopy8(&op, &match, sequence.offset);
/* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) {
assert(op < oMatchEnd);
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
static void
ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
{
const void* ptr = dt;
const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
(U32)DStatePtr->state, DTableH->tableLog);
BIT_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
FORCE_INLINE_TEMPLATE void
ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits)
{
size_t const lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = nextState + lowBits;
}
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
* offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
* bits before reloading. This value is the maximum number of bytes we read
* after reloading when we are decoding long offsets.
*/
#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
(ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
: 0)
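/* Illustrative arithmetic (assuming the usual zstd constants
 * ZSTD_WINDOWLOG_MAX_32 == 30 and STREAM_ACCUMULATOR_MIN_32 == 25):
 * the macro evaluates to 30 - 25 == 5, i.e. at most 5 extra offset bits may
 * still need to be read after a bitstream reload on 32-bit targets.
 * ZSTD_decodeSequence() below static-asserts this value. */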
typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
FORCE_INLINE_TEMPLATE seq_t
ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
{
seq_t seq;
const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
seq.matchLength = mlDInfo->baseValue;
seq.litLength = llDInfo->baseValue;
{ U32 const ofBase = ofDInfo->baseValue;
BYTE const llBits = llDInfo->nbAdditionalBits;
BYTE const mlBits = mlDInfo->nbAdditionalBits;
BYTE const ofBits = ofDInfo->nbAdditionalBits;
BYTE const totalBits = llBits+mlBits+ofBits;
U16 const llNext = llDInfo->nextState;
U16 const mlNext = mlDInfo->nextState;
U16 const ofNext = ofDInfo->nextState;
U32 const llnbBits = llDInfo->nbBits;
U32 const mlnbBits = mlDInfo->nbBits;
U32 const ofnbBits = ofDInfo->nbBits;
/*
* As gcc has better branch and block analyzers, sometimes it is only
* valuable to mark likeliness for clang, where it gives around 3-4% of
* performance.
*/
/* sequence */
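/* Descriptive overview of the offset decoding below: when the offset code
 * carries more than 1 extra bit, a literal offset is read from the bitstream
 * and the three-entry repeat-offset history prevOffset[0..2] is shifted.
 * Otherwise the offset is a repeat code: it selects one of the recent
 * offsets, with a zero literal length (ll0) shifting the selection by one,
 * and the special value 3 meaning prevOffset[0] - 1. */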
{ size_t offset;
#if defined(__clang__)
if (LIKELY(ofBits > 1)) {
#else
if (ofBits > 1) {
#endif
ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
assert(ofBits <= MaxOff);
if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
BIT_reloadDStream(&seqState->DStream);
if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
} else {
offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
} else {
U32 const ll0 = (llDInfo->baseValue == 0);
if (LIKELY((ofBits == 0))) {
offset = seqState->prevOffset[ll0];
seqState->prevOffset[1] = seqState->prevOffset[!ll0];
seqState->prevOffset[0] = offset;
} else {
offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
{ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} } }
seq.offset = offset;
}
#if defined(__clang__)
if (UNLIKELY(mlBits > 0))
#else
if (mlBits > 0)
#endif
seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
BIT_reloadDStream(&seqState->DStream);
if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
BIT_reloadDStream(&seqState->DStream);
/* Ensure there are enough bits to read the rest of data in 64-bit mode. */
ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
#if defined(__clang__)
if (UNLIKELY(llBits > 0))
#else
if (llBits > 0)
#endif
seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
if (MEM_32bits())
BIT_reloadDStream(&seqState->DStream);
DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
}
return seq;
}
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
{
size_t const windowSize = dctx->fParams.windowSize;
/* No dictionary used. */
if (dctx->dictContentEndForFuzzing == NULL) return 0;
/* Dictionary is our prefix. */
if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
/* Dictionary is not our ext-dict. */
if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
/* Dictionary is not within our window size. */
if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
/* Dictionary is active. */
return 1;
}
MEM_STATIC void ZSTD_assertValidSequence(
ZSTD_DCtx const* dctx,
BYTE const* op, BYTE const* oend,
seq_t const seq,
BYTE const* prefixStart, BYTE const* virtualStart)
{
#if DEBUGLEVEL >= 1
size_t const windowSize = dctx->fParams.windowSize;
size_t const sequenceSize = seq.litLength + seq.matchLength;
BYTE const* const oLitEnd = op + seq.litLength;
DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
assert(op <= oend);
assert((size_t)(oend - op) >= sequenceSize);
assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
/* Offset must be within the dictionary. */
assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
assert(seq.offset <= windowSize + dictSize);
} else {
/* Offset must be within our window. */
assert(seq.offset <= windowSize);
}
#else
(void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
#endif
}
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
FORCE_INLINE_TEMPLATE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd;
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer");
(void)frame;
/* Regen sequences */
if (nbSeq) {
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
assert(dst != NULL);
ZSTD_STATIC_ASSERT(
BIT_DStream_unfinished < BIT_DStream_completed &&
BIT_DStream_endOfBuffer < BIT_DStream_completed &&
BIT_DStream_completed < BIT_DStream_overflow);
/* decompress without overrunning litPtr begins */
{
seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
/* Align the decompression loop to 32 + 16 bytes.
*
* zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
* speed swings based on the alignment of the decompression loop. This
* performance swing is caused by parts of the decompression loop falling
* out of the DSB. The entire decompression loop should fit in the DSB,
* when it can't we get much worse performance. You can measure if you've
* hit the good case or the bad case with this perf command for some
* compressed file test.zst:
*
* perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
* -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
*
* If you see most cycles served out of the MITE you've hit the bad case.
* If you see most cycles served out of the DSB you've hit the good case.
* If it is pretty even then you may be in an okay case.
*
* This issue has been reproduced on the following CPUs:
* - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
* Use Instruments->Counters to get DSB/MITE cycles.
* I never got performance swings, but I was able to
* go from the good case of mostly DSB to half of the
* cycles served from MITE.
* - Coffeelake: Intel i9-9900k
* - Coffeelake: Intel i7-9700k
*
* I haven't been able to reproduce the instability or DSB misses on any
* of the following CPUS:
* - Haswell
* - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
* - Skylake
*
* Alignment is done for each of the three major decompression loops:
* - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer
* - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer
* - ZSTD_decompressSequences_body
* Alignment choices are made to minimize large swings on bad cases and influence on performance
* from changes external to this code, rather than to overoptimize on the current commit.
*
* If you are seeing performance instability, this script can help test.
* It tests on 4 commits in zstd where I saw performance change.
*
* https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
*/
#if defined(__x86_64__)
__asm__(".p2align 6");
# if __GNUC__ >= 7
/* good for gcc-7, gcc-9, and gcc-11 */
__asm__("nop");
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 4");
# if __GNUC__ == 8 || __GNUC__ == 10
/* good for gcc-8 and gcc-10 */
__asm__("nop");
__asm__(".p2align 3");
# endif
# endif
#endif
/* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) {
size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (UNLIKELY(!--nbSeq))
break;
BIT_reloadDStream(&(seqState.DStream));
sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
}
/* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
if (nbSeq > 0) {
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
{
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequence.litLength -= leftoverLit;
op += leftoverLit;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
{
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (--nbSeq)
BIT_reloadDStream(&(seqState.DStream));
}
}
}
if (nbSeq > 0) /* there is remaining lit from extra buffer */
{
#if defined(__x86_64__)
__asm__(".p2align 6");
__asm__("nop");
# if __GNUC__ != 7
/* worse for gcc-7, better for gcc-8, gcc-9, gcc-10, and clang */
__asm__(".p2align 4");
__asm__("nop");
__asm__(".p2align 3");
# elif __GNUC__ >= 11
__asm__(".p2align 3");
# else
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 3");
# endif
#endif
for (; ; ) {
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (UNLIKELY(!--nbSeq))
break;
BIT_reloadDStream(&(seqState.DStream));
}
}
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
{
size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
}
{ size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
FORCE_INLINE_TEMPLATE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
DEBUGLOG(5, "ZSTD_decompressSequences_body");
(void)frame;
/* Regen sequences */
if (nbSeq) {
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
assert(dst != NULL);
ZSTD_STATIC_ASSERT(
BIT_DStream_unfinished < BIT_DStream_completed &&
BIT_DStream_endOfBuffer < BIT_DStream_completed &&
BIT_DStream_completed < BIT_DStream_overflow);
#if defined(__x86_64__)
__asm__(".p2align 6");
__asm__("nop");
# if __GNUC__ >= 7
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 3");
# else
__asm__(".p2align 4");
__asm__("nop");
__asm__(".p2align 3");
# endif
#endif
for ( ; ; ) {
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (UNLIKELY(!--nbSeq))
break;
BIT_reloadDStream(&(seqState.DStream));
}
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
static size_t
ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
static size_t
ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
FORCE_INLINE_TEMPLATE size_t
ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
const BYTE* const prefixStart, const BYTE* const dictEnd)
{
prefetchPos += sequence.litLength;
{ const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : memory address is only used for prefetching, not for dereferencing */
PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
return prefetchPos + sequence.matchLength;
}
/* This decoding function employs prefetching
* to reduce latency impact of cache misses.
* It's generally employed when block contains a significant portion of long-distance matches
* or when coupled with a "cold" dictionary */
FORCE_INLINE_TEMPLATE size_t
ZSTD_decompressSequencesLong_body(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd;
const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
(void)frame;
/* Regen sequences */
if (nbSeq) {
#define STORED_SEQS 8
#define STORED_SEQS_MASK (STORED_SEQS-1)
#define ADVANCED_SEQS STORED_SEQS
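/* The queue below decodes sequences ADVANCED_SEQS (== STORED_SEQS == 8) ahead
 * of execution and keeps them in a small ring buffer indexed with
 * STORED_SEQS_MASK, so each queued sequence's match can be prefetched well
 * before it is copied. */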
seq_t sequences[STORED_SEQS];
int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
seqState_t seqState;
int seqNb;
size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */
dctx->fseEntropy = 1;
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
assert(dst != NULL);
assert(iend >= ip);
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
/* prepare in advance */
for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb] = sequence;
}
RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
/* decompress without stomping litBuffer */
for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) {
seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t oneSeqSize;
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd)
{
/* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
{
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
op += leftoverLit;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
else
{
/* lit buffer is either wholly contained in first or second split, or not split at all*/
oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
}
RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
/* finish queue */
seqNb -= seqAdvance;
for ( ; seqNb<nbSeq ; seqNb++) {
seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd)
{
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
{
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequence->litLength -= leftoverLit;
op += leftoverLit;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
{
size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
}
else
{
size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
}
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */
{
size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
}
{ size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
static size_t
ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
#if DYNAMIC_BMI2
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static BMI2_TARGET_ATTRIBUTE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
static BMI2_TARGET_ATTRIBUTE size_t
DONT_VECTORIZE
ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
static BMI2_TARGET_ATTRIBUTE size_t
ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
#endif /* DYNAMIC_BMI2 */
typedef size_t (*ZSTD_decompressSequences_t)(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame);
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
static size_t
ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
DEBUGLOG(5, "ZSTD_decompressSequences");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif
return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
static size_t
ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif
return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
/* ZSTD_decompressSequencesLong() :
* decompression function triggered when a minimum share of offsets is considered "long",
* aka out of cache.
* note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
* This function will try to mitigate main memory latency through the use of prefetching */
static size_t
ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
DEBUGLOG(5, "ZSTD_decompressSequencesLong");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif
return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
/* ZSTD_getLongOffsetsShare() :
* condition : offTable must be valid
* @return : "share" of long offsets (arbitrarily defined as > (1<<23))
* compared to maximum possible of (1<<OffFSELog) */
static unsigned
ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
{
const void* ptr = offTable;
U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
const ZSTD_seqSymbol* table = offTable + 1;
U32 const max = 1 << tableLog;
U32 u, total = 0;
DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
assert(max <= (1 << OffFSELog)); /* max not too large */
for (u=0; u<max; u++) {
if (table[u].nbAdditionalBits > 22) total += 1;
}
assert(tableLog <= OffFSELog);
total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
return total;
}
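/* Illustrative arithmetic (assuming OffFSELog == 8): the share is scaled to a
 * maximum of 1<<8 == 256, so the minShare thresholds applied by the caller
 * (7 on 64-bit, 20 on 32-bit) correspond to roughly 7/256 ~= 2.73% and
 * 20/256 ~= 7.81% of offset cells requiring more than 22 extra bits. */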
#endif
size_t
ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, const int frame, const streaming_operation streaming)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
/* isLongOffset must be true if there are long offsets.
* Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
* We don't expect that to be the case in 64-bit mode.
* In block mode, window size is not known, so we have to be conservative.
* (note: but it could be evaluated from current-lowLimit)
*/
ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
/* Decode literals section */
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
}
/* Build Decoding Tables */
{
/* These macros control at build-time which decompressor implementation
* we use. If neither is defined, we do some inspection and dispatch at
* runtime.
*/
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
int usePrefetchDecoder = dctx->ddictIsCold;
#endif
int nbSeq;
size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
if (ZSTD_isError(seqHSize)) return seqHSize;
ip += seqHSize;
srcSize -= seqHSize;
RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
if ( !usePrefetchDecoder
&& (!frame || (dctx->fParams.windowSize > (1<<24)))
&& (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
usePrefetchDecoder = (shareLongOffsets >= minShare);
}
#endif
dctx->ddictIsCold = 0;
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
if (usePrefetchDecoder)
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
#endif
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
/* else */
if (dctx->litBufferLocation == ZSTD_split)
return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
else
return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
#endif
}
}
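/* ZSTD_checkContinuity() :
 * when the destination buffer is not contiguous with the previous block's
 * output, the previously produced segment is re-labelled as the external
 * dictionary (dictEnd / virtualStart are updated) so that matches may still
 * reference it, and prefixStart restarts at the new destination. */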
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
{
if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
dctx->prefixStart = dst;
dctx->previousDstEnd = dst;
}
}
size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
size_t dSize;
ZSTD_checkContinuity(dctx, dst, dstCapacity);
dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming);
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
}
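/* Usage sketch (illustrative only; assumes a ZSTD_DCtx has already been
 * prepared with the appropriate entropy tables, e.g. through the frame-level
 * decompression path):
 *
 *   size_t const rSize = ZSTD_decompressBlock(dctx, dst, dstCapacity, src, srcSize);
 *   if (ZSTD_isError(rSize)) { ... handle corrupted input ... }
 *   // otherwise rSize is the number of bytes regenerated into dst
 */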
| linux-master | lib/zstd/decompress/zstd_decompress_block.c |
/* ******************************************************************
* Common functions of New Generation Entropy library
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* *************************************
* Dependencies
***************************************/
#include "mem.h"
#include "error_private.h" /* ERR_*, ERROR */
#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
#include "huf.h"
/*=== Version ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
/*=== Error Management ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
unsigned HUF_isError(size_t code) { return ERR_isError(code); }
const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static U32 FSE_ctz(U32 val)
{
assert(val != 0);
{
# if (__GNUC__ >= 3) /* GCC Intrinsic */
return __builtin_ctz(val);
# else /* Software version */
U32 count = 0;
while ((val & 1) == 0) {
val >>= 1;
++count;
}
return count;
# endif
}
}
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
unsigned const maxSV1 = *maxSVPtr + 1;
int previous0 = 0;
if (hbSize < 8) {
/* This function only works when hbSize >= 8 */
char buffer[8] = {0};
ZSTD_memcpy(buffer, headerBuffer, hbSize);
{ size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
buffer, sizeof(buffer));
if (FSE_isError(countSize)) return countSize;
if (countSize > hbSize) return ERROR(corruption_detected);
return countSize;
} }
assert(hbSize >= 8);
/* init */
ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
for (;;) {
if (previous0) {
/* Count the number of repeats. Each time the
* 2-bit repeat code is 0b11 there is another
* repeat.
* Avoid UB by setting the high bit to 1.
*/
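/* Worked example : if the low bits of bitStream, read as 2-bit fields
 * starting from the LSB, are 0b11, 0b11, 0b10, then bitStream has 4
 * trailing one-bits, so repeats == FSE_ctz(~bitStream | 0x80000000) >> 1
 * == 2 (6 zero-count symbols); the final non-0b11 field (value 2) adds
 * 2 more symbols further below. */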
int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
while (repeats >= 12) {
charnum += 3 * 12;
if (LIKELY(ip <= iend-7)) {
ip += 3;
} else {
bitCount -= (int)(8 * (iend - 7 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
}
charnum += 3 * repeats;
bitStream >>= 2 * repeats;
bitCount += 2 * repeats;
/* Add the final repeat which isn't 0b11. */
assert((bitStream & 3) < 3);
charnum += bitStream & 3;
bitCount += 2;
/* This is an error, but break and return an error
* at the end, because returning out of a loop makes
* it harder for the compiler to optimize.
*/
if (charnum >= maxSV1) break;
/* We don't need to set the normalized count to 0
* because we already memset the whole buffer to 0.
*/
if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
assert((bitCount >> 3) <= 3); /* For first condition to work */
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
}
{
int const max = (2*threshold-1) - remaining;
int count;
if ((bitStream & (threshold-1)) < (U32)max) {
count = bitStream & (threshold-1);
bitCount += nbBits-1;
} else {
count = bitStream & (2*threshold-1);
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
/* When it matters (small blocks), this is a
* predictable branch, because we don't use -1.
*/
if (count >= 0) {
remaining -= count;
} else {
assert(count == -1);
remaining += count;
}
normalizedCounter[charnum++] = (short)count;
previous0 = !count;
assert(threshold > 1);
if (remaining < threshold) {
/* This branch can be folded into the
* threshold update condition because we
* know that threshold > 1.
*/
if (remaining <= 1) break;
nbBits = BIT_highbit32(remaining) + 1;
threshold = 1 << (nbBits - 1);
}
if (charnum >= maxSV1) break;
if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
bitCount &= 31;
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> bitCount;
} }
if (remaining != 1) return ERROR(corruption_detected);
/* Only possible when there are too many zeros. */
if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
if (bitCount > 32) return ERROR(corruption_detected);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
return ip-istart;
}
/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_readNCount_body_default(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif
size_t FSE_readNCount_bmi2(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
#endif
(void)bmi2;
return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
}
size_t FSE_readNCount(
short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
}
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
`huffWeight` is destination buffer.
`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
@return : size read from `src` , or an error Code .
Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
}
FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
int bmi2)
{
U32 weightTotal;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
/* ZSTD_memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) { /* special header */
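/* Direct representation : two 4-bit weights per byte, high nibble first;
 * oSize = iSize - 127 is the number of weights, and (oSize+1)/2 bytes
 * follow the header byte. */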
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
{ U32 n;
for (n=0; n<oSize; n+=2) {
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
} } }
else { /* header compressed with FSE (normal case) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
/* max (hwSize-1) values decoded, as last one is implied */
oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
weightTotal = 0;
{ U32 n; for (n=0; n<oSize; n++) {
if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
} }
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
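/* Worked example : if the decoded weights are {2,1,1}, weightTotal == 4,
 * so tableLog == 3 and rest == 8 - 4 == 4, a clean power of 2, giving the
 * implied last symbol a weight of 3 ((1<<3)>>1 == 4 completes the total). */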
{ U32 const tableLog = BIT_highbit32(weightTotal) + 1;
if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
*tableLogPtr = tableLog;
/* determine last weight */
{ U32 const total = 1 << tableLog;
U32 const rest = total - weightTotal;
U32 const verif = 1 << BIT_highbit32(rest);
U32 const lastWeight = BIT_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
} }
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
return iSize+1;
}
/* Avoids the FORCE_INLINE of the _body() function. */
static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
{
return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
}
#if DYNAMIC_BMI2
static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize)
{
return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
}
#endif
size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize,
void* workSpace, size_t wkspSize,
int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
#endif
(void)bmi2;
return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
}
| linux-master | lib/zstd/common/entropy_common.c |
/* ******************************************************************
* FSE : Finite State Entropy decoder
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
* - Public forum : https://groups.google.com/forum/#!forum/lz4c
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
* Includes
****************************************************************/
#include "debug.h" /* assert */
#include "bitstream.h"
#include "compiler.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#include "error_private.h"
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h"
/* **************************************************************
* Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
/* **************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
/* Function templates */
FSE_DTable* FSE_createDTable (unsigned tableLog)
{
if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}
void FSE_freeDTable (FSE_DTable* dt)
{
ZSTD_free(dt);
}
static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
U16* symbolNext = (U16*)workSpace;
BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
/* Sanity Checks */
if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
{ FSE_DTableHeader DTableH;
DTableH.tableLog = (U16)tableLog;
DTableH.fastMode = 1;
{ S16 const largeLimit= (S16)(1 << (tableLog-1));
U32 s;
for (s=0; s<maxSV1; s++) {
if (normalizedCounter[s]==-1) {
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
symbolNext[s] = normalizedCounter[s];
} } }
ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
if (highThreshold == tableSize - 1) {
size_t const tableMask = tableSize-1;
size_t const step = FSE_TABLESTEP(tableSize);
/* First lay down the symbols in order.
* We use a uint64_t to lay down 8 bytes at a time. This reduces branch
* misses since small blocks generally have small table logs, so nearly
* all symbols have counts <= 8. We ensure we have 8 bytes at the end of
* our buffer to handle the over-write.
*/
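/* Note on the write pattern : add == 0x0101010101010101 and sv starts at 0,
 * so by the time symbol s is written every byte of sv equals s; MEM_write64
 * therefore stamps 8 copies of the current symbol at once, and any bytes
 * written past the true count n are overwritten by the next symbol since
 * pos only advances by n. */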
{
U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
for (s=0; s<maxSV1; ++s, sv += add) {
int i;
int const n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
pos += n;
}
}
/* Now we spread those positions across the table.
* The benefit of doing it in two stages is that we avoid the
* variable size inner loop, which caused lots of branch misses.
* Now we can run through all the positions without any branch misses.
* We unroll the loop twice, since that is what empirically worked best.
*/
{
size_t position = 0;
size_t s;
size_t const unroll = 2;
assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
for (s = 0; s < (size_t)tableSize; s += unroll) {
size_t u;
for (u = 0; u < unroll; ++u) {
size_t const uPosition = (position + (u * step)) & tableMask;
tableDecode[uPosition].symbol = spread[s + u];
}
position = (position + (unroll * step)) & tableMask;
}
assert(position == 0);
}
} else {
U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
for (i=0; i<normalizedCounter[s]; i++) {
tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
} }
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
/* Build Decoding table */
{ U32 u;
for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
U32 const nextState = symbolNext[symbol]++;
tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
return 0;
}
size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
}
#ifndef FSE_COMMONDEFS_ONLY
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSV1 = tableMask+1;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<maxSV1; s++) {
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BIT_DStream_t bitD;
FSE_DState_t state1;
FSE_DState_t state2;
/* Init */
CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
op[0] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[1] = FSE_GETSYMBOL(&state2);
if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
op[2] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[3] = FSE_GETSYMBOL(&state2);
}
/* tail */
/* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
while (1) {
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSE_GETSYMBOL(&state1);
if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
*op++ = FSE_GETSYMBOL(&state2);
break;
}
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSE_GETSYMBOL(&state2);
if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
*op++ = FSE_GETSYMBOL(&state1);
break;
} }
return op-ostart;
}
size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
const void* ptr = dt;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
}
typedef struct {
short ncount[FSE_MAX_SYMBOL_VALUE + 1];
FSE_DTable dtable[1]; /* Dynamically sized */
} FSE_DecompressWksp;
FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize,
unsigned maxLog, void* workSpace, size_t wkspSize,
int bmi2)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
/* normal FSE decoding mode */
{
size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
if (FSE_isError(NCountLength)) return NCountLength;
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
assert(NCountLength <= cSrcSize);
ip += NCountLength;
cSrcSize -= NCountLength;
}
if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
{
const void* ptr = wksp->dtable;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
}
}
/* Avoids the FORCE_INLINE of the _body() function. */
static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
}
#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
{
return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
}
#endif
size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
{
#if DYNAMIC_BMI2
if (bmi2) {
return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
#endif
(void)bmi2;
return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
}
typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
#endif /* FSE_COMMONDEFS_ONLY */
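/*
 * Illustrative usage sketch (added for this edit, not part of the original
 * source): a caller of FSE_decompress_wksp() supplies a scratch buffer of at
 * least FSE_DECOMPRESS_WKSP_SIZE(maxLog, maxSymbolValue) bytes, e.g. with
 * hypothetical dst/src buffers:
 *   U32 wksp[FSE_DECOMPRESS_WKSP_SIZE(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE) / sizeof(U32)];
 *   size_t const r = FSE_decompress_wksp(dst, dstCapacity, src, srcSize,
 *                                        FSE_MAX_TABLELOG, wksp, sizeof(wksp));
 *   if (FSE_isError(r))
 *       return r;                 (r is an error code)
 *   (otherwise r is the number of regenerated bytes written to dst)
 */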
| linux-master | lib/zstd/common/fse_decompress.c |
/* ******************************************************************
* debug
* Part of FSE library
* Copyright (c) Yann Collet, Facebook, Inc.
*
* You can contact the author at :
* - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/*
* This module only hosts one global variable
* which can be used to dynamically influence the verbosity of traces,
* such as DEBUGLOG and RAWLOG
*/
#include "debug.h"
int g_debuglevel = DEBUGLEVEL;
| linux-master | lib/zstd/common/debug.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*-*************************************
* Dependencies
***************************************/
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
#include "error_private.h"
#include "zstd_internal.h"
/*-****************************************
* Version
******************************************/
unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
/*-****************************************
* ZSTD Error Management
******************************************/
#undef ZSTD_isError /* defined within zstd_internal.h */
/*! ZSTD_isError() :
* tells if a return value is an error code
* symbol is required for external callers */
unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
/*! ZSTD_getErrorName() :
* provides error code string from function result (useful for debugging) */
const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*! ZSTD_getError() :
* convert a `size_t` function result into a proper ZSTD_errorCode enum */
ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
/*! ZSTD_getErrorString() :
* provides error code string from enum */
const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
/*=**************************************************************
* Custom allocator
****************************************************************/
void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
{
if (customMem.customAlloc)
return customMem.customAlloc(customMem.opaque, size);
return ZSTD_malloc(size);
}
void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
{
if (customMem.customAlloc) {
/* calloc implemented as malloc+memset;
* not as efficient as calloc, but next best guess for custom malloc */
void* const ptr = customMem.customAlloc(customMem.opaque, size);
ZSTD_memset(ptr, 0, size);
return ptr;
}
return ZSTD_calloc(1, size);
}
void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
{
if (ptr!=NULL) {
if (customMem.customFree)
customMem.customFree(customMem.opaque, ptr);
else
ZSTD_free(ptr);
}
}
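/*
 * Illustrative sketch (added, not part of the original source): callers route
 * allocations through their own hooks by filling in a ZSTD_customMem, e.g.
 * with hypothetical my_alloc()/my_free() helpers and a my_ctx cookie:
 *   ZSTD_customMem mem;
 *   mem.customAlloc = my_alloc;
 *   mem.customFree  = my_free;
 *   mem.opaque      = my_ctx;
 *   ptr = ZSTD_customMalloc(size, mem);     (allocated via my_alloc)
 *   ZSTD_customFree(ptr, mem);              (released via my_free)
 * With both hooks left NULL, the helpers above fall back to ZSTD_malloc(),
 * ZSTD_calloc() and ZSTD_free().
 */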
| linux-master | lib/zstd/common/zstd_common.c |
/*
* Copyright (c) Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/* The purpose of this file is to have a single list of error strings embedded in binary */
#include "error_private.h"
const char* ERR_getErrorString(ERR_enum code)
{
#ifdef ZSTD_STRIP_ERROR_STRINGS
(void)code;
return "Error strings stripped";
#else
static const char* const notErrorCode = "Unspecified error code";
switch( code )
{
case PREFIX(no_error): return "No error detected";
case PREFIX(GENERIC): return "Error (generic)";
case PREFIX(prefix_unknown): return "Unknown frame descriptor";
case PREFIX(version_unsupported): return "Version not supported";
case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
case PREFIX(corruption_detected): return "Corrupted block detected";
case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
case PREFIX(parameter_unsupported): return "Unsupported parameter";
case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
case PREFIX(init_missing): return "Context should be init first";
case PREFIX(memory_allocation): return "Allocation error : not enough memory";
case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
case PREFIX(dictionary_wrong): return "Dictionary mismatch";
case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
case PREFIX(srcSize_wrong): return "Src size is incorrect";
case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
/* following error codes are not stable and may be removed or changed in a future version */
case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
case PREFIX(maxCode):
default: return notErrorCode;
}
#endif
}
| linux-master | lib/zstd/common/error_private.c |
/* inflate.c -- zlib decompression
* Copyright (C) 1995-2005 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Based on zlib 1.2.3 but modified for the Linux Kernel by
* Richard Purdie <[email protected]>
*
* Changes mainly for static instead of dynamic memory allocation
*
*/
#include <linux/zutil.h>
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#include "infutil.h"
/* architecture-specific bits */
#ifdef CONFIG_ZLIB_DFLTCC
# include "../zlib_dfltcc/dfltcc_inflate.h"
#else
#define INFLATE_RESET_HOOK(strm) do {} while (0)
#define INFLATE_TYPEDO_HOOK(strm, flush) do {} while (0)
#define INFLATE_NEED_UPDATEWINDOW(strm) 1
#define INFLATE_NEED_CHECKSUM(strm) 1
#endif
int zlib_inflate_workspacesize(void)
{
return sizeof(struct inflate_workspace);
}
int zlib_inflateReset(z_streamp strm)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
strm->total_in = strm->total_out = state->total = 0;
strm->msg = NULL;
strm->adler = 1; /* to support ill-conceived Java test suite */
state->mode = HEAD;
state->last = 0;
state->havedict = 0;
state->dmax = 32768U;
state->hold = 0;
state->bits = 0;
state->lencode = state->distcode = state->next = state->codes;
/* Initialise Window */
state->wsize = 1U << state->wbits;
state->write = 0;
state->whave = 0;
INFLATE_RESET_HOOK(strm);
return Z_OK;
}
int zlib_inflateInit2(z_streamp strm, int windowBits)
{
struct inflate_state *state;
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL; /* in case we return an error */
state = &WS(strm)->inflate_state;
strm->state = (struct internal_state *)state;
if (windowBits < 0) {
state->wrap = 0;
windowBits = -windowBits;
}
else {
state->wrap = (windowBits >> 4) + 1;
}
if (windowBits < 8 || windowBits > 15) {
return Z_STREAM_ERROR;
}
state->wbits = (unsigned)windowBits;
#ifdef CONFIG_ZLIB_DFLTCC
/*
* DFLTCC requires the window to be page aligned.
* Thus, we overallocate and take the aligned portion of the buffer.
*/
state->window = PTR_ALIGN(&WS(strm)->working_window[0], PAGE_SIZE);
#else
state->window = &WS(strm)->working_window[0];
#endif
return zlib_inflateReset(strm);
}
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. This returns fixed tables from inffixed.h.
*/
static void zlib_fixedtables(struct inflate_state *state)
{
# include "inffixed.h"
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
/*
Update the window with the last wsize (normally 32K) bytes written before
returning. This is only called when a window is already in use, or when
output has been written during this inflate call, but the end of the deflate
stream has not been reached yet. It is also called to window dictionary data
when a dictionary is loaded.
Providing output buffers larger than 32K to inflate() should provide a speed
advantage, since only the last 32K of output is copied to the sliding window
upon return from inflate(), and since all distances after the first 32K of
output will fall in the output data, making match copies simpler and faster.
The advantage may be dependent on the size of the processor's data caches.
*/
static void zlib_updatewindow(z_streamp strm, unsigned out)
{
struct inflate_state *state;
unsigned copy, dist;
state = (struct inflate_state *)strm->state;
/* copy state->wsize or less output bytes into the circular window */
copy = out - strm->avail_out;
if (copy >= state->wsize) {
memcpy(state->window, strm->next_out - state->wsize, state->wsize);
state->write = 0;
state->whave = state->wsize;
}
else {
dist = state->wsize - state->write;
if (dist > copy) dist = copy;
memcpy(state->window + state->write, strm->next_out - copy, dist);
copy -= dist;
if (copy) {
memcpy(state->window, strm->next_out - copy, copy);
state->write = copy;
state->whave = state->wsize;
}
else {
state->write += dist;
if (state->write == state->wsize) state->write = 0;
if (state->whave < state->wsize) state->whave += dist;
}
}
}
/*
* At the end of a Deflate-compressed PPP packet, we expect to have seen
* a `stored' block type value but not the (zero) length bytes.
*/
/*
Returns true if inflate is currently at the end of a block generated by
Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
implementation to provide an additional safety check. PPP uses
Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
block. When decompressing, PPP checks that at the end of input packet,
inflate is waiting for these length bytes.
*/
static int zlib_inflateSyncPacket(z_streamp strm)
{
struct inflate_state *state;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->mode == STORED && state->bits == 0) {
state->mode = TYPE;
return Z_OK;
}
return Z_DATA_ERROR;
}
/* Macros for inflate(): */
/* check function to use adler32() for zlib or crc32() for gzip */
#define UPDATE(check, buf, len) zlib_adler32(check, buf, len)
/* Load registers with state in inflate() for speed */
#define LOAD() \
do { \
put = strm->next_out; \
left = strm->avail_out; \
next = strm->next_in; \
have = strm->avail_in; \
hold = state->hold; \
bits = state->bits; \
} while (0)
/* Restore state from registers in inflate() */
#define RESTORE() \
do { \
strm->next_out = put; \
strm->avail_out = left; \
strm->next_in = next; \
strm->avail_in = have; \
state->hold = hold; \
state->bits = bits; \
} while (0)
/* Clear the input bit accumulator */
#define INITBITS() \
do { \
hold = 0; \
bits = 0; \
} while (0)
/* Get a byte of input into the bit accumulator, or return from inflate()
if there is no input available. */
#define PULLBYTE() \
do { \
if (have == 0) goto inf_leave; \
have--; \
hold += (unsigned long)(*next++) << bits; \
bits += 8; \
} while (0)
/* Assure that there are at least n bits in the bit accumulator. If there is
not enough available input to do that, then return from inflate(). */
#define NEEDBITS(n) \
do { \
while (bits < (unsigned)(n)) \
PULLBYTE(); \
} while (0)
/* Return the low n bits of the bit accumulator (n < 16) */
#define BITS(n) \
((unsigned)hold & ((1U << (n)) - 1))
/* Remove n bits from the bit accumulator */
#define DROPBITS(n) \
do { \
hold >>= (n); \
bits -= (unsigned)(n); \
} while (0)
/* Remove zero to seven bits as needed to go to a byte boundary */
#define BYTEBITS() \
do { \
hold >>= bits & 7; \
bits -= bits & 7; \
} while (0)
/*
inflate() uses a state machine to process as much input data and generate as
much output data as possible before returning. The state machine is
structured roughly as follows:
for (;;) switch (state) {
...
case STATEn:
if (not enough input data or output space to make progress)
return;
... make progress ...
state = STATEm;
break;
...
}
so when inflate() is called again, the same case is attempted again, and
if the appropriate resources are provided, the machine proceeds to the
next state. The NEEDBITS() macro is usually the way the state evaluates
whether it can proceed or should return. NEEDBITS() does the return if
the requested bits are not available. The typical use of the BITS macros
is:
NEEDBITS(n);
... do something with BITS(n) ...
DROPBITS(n);
where NEEDBITS(n) either returns from inflate() if there isn't enough
input left to load n bits into the accumulator, or it continues. BITS(n)
gives the low n bits in the accumulator. When done, DROPBITS(n) drops
the low n bits off the accumulator. INITBITS() clears the accumulator
and sets the number of available bits to zero. BYTEBITS() discards just
enough bits to put the accumulator on a byte boundary. After BYTEBITS()
and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
if there is no input available. The decoding of variable length codes uses
PULLBYTE() directly in order to pull just enough bytes to decode the next
code, and no more.
Some states loop until they get enough input, making sure that enough
state information is maintained to continue the loop where it left off
if NEEDBITS() returns in the loop. For example, want, need, and keep
would all have to actually be part of the saved state in case NEEDBITS()
returns:
case STATEw:
while (want < need) {
NEEDBITS(n);
keep[want++] = BITS(n);
DROPBITS(n);
}
state = STATEx;
case STATEx:
As shown above, if the next state is also the next case, then the break
is omitted.
A state may also return if there is not enough output space available to
complete that state. Those states are copying stored data, writing a
literal byte, and copying a matching string.
When returning, a "goto inf_leave" is used to update the total counters,
update the check value, and determine whether any progress has been made
during that inflate() call in order to return the proper return code.
Progress is defined as a change in either strm->avail_in or strm->avail_out.
When there is a window, goto inf_leave will update the window with the last
output written. If a goto inf_leave occurs in the middle of decompression
and there is no window currently, goto inf_leave will create one and copy
output to the window for the next call of inflate().
In this implementation, the flush parameter of inflate() only affects the
return code (per zlib.h). inflate() always writes as much as possible to
strm->next_out, given the space available and the provided input--the effect
documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
the allocation of and copying into a sliding window until necessary, which
provides the effect documented in zlib.h for Z_FINISH when the entire input
   stream is available. So the only thing the flush parameter actually does is:
when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
will return Z_BUF_ERROR if it has not reached the end of the stream.
*/
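/*
   Illustrative caller sketch (added, not part of the original source): a
   typical kernel user drives zlib_inflate() with a workspace of
   zlib_inflate_workspacesize() bytes; see zlib_inflate_blob() in infutil.c
   for a complete in-tree example. With hypothetical src/dst buffers:
     strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
     zlib_inflateInit2(strm, -MAX_WBITS);    (raw deflate, no zlib header)
     strm->next_in = src;   strm->avail_in = src_len;
     strm->next_out = dst;  strm->avail_out = dst_len;
     ret = zlib_inflate(strm, Z_FINISH);     (Z_STREAM_END on success)
     zlib_inflateEnd(strm);
 */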
int zlib_inflate(z_streamp strm, int flush)
{
struct inflate_state *state;
const unsigned char *next; /* next input */
unsigned char *put; /* next output */
unsigned have, left; /* available input and output */
unsigned long hold; /* bit buffer */
unsigned bits; /* bits in bit buffer */
unsigned in, out; /* save starting available input and output */
unsigned copy; /* number of stored or match bytes to copy */
unsigned char *from; /* where to copy match bytes from */
code this; /* current decoding table entry */
code last; /* parent table entry */
unsigned len; /* length to copy for repeats, bits to drop */
int ret; /* return code */
static const unsigned short order[19] = /* permutation of code lengths */
{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
/* Do not check for strm->next_out == NULL here as ppc zImage
inflates to strm->next_out = 0 */
if (strm == NULL || strm->state == NULL ||
(strm->next_in == NULL && strm->avail_in != 0))
return Z_STREAM_ERROR;
state = (struct inflate_state *)strm->state;
if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
LOAD();
in = have;
out = left;
ret = Z_OK;
for (;;)
switch (state->mode) {
case HEAD:
if (state->wrap == 0) {
state->mode = TYPEDO;
break;
}
NEEDBITS(16);
if (
((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
state->mode = BAD;
break;
}
DROPBITS(4);
len = BITS(4) + 8;
if (len > state->wbits) {
strm->msg = (char *)"invalid window size";
state->mode = BAD;
break;
}
state->dmax = 1U << len;
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = hold & 0x200 ? DICTID : TYPE;
INITBITS();
break;
case DICTID:
NEEDBITS(32);
strm->adler = state->check = REVERSE(hold);
INITBITS();
state->mode = DICT;
fallthrough;
case DICT:
if (state->havedict == 0) {
RESTORE();
return Z_NEED_DICT;
}
strm->adler = state->check = zlib_adler32(0L, NULL, 0);
state->mode = TYPE;
fallthrough;
case TYPE:
if (flush == Z_BLOCK) goto inf_leave;
fallthrough;
case TYPEDO:
INFLATE_TYPEDO_HOOK(strm, flush);
if (state->last) {
BYTEBITS();
state->mode = CHECK;
break;
}
NEEDBITS(3);
state->last = BITS(1);
DROPBITS(1);
switch (BITS(2)) {
case 0: /* stored block */
state->mode = STORED;
break;
case 1: /* fixed block */
zlib_fixedtables(state);
state->mode = LEN; /* decode codes */
break;
case 2: /* dynamic block */
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
break;
case STORED:
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
state->length = (unsigned)hold & 0xffff;
INITBITS();
state->mode = COPY;
fallthrough;
case COPY:
copy = state->length;
if (copy) {
if (copy > have) copy = have;
if (copy > left) copy = left;
if (copy == 0) goto inf_leave;
memcpy(put, next, copy);
have -= copy;
next += copy;
left -= copy;
put += copy;
state->length -= copy;
break;
}
state->mode = TYPE;
break;
case TABLE:
NEEDBITS(14);
state->nlen = BITS(5) + 257;
DROPBITS(5);
state->ndist = BITS(5) + 1;
DROPBITS(5);
state->ncode = BITS(4) + 4;
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
state->mode = BAD;
break;
}
#endif
state->have = 0;
state->mode = LENLENS;
fallthrough;
case LENLENS:
while (state->have < state->ncode) {
NEEDBITS(3);
state->lens[order[state->have++]] = (unsigned short)BITS(3);
DROPBITS(3);
}
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (code const *)(state->next);
state->lenbits = 7;
ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
state->mode = BAD;
break;
}
state->have = 0;
state->mode = CODELENS;
fallthrough;
case CODELENS:
while (state->have < state->nlen + state->ndist) {
for (;;) {
this = state->lencode[BITS(state->lenbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if (this.val < 16) {
NEEDBITS(this.bits);
DROPBITS(this.bits);
state->lens[state->have++] = this.val;
}
else {
if (this.val == 16) {
NEEDBITS(this.bits + 2);
DROPBITS(this.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
len = state->lens[state->have - 1];
copy = 3 + BITS(2);
DROPBITS(2);
}
else if (this.val == 17) {
NEEDBITS(this.bits + 3);
DROPBITS(this.bits);
len = 0;
copy = 3 + BITS(3);
DROPBITS(3);
}
else {
NEEDBITS(this.bits + 7);
DROPBITS(this.bits);
len = 0;
copy = 11 + BITS(7);
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
state->mode = BAD;
break;
}
while (copy--)
state->lens[state->have++] = (unsigned short)len;
}
}
/* handle error breaks in while */
if (state->mode == BAD) break;
/* build code tables */
state->next = state->codes;
state->lencode = (code const *)(state->next);
state->lenbits = 9;
ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
state->distcode = (code const *)(state->next);
state->distbits = 6;
ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
state->mode = BAD;
break;
}
state->mode = LEN;
fallthrough;
case LEN:
if (have >= 6 && left >= 258) {
RESTORE();
inflate_fast(strm, out);
LOAD();
break;
}
for (;;) {
this = state->lencode[BITS(state->lenbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if (this.op && (this.op & 0xf0) == 0) {
last = this;
for (;;) {
this = state->lencode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + this.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
}
DROPBITS(this.bits);
state->length = (unsigned)this.val;
if ((int)(this.op) == 0) {
state->mode = LIT;
break;
}
if (this.op & 32) {
state->mode = TYPE;
break;
}
if (this.op & 64) {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
state->extra = (unsigned)(this.op) & 15;
state->mode = LENEXT;
fallthrough;
case LENEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->length += BITS(state->extra);
DROPBITS(state->extra);
}
state->mode = DIST;
fallthrough;
case DIST:
for (;;) {
this = state->distcode[BITS(state->distbits)];
if ((unsigned)(this.bits) <= bits) break;
PULLBYTE();
}
if ((this.op & 0xf0) == 0) {
last = this;
for (;;) {
this = state->distcode[last.val +
(BITS(last.bits + last.op) >> last.bits)];
if ((unsigned)(last.bits + this.bits) <= bits) break;
PULLBYTE();
}
DROPBITS(last.bits);
}
DROPBITS(this.bits);
if (this.op & 64) {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
state->offset = (unsigned)this.val;
state->extra = (unsigned)(this.op) & 15;
state->mode = DISTEXT;
fallthrough;
case DISTEXT:
if (state->extra) {
NEEDBITS(state->extra);
state->offset += BITS(state->extra);
DROPBITS(state->extra);
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
if (state->offset > state->whave + out - left) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
state->mode = MATCH;
fallthrough;
case MATCH:
if (left == 0) goto inf_leave;
copy = out - left;
if (state->offset > copy) { /* copy from window */
copy = state->offset - copy;
if (copy > state->write) {
copy -= state->write;
from = state->window + (state->wsize - copy);
}
else
from = state->window + (state->write - copy);
if (copy > state->length) copy = state->length;
}
else { /* copy from output */
from = put - state->offset;
copy = state->length;
}
if (copy > left) copy = left;
left -= copy;
state->length -= copy;
do {
*put++ = *from++;
} while (--copy);
if (state->length == 0) state->mode = LEN;
break;
case LIT:
if (left == 0) goto inf_leave;
*put++ = (unsigned char)(state->length);
left--;
state->mode = LEN;
break;
case CHECK:
if (state->wrap) {
NEEDBITS(32);
out -= left;
strm->total_out += out;
state->total += out;
if (INFLATE_NEED_CHECKSUM(strm) && out)
strm->adler = state->check =
UPDATE(state->check, put - out, out);
out = left;
if ((
REVERSE(hold)) != state->check) {
strm->msg = (char *)"incorrect data check";
state->mode = BAD;
break;
}
INITBITS();
}
state->mode = DONE;
fallthrough;
case DONE:
ret = Z_STREAM_END;
goto inf_leave;
case BAD:
ret = Z_DATA_ERROR;
goto inf_leave;
case MEM:
return Z_MEM_ERROR;
case SYNC:
default:
return Z_STREAM_ERROR;
}
/*
Return from inflate(), updating the total counts and the check value.
If there was no progress during the inflate() call, return a buffer
error. Call zlib_updatewindow() to create and/or update the window state.
*/
inf_leave:
RESTORE();
if (INFLATE_NEED_UPDATEWINDOW(strm) &&
(state->wsize || (state->mode < CHECK && out != strm->avail_out)))
zlib_updatewindow(strm, out);
in -= strm->avail_in;
out -= strm->avail_out;
strm->total_in += in;
strm->total_out += out;
state->total += out;
if (INFLATE_NEED_CHECKSUM(strm) && state->wrap && out)
strm->adler = state->check =
UPDATE(state->check, strm->next_out - out, out);
strm->data_type = state->bits + (state->last ? 64 : 0) +
(state->mode == TYPE ? 128 : 0);
if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
strm->avail_out != 0 && strm->avail_in == 0)
return zlib_inflateSyncPacket(strm);
if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
ret = Z_BUF_ERROR;
return ret;
}
int zlib_inflateEnd(z_streamp strm)
{
if (strm == NULL || strm->state == NULL)
return Z_STREAM_ERROR;
return Z_OK;
}
/*
* This subroutine adds the data at next_in/avail_in to the output history
* without performing any output. The output buffer must be "caught up";
 * i.e. no pending output (but this should always be the case). The state must
* be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit,
* the output will also be caught up, and the checksum will have been updated
* if need be.
*/
int zlib_inflateIncomp(z_stream *z)
{
struct inflate_state *state = (struct inflate_state *)z->state;
Byte *saved_no = z->next_out;
uInt saved_ao = z->avail_out;
if (state->mode != TYPE && state->mode != HEAD)
return Z_DATA_ERROR;
/* Setup some variables to allow misuse of updateWindow */
z->avail_out = 0;
z->next_out = (unsigned char*)z->next_in + z->avail_in;
zlib_updatewindow(z, z->avail_in);
/* Restore saved variables */
z->avail_out = saved_ao;
z->next_out = saved_no;
z->adler = state->check =
UPDATE(state->check, z->next_in, z->avail_in);
z->total_out += z->avail_in;
z->total_in += z->avail_in;
z->next_in += z->avail_in;
state->total += z->avail_in;
z->avail_in = 0;
return Z_OK;
}
| linux-master | lib/zlib_inflate/inflate.c |
#include <linux/zutil.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/* Utility function: initialize zlib, unpack binary blob, clean up zlib,
* return len or negative error code.
*/
int zlib_inflate_blob(void *gunzip_buf, unsigned int sz,
const void *buf, unsigned int len)
{
const u8 *zbuf = buf;
struct z_stream_s *strm;
int rc;
rc = -ENOMEM;
strm = kmalloc(sizeof(*strm), GFP_KERNEL);
if (strm == NULL)
goto gunzip_nomem1;
strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (strm->workspace == NULL)
goto gunzip_nomem2;
/* gzip header (1f,8b,08... 10 bytes total + possible asciz filename)
* expected to be stripped from input
*/
strm->next_in = zbuf;
strm->avail_in = len;
strm->next_out = gunzip_buf;
strm->avail_out = sz;
rc = zlib_inflateInit2(strm, -MAX_WBITS);
if (rc == Z_OK) {
rc = zlib_inflate(strm, Z_FINISH);
/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
if (rc == Z_STREAM_END)
rc = sz - strm->avail_out;
else
rc = -EINVAL;
zlib_inflateEnd(strm);
} else
rc = -EINVAL;
kfree(strm->workspace);
gunzip_nomem2:
kfree(strm);
gunzip_nomem1:
return rc; /* decompressed length on success, negative error code on failure */
}
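/*
 * Illustrative usage (added, not part of the original source), with
 * hypothetical buffers; the gzip header must already have been stripped and
 * the caller must know an upper bound on the uncompressed size:
 *   int n = zlib_inflate_blob(out_buf, out_size, deflate_data, deflate_len);
 *   if (n < 0)
 *       return n;                 (negative errno-style error)
 *   (n is the number of decompressed bytes placed in out_buf)
 */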
| linux-master | lib/zlib_inflate/infutil.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/zlib_inflate/inflate_syms.c
*
* Exported symbols for the inflate functionality.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/zlib.h>
EXPORT_SYMBOL(zlib_inflate_workspacesize);
EXPORT_SYMBOL(zlib_inflate);
EXPORT_SYMBOL(zlib_inflateInit2);
EXPORT_SYMBOL(zlib_inflateEnd);
EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateIncomp);
EXPORT_SYMBOL(zlib_inflate_blob);
MODULE_LICENSE("GPL");
| linux-master | lib/zlib_inflate/inflate_syms.c |
/* inftrees.c -- generate Huffman trees for efficient decoding
* Copyright (C) 1995-2005 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include <linux/zutil.h>
#include "inftrees.h"
#define MAXBITS 15
/*
Build a set of tables to decode the provided canonical Huffman code.
The code lengths are lens[0..codes-1]. The result starts at *table,
whose indices are 0..2^bits-1. work is a writable array of at least
lens shorts, which is used as a work area. type is the type of code
to be generated, CODES, LENS, or DISTS. On return, zero is success,
-1 is an invalid code, and +1 means that ENOUGH isn't enough. table
on return points to the next available entry's address. bits is the
requested root table index bits, and on return it is the actual root
table index bits. It will differ if the request is greater than the
longest code or if it is less than the shortest code.
*/
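/*
   Illustrative call (added, not part of the original source), mirroring how
   inflate.c builds its literal/length table: state->next points into the
   state->codes area and lenbits starts at the requested root size of 9:
     state->next = state->codes;
     state->lencode = (code const *)(state->next);
     state->lenbits = 9;
     ret = zlib_inflate_table(LENS, state->lens, state->nlen,
                              &(state->next), &(state->lenbits), state->work);
   (ret != 0 means the length set was invalid or did not fit in ENOUGH)
 */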
int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
code **table, unsigned *bits, unsigned short *work)
{
unsigned len; /* a code's length in bits */
unsigned sym; /* index of code symbols */
unsigned min, max; /* minimum and maximum code lengths */
unsigned root; /* number of index bits for root table */
unsigned curr; /* number of index bits for current table */
unsigned drop; /* code bits to drop for sub-table */
int left; /* number of prefix codes available */
unsigned used; /* code entries in table used */
unsigned huff; /* Huffman code */
unsigned incr; /* for incrementing code, index */
unsigned fill; /* index for replicating entries */
unsigned low; /* low bits for current root entry */
unsigned mask; /* mask for low root bits */
code this; /* table entry for duplication */
code *next; /* next available space in table */
const unsigned short *base; /* base value table to use */
const unsigned short *extra; /* extra bits table to use */
int end; /* use base and extra for symbol > end */
unsigned short count[MAXBITS+1]; /* number of codes of each length */
unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
static const unsigned short lbase[31] = { /* Length codes 257..285 base */
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const unsigned short lext[31] = { /* Length codes 257..285 extra */
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196};
static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577, 0, 0};
static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
28, 28, 29, 29, 64, 64};
/*
Process a set of code lengths to create a canonical Huffman code. The
code lengths are lens[0..codes-1]. Each length corresponds to the
symbols 0..codes-1. The Huffman code is generated by first sorting the
symbols by length from short to long, and retaining the symbol order
for codes with equal lengths. Then the code starts with all zero bits
for the first code of the shortest length, and the codes are integer
increments for the same length, and zeros are appended as the length
increases. For the deflate format, these bits are stored backwards
from their more natural integer increment ordering, and so when the
decoding tables are built in the large loop below, the integer codes
are incremented backwards.
This routine assumes, but does not check, that all of the entries in
lens[] are in the range 0..MAXBITS. The caller must assure this.
   1..MAXBITS is interpreted as that code length. Zero means that the
   symbol does not occur in this code.
The codes are sorted by computing a count of codes for each length,
creating from that a table of starting indices for each length in the
sorted table, and then entering the symbols in order in the sorted
table. The sorted table is work[], with that space being provided by
the caller.
The length counts are used for other purposes as well, i.e. finding
the minimum and maximum length codes, determining if there are any
codes at all, checking for a valid set of lengths, and looking ahead
at length counts to determine sub-table sizes when building the
decoding tables.
*/
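/*
   Worked example (added for illustration, not part of the original source):
   for four symbols A,B,C,D with lens[] = {2,1,3,3}, the length counts are
   count[1]=1, count[2]=1, count[3]=2, giving offs[1]=0, offs[2]=1, offs[3]=2
   and the sorted order work[] = {B,A,C,D}. The canonical codes are then
   B=0, A=10, C=110, D=111 (shown in natural bit order; deflate stores them
   bit-reversed, which is why the loops below increment the code backwards).
 */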
/* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
for (len = 0; len <= MAXBITS; len++)
count[len] = 0;
for (sym = 0; sym < codes; sym++)
count[lens[sym]]++;
/* bound code lengths, force root to be within code lengths */
root = *bits;
for (max = MAXBITS; max >= 1; max--)
if (count[max] != 0) break;
if (root > max) root = max;
if (max == 0) { /* no symbols to code at all */
this.op = (unsigned char)64; /* invalid code marker */
this.bits = (unsigned char)1;
this.val = (unsigned short)0;
*(*table)++ = this; /* make a table to force an error */
*(*table)++ = this;
*bits = 1;
return 0; /* no symbols, but wait for decoding to report error */
}
for (min = 1; min < MAXBITS; min++)
if (count[min] != 0) break;
if (root < min) root = min;
/* check for an over-subscribed or incomplete set of lengths */
left = 1;
for (len = 1; len <= MAXBITS; len++) {
left <<= 1;
left -= count[len];
if (left < 0) return -1; /* over-subscribed */
}
if (left > 0 && (type == CODES || max != 1))
return -1; /* incomplete set */
/* generate offsets into symbol table for each length for sorting */
offs[1] = 0;
for (len = 1; len < MAXBITS; len++)
offs[len + 1] = offs[len] + count[len];
/* sort symbols by length, by symbol order within each length */
for (sym = 0; sym < codes; sym++)
if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
/*
Create and fill in decoding tables. In this loop, the table being
filled is at next and has curr index bits. The code being used is huff
with length len. That code is converted to an index by dropping drop
bits off of the bottom. For codes where len is less than drop + curr,
those top drop + curr - len bits are incremented through all values to
fill the table with replicated entries.
root is the number of index bits for the root table. When len exceeds
root, sub-tables are created pointed to by the root entry with an index
of the low root bits of huff. This is saved in low to check for when a
new sub-table should be started. drop is zero when the root table is
being filled, and drop is root when sub-tables are being filled.
When a new sub-table is needed, it is necessary to look ahead in the
code lengths to determine what size sub-table is needed. The length
counts are used for this, and so count[] is decremented as codes are
entered in the tables.
used keeps track of how many table entries have been allocated from the
provided *table space. It is checked when a LENS table is being made
against the space in *table, ENOUGH, minus the maximum space needed by
the worst case distance code, MAXD. This should never happen, but the
sufficiency of ENOUGH has not been proven exhaustively, hence the check.
This assumes that when type == LENS, bits == 9.
sym increments through all symbols, and the loop terminates when
all codes of length max, i.e. all codes, have been processed. This
routine permits incomplete codes, so another loop after this one fills
in the rest of the decoding tables with invalid code markers.
*/
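/*
   Worked illustration (added, not part of the original source): inflate.c
   requests a root of 9 bits for the literal/length table (lenbits = 9), so a
   12-bit code is resolved in two steps: its low 9 bits select a root entry
   whose op field holds the sub-table index width and whose val field holds
   the sub-table offset, and at least the remaining 12 - 9 = 3 bits index that
   sub-table (curr is widened when longer codes share the same low root bits).
 */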
/* set up for code type */
switch (type) {
case CODES:
base = extra = work; /* dummy value--not used */
end = 19;
break;
case LENS:
base = lbase;
base -= 257;
extra = lext;
extra -= 257;
end = 256;
break;
default: /* DISTS */
base = dbase;
extra = dext;
end = -1;
}
/* initialize state for loop */
huff = 0; /* starting code */
sym = 0; /* starting code symbol */
len = min; /* starting code length */
next = *table; /* current table to fill in */
curr = root; /* current table index bits */
drop = 0; /* current bits to drop from code for index */
low = (unsigned)(-1); /* trigger new sub-table when len > root */
used = 1U << root; /* use root table entries */
mask = used - 1; /* mask for comparing low */
/* check available table space */
if (type == LENS && used >= ENOUGH - MAXD)
return 1;
/* process all codes and make table entries */
for (;;) {
/* create table entry */
this.bits = (unsigned char)(len - drop);
if ((int)(work[sym]) < end) {
this.op = (unsigned char)0;
this.val = work[sym];
}
else if ((int)(work[sym]) > end) {
this.op = (unsigned char)(extra[work[sym]]);
this.val = base[work[sym]];
}
else {
this.op = (unsigned char)(32 + 64); /* end of block */
this.val = 0;
}
/* replicate for those indices with low len bits equal to huff */
incr = 1U << (len - drop);
fill = 1U << curr;
min = fill; /* save offset to next table */
do {
fill -= incr;
next[(huff >> drop) + fill] = this;
} while (fill != 0);
/* backwards increment the len-bit code huff */
incr = 1U << (len - 1);
while (huff & incr)
incr >>= 1;
if (incr != 0) {
huff &= incr - 1;
huff += incr;
}
else
huff = 0;
/* go to next symbol, update count, len */
sym++;
if (--(count[len]) == 0) {
if (len == max) break;
len = lens[work[sym]];
}
/* create new sub-table if needed */
if (len > root && (huff & mask) != low) {
/* if first time, transition to sub-tables */
if (drop == 0)
drop = root;
/* increment past last table */
next += min; /* here min is 1 << curr */
/* determine length of next table */
curr = len - drop;
left = (int)(1 << curr);
while (curr + drop < max) {
left -= count[curr + drop];
if (left <= 0) break;
curr++;
left <<= 1;
}
/* check for enough space */
used += 1U << curr;
if (type == LENS && used >= ENOUGH - MAXD)
return 1;
/* point entry in root table to sub-table */
low = huff & mask;
(*table)[low].op = (unsigned char)curr;
(*table)[low].bits = (unsigned char)root;
(*table)[low].val = (unsigned short)(next - *table);
}
}
/*
Fill in rest of table for incomplete codes. This loop is similar to the
loop above in incrementing huff for table indices. It is assumed that
len is equal to curr + drop, so there is no loop needed to increment
through high index bits. When the current sub-table is filled, the loop
drops back to the root table to fill in any remaining entries there.
*/
this.op = (unsigned char)64; /* invalid code marker */
this.bits = (unsigned char)(len - drop);
this.val = (unsigned short)0;
while (huff != 0) {
/* when done with sub-table, drop back to root table */
if (drop != 0 && (huff & mask) != low) {
drop = 0;
len = root;
next = *table;
this.bits = (unsigned char)len;
}
/* put invalid code marker in table */
next[huff >> drop] = this;
/* backwards increment the len-bit code huff */
incr = 1U << (len - 1);
while (huff & incr)
incr >>= 1;
if (incr != 0) {
huff &= incr - 1;
huff += incr;
}
else
huff = 0;
}
/* set return parameters */
*table += used;
*bits = root;
return 0;
}
| linux-master | lib/zlib_inflate/inftrees.c |
/* inffast.c -- fast decoding
* Copyright (C) 1995-2004 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include <linux/zutil.h>
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"
#ifndef ASMINF
union uu {
unsigned short us;
unsigned char b[2];
};
/* Endian independent version */
static inline unsigned short
get_unaligned16(const unsigned short *p)
{
union uu mm;
unsigned char *b = (unsigned char *)p;
mm.b[0] = b[0];
mm.b[1] = b[1];
return mm.us;
}
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
available, an end-of-block is encountered, or a data error is encountered.
When large enough input and output buffers are supplied to inflate(), for
example, a 16K input buffer and a 64K output buffer, more than 95% of the
inflate execution time is spent in this routine.
Entry assumptions:
state->mode == LEN
strm->avail_in >= 6
strm->avail_out >= 258
start >= strm->avail_out
state->bits < 8
On return, state->mode is one of:
LEN -- ran out of enough output space or enough available input
TYPE -- reached end of block code, inflate() to interpret next block
BAD -- error in block data
Notes:
- The maximum input bits used by a length/distance pair is 15 bits for the
length code, 5 bits for the length extra, 15 bits for the distance code,
and 13 bits for the distance extra. This totals 48 bits, or six bytes.
Therefore if strm->avail_in >= 6, then there is enough input to avoid
checking for available input while decoding.
- The maximum bytes that a single length/distance pair can output is 258
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm->avail_out >= 258 for each loop to avoid checking for
output space.
- @start: inflate()'s starting value for strm->avail_out
*/
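/*
   Illustrative note (added, not part of the original source): inflate() only
   enters this fast path once the entry assumptions above are guaranteed; see
   the LEN state in inflate.c:
     if (have >= 6 && left >= 258) {
         RESTORE();
         inflate_fast(strm, out);
         LOAD();
         break;
     }
 */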
void inflate_fast(z_streamp strm, unsigned start)
{
struct inflate_state *state;
const unsigned char *in; /* local strm->next_in */
const unsigned char *last; /* while in < last, enough input available */
unsigned char *out; /* local strm->next_out */
unsigned char *beg; /* inflate()'s initial strm->next_out */
unsigned char *end; /* while out < end, enough space available */
#ifdef INFLATE_STRICT
unsigned dmax; /* maximum distance from zlib header */
#endif
unsigned wsize; /* window size or zero if not using window */
unsigned whave; /* valid bytes in the window */
unsigned write; /* window write index */
unsigned char *window; /* allocated sliding window, if wsize != 0 */
unsigned long hold; /* local strm->hold */
unsigned bits; /* local strm->bits */
code const *lcode; /* local strm->lencode */
code const *dcode; /* local strm->distcode */
unsigned lmask; /* mask for first level of length codes */
unsigned dmask; /* mask for first level of distance codes */
code this; /* retrieved table entry */
unsigned op; /* code bits, operation, extra bits, or */
/* window position, window bytes to copy */
unsigned len; /* match length, unused bytes */
unsigned dist; /* match distance */
unsigned char *from; /* where to copy match from */
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
in = strm->next_in;
last = in + (strm->avail_in - 5);
out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
dmax = state->dmax;
#endif
wsize = state->wsize;
whave = state->whave;
write = state->write;
window = state->window;
hold = state->hold;
bits = state->bits;
lcode = state->lencode;
dcode = state->distcode;
lmask = (1U << state->lenbits) - 1;
dmask = (1U << state->distbits) - 1;
/* decode literals and length/distances until end-of-block or not enough
input data or output space */
do {
if (bits < 15) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = lcode[hold & lmask];
dolen:
op = (unsigned)(this.bits);
hold >>= op;
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
*out++ = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
hold >>= op;
bits -= op;
}
if (bits < 15) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = dcode[hold & dmask];
dodist:
op = (unsigned)(this.bits);
hold >>= op;
bits -= op;
op = (unsigned)(this.op);
if (op & 16) { /* distance base */
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
dist += (unsigned)hold & ((1U << op) - 1);
#ifdef INFLATE_STRICT
if (dist > dmax) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
#endif
hold >>= op;
bits -= op;
op = (unsigned)(out - beg); /* max distance in output */
if (dist > op) { /* see if copy from window */
op = dist - op; /* distance back in window */
if (op > whave) {
strm->msg = (char *)"invalid distance too far back";
state->mode = BAD;
break;
}
from = window;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
*out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
else if (write < op) { /* wrap around window */
from += wsize + write - op;
op -= write;
if (op < len) { /* some from end of window */
len -= op;
do {
*out++ = *from++;
} while (--op);
from = window;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
*out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
}
else { /* contiguous in window */
from += write - op;
if (op < len) { /* some from window */
len -= op;
do {
*out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
*out++ = *from++;
*out++ = *from++;
*out++ = *from++;
len -= 3;
}
if (len) {
*out++ = *from++;
if (len > 1)
*out++ = *from++;
}
}
else {
unsigned short *sout;
unsigned long loops;
from = out - dist; /* copy direct from output */
/* minimum length is three */
/* Align out addr */
if (!((long)(out - 1) & 1)) {
*out++ = *from++;
len--;
}
sout = (unsigned short *)(out);
if (dist > 2) {
unsigned short *sfrom;
sfrom = (unsigned short *)(from);
loops = len >> 1;
do {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
*sout++ = *sfrom++;
else
*sout++ = get_unaligned16(sfrom++);
} while (--loops);
out = (unsigned char *)sout;
from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
unsigned short pat16;
pat16 = *(sout-1);
if (dist == 1) {
union uu mm;
/* copy one char pattern to both bytes */
mm.us = pat16;
mm.b[0] = mm.b[1];
pat16 = mm.us;
}
loops = len >> 1;
do
*sout++ = pat16;
while (--loops);
out = (unsigned char *)sout;
}
if (len & 1)
*out++ = *from++;
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
this = dcode[this.val + (hold & ((1U << op) - 1))];
goto dodist;
}
else {
strm->msg = (char *)"invalid distance code";
state->mode = BAD;
break;
}
}
else if ((op & 64) == 0) { /* 2nd level length code */
this = lcode[this.val + (hold & ((1U << op) - 1))];
goto dolen;
}
else if (op & 32) { /* end-of-block */
state->mode = TYPE;
break;
}
else {
strm->msg = (char *)"invalid literal/length code";
state->mode = BAD;
break;
}
} while (in < last && out < end);
/* return unused bytes (on entry, bits < 8, so in won't go too far back) */
len = bits >> 3;
in -= len;
bits -= len << 3;
hold &= (1U << bits) - 1;
/* update state and return */
strm->next_in = in;
strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
state->hold = hold;
state->bits = bits;
return;
}
/*
inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
- Using bit fields for code structure
- Different op definition to avoid & for extra bits (do & for table bits)
- Three separate decoding do-loops for direct, window, and write == 0
- Special case for distance > 1 copies to do overlapped load and store copy
- Explicit branch predictions (based on measured branch probabilities)
 - Deferring match copy and interspersing it with decoding subsequent codes
- Swapping literal/length else
- Swapping window/direct else
- Larger unrolled copy loops (three is about right)
- Moving len -= 3 statement into middle of loop
*/
#endif /* !ASMINF */
| linux-master | lib/zlib_inflate/inffast.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/font.h>
#define FONTDATAMAX 11264
static const struct font_data fontdata_sun12x22 = {
{ 0, 0, FONTDATAMAX, 0 }, {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 1 0x01 '^A' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xc0, /* 000111111100 */
0x30, 0x60, /* 001100000110 */
0x65, 0x30, /* 011001010011 */
0x6d, 0xb0, /* 011011011011 */
0x60, 0x30, /* 011000000011 */
0x62, 0x30, /* 011000100011 */
0x62, 0x30, /* 011000100011 */
0x60, 0x30, /* 011000000011 */
0x6f, 0xb0, /* 011011111011 */
0x67, 0x30, /* 011001110011 */
0x30, 0x60, /* 001100000110 */
0x1f, 0xc0, /* 000111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 2 0x02 '^B' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xc0, /* 000111111100 */
0x3f, 0xe0, /* 001111111110 */
0x7a, 0xf0, /* 011110101111 */
0x72, 0x70, /* 011100100111 */
0x7f, 0xf0, /* 011111111111 */
0x7d, 0xf0, /* 011111011111 */
0x7d, 0xf0, /* 011111011111 */
0x7f, 0xf0, /* 011111111111 */
0x70, 0x70, /* 011100000111 */
0x78, 0xf0, /* 011110001111 */
0x3f, 0xe0, /* 001111111110 */
0x1f, 0xc0, /* 000111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 3 0x03 '^C' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x3f, 0xc0, /* 001111111100 */
0x7f, 0xe0, /* 011111111110 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 4 0x04 '^D' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x1f, 0xc0, /* 000111111100 */
0x1f, 0xc0, /* 000111111100 */
0x3f, 0xe0, /* 001111111110 */
0x1f, 0xc0, /* 000111111100 */
0x1f, 0xc0, /* 000111111100 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x07, 0x00, /* 000001110000 */
0x02, 0x00, /* 000000100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 5 0x05 '^E' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x07, 0x00, /* 000001110000 */
0x02, 0x00, /* 000000100000 */
0x18, 0xc0, /* 000110001100 */
0x3d, 0xe0, /* 001111011110 */
0x3d, 0xe0, /* 001111011110 */
0x1a, 0xc0, /* 000110101100 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x1f, 0xc0, /* 000111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 6 0x06 '^F' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x36, 0xc0, /* 001101101100 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 7 0x07 '^G' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 8 0x08 '^H' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xf9, 0xf0, /* 111110011111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xe0, 0x70, /* 111000000111 */
0xe0, 0x70, /* 111000000111 */
0xc0, 0x30, /* 110000000011 */
0xc0, 0x30, /* 110000000011 */
0xe0, 0x70, /* 111000000111 */
0xe0, 0x70, /* 111000000111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xf9, 0xf0, /* 111110011111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 9 0x09 '^I' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 10 0x0a '^J' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xf9, 0xf0, /* 111110011111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xe6, 0x70, /* 111001100111 */
0xe6, 0x70, /* 111001100111 */
0xcf, 0x30, /* 110011110011 */
0xcf, 0x30, /* 110011110011 */
0xe6, 0x70, /* 111001100111 */
0xe6, 0x70, /* 111001100111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xf9, 0xf0, /* 111110011111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 11 0x0b '^K' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xe0, /* 000011111110 */
0x0f, 0xe0, /* 000011111110 */
0x01, 0xe0, /* 000000011110 */
0x03, 0x60, /* 000000110110 */
0x06, 0x60, /* 000001100110 */
0x1e, 0x00, /* 000111100000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x00, /* 001100110000 */
0x61, 0x80, /* 011000011000 */
0x61, 0x80, /* 011000011000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x00, /* 001100110000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 12 0x0c '^L' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 13 0x0d '^M' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xe0, /* 000011111110 */
0x0c, 0x60, /* 000011000110 */
0x0c, 0x60, /* 000011000110 */
0x0f, 0xe0, /* 000011111110 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x3c, 0x00, /* 001111000000 */
0x7c, 0x00, /* 011111000000 */
0x78, 0x00, /* 011110000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 14 0x0e '^N' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xe0, /* 000111111110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x1f, 0xe0, /* 000111111110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x19, 0xe0, /* 000110011110 */
0x1b, 0xe0, /* 000110111110 */
0x1b, 0xc0, /* 000110111100 */
0x79, 0x80, /* 011110011000 */
0xf8, 0x00, /* 111110000000 */
0xf0, 0x00, /* 111100000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 15 0x0f '^O' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0xc0, /* 000110001100 */
0x0d, 0x80, /* 000011011000 */
0x6d, 0xb0, /* 011011011011 */
0x3d, 0xe0, /* 001111011110 */
0x00, 0x00, /* 000000000000 */
0x3d, 0xe0, /* 001111011110 */
0x6d, 0xb0, /* 011011011011 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 16 0x10 '^P' */
0x00, 0x00, /* 000000000000 */
0x00, 0x20, /* 000000000010 */
0x00, 0x60, /* 000000000110 */
0x00, 0xe0, /* 000000001110 */
0x01, 0xe0, /* 000000011110 */
0x03, 0xe0, /* 000000111110 */
0x07, 0xe0, /* 000001111110 */
0x0f, 0xe0, /* 000011111110 */
0x1f, 0xe0, /* 000111111110 */
0x3f, 0xe0, /* 001111111110 */
0x7f, 0xe0, /* 011111111110 */
0x3f, 0xe0, /* 001111111110 */
0x1f, 0xe0, /* 000111111110 */
0x0f, 0xe0, /* 000011111110 */
0x07, 0xe0, /* 000001111110 */
0x03, 0xe0, /* 000000111110 */
0x01, 0xe0, /* 000000011110 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x20, /* 000000000010 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 17 0x11 '^Q' */
0x00, 0x00, /* 000000000000 */
0x40, 0x00, /* 010000000000 */
0x60, 0x00, /* 011000000000 */
0x70, 0x00, /* 011100000000 */
0x78, 0x00, /* 011110000000 */
0x7c, 0x00, /* 011111000000 */
0x7e, 0x00, /* 011111100000 */
0x7f, 0x00, /* 011111110000 */
0x7f, 0x80, /* 011111111000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0x80, /* 011111111000 */
0x7f, 0x00, /* 011111110000 */
0x7e, 0x00, /* 011111100000 */
0x7c, 0x00, /* 011111000000 */
0x78, 0x00, /* 011110000000 */
0x70, 0x00, /* 011100000000 */
0x60, 0x00, /* 011000000000 */
0x40, 0x00, /* 010000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 18 0x12 '^R' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 19 0x13 '^S' */
0x00, 0x00, /* 000000000000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 20 0x14 '^T' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xf0, /* 000111111111 */
0x3c, 0xc0, /* 001111001100 */
0x7c, 0xc0, /* 011111001100 */
0x7c, 0xc0, /* 011111001100 */
0x7c, 0xc0, /* 011111001100 */
0x3c, 0xc0, /* 001111001100 */
0x1c, 0xc0, /* 000111001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x1c, 0xe0, /* 000111001110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 21 0x15 '^U' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x1f, 0x00, /* 000111110000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 22 0x16 '^V' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 23 0x17 '^W' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 24 0x18 '^X' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 25 0x19 '^Y' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 26 0x1a '^Z' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x08, 0x00, /* 000010000000 */
0x18, 0x00, /* 000110000000 */
0x38, 0x00, /* 001110000000 */
0x7f, 0xe0, /* 011111111110 */
0xff, 0xe0, /* 111111111110 */
0x7f, 0xe0, /* 011111111110 */
0x38, 0x00, /* 001110000000 */
0x18, 0x00, /* 000110000000 */
0x08, 0x00, /* 000010000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 27 0x1b '^[' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x00, /* 000000010000 */
0x01, 0x80, /* 000000011000 */
0x01, 0xc0, /* 000000011100 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xf0, /* 011111111111 */
0x7f, 0xe0, /* 011111111110 */
0x01, 0xc0, /* 000000011100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x00, /* 000000010000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 28 0x1c '^\' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x3f, 0xe0, /* 001111111110 */
0x3f, 0xe0, /* 001111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 29 0x1d '^]' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x09, 0x00, /* 000010010000 */
0x19, 0x80, /* 000110011000 */
0x39, 0xc0, /* 001110011100 */
0x7f, 0xe0, /* 011111111110 */
0xff, 0xf0, /* 111111111111 */
0x7f, 0xe0, /* 011111111110 */
0x39, 0xc0, /* 001110011100 */
0x19, 0x80, /* 000110011000 */
0x09, 0x00, /* 000010010000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 30 0x1e '^^' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xc0, /* 011111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 31 0x1f '^_' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 32 0x20 ' ' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 33 0x21 '!' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 34 0x22 '"' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 35 0x23 '#' */
0x00, 0x00, /* 000000000000 */
0x03, 0x30, /* 000000110011 */
0x03, 0x30, /* 000000110011 */
0x03, 0x30, /* 000000110011 */
0x06, 0x60, /* 000001100110 */
0x1f, 0xf0, /* 000111111111 */
0x1f, 0xf0, /* 000111111111 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xc0, /* 011111111100 */
0x33, 0x00, /* 001100110000 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 36 0x24 '$' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x66, 0xe0, /* 011001101110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x00, /* 011001100000 */
0x3e, 0x00, /* 001111100000 */
0x1f, 0x80, /* 000111111000 */
0x07, 0xc0, /* 000001111100 */
0x06, 0x60, /* 000001100110 */
0x06, 0x60, /* 000001100110 */
0x66, 0x60, /* 011001100110 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 37 0x25 '%' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x38, 0xc0, /* 001110001100 */
0x4c, 0xc0, /* 010011001100 */
0x45, 0x80, /* 010001011000 */
0x65, 0x80, /* 011001011000 */
0x3b, 0x00, /* 001110110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0d, 0xc0, /* 000011011100 */
0x1a, 0x60, /* 000110100110 */
0x1a, 0x20, /* 000110100010 */
0x33, 0x20, /* 001100110010 */
0x31, 0xc0, /* 001100011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 38 0x26 '&' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x0f, 0x80, /* 000011111000 */
0x1e, 0x00, /* 000111100000 */
0x3e, 0x00, /* 001111100000 */
0x77, 0x00, /* 011101110000 */
0x63, 0x60, /* 011000110110 */
0x61, 0xe0, /* 011000011110 */
0x61, 0xc0, /* 011000011100 */
0x61, 0x80, /* 011000011000 */
0x3f, 0xe0, /* 001111111110 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 39 0x27 ''' */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x10, 0x00, /* 000100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 40 0x28 '(' */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 41 0x29 ')' */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 42 0x2a '*' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x66, 0x60, /* 011001100110 */
0x76, 0xe0, /* 011101101110 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x76, 0xe0, /* 011101101110 */
0x66, 0x60, /* 011001100110 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 43 0x2b '+' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 44 0x2c ',' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x10, 0x00, /* 000100000000 */
0x00, 0x00, /* 000000000000 */
/* 45 0x2d '-' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 46 0x2e '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 47 0x2f '/' */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 48 0x30 '0' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0x80, /* 000100011000 */
0x10, 0xc0, /* 000100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x80, /* 001100001000 */
0x18, 0x80, /* 000110001000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 49 0x31 '1' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x06, 0x00, /* 000001100000 */
0x0e, 0x00, /* 000011100000 */
0x1e, 0x00, /* 000111100000 */
0x36, 0x00, /* 001101100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 50 0x32 '2' */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x61, 0xc0, /* 011000011100 */
0x40, 0xc0, /* 010000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 51 0x33 '3' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x1f, 0xc0, /* 000111111100 */
0x20, 0xe0, /* 001000001110 */
0x40, 0x60, /* 010000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0xe0, /* 000000001110 */
0x07, 0xc0, /* 000001111100 */
0x0f, 0xc0, /* 000011111100 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x40, 0x60, /* 010000000110 */
0x60, 0x40, /* 011000000100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 52 0x34 '4' */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x80, /* 000000111000 */
0x03, 0x80, /* 000000111000 */
0x05, 0x80, /* 000001011000 */
0x05, 0x80, /* 000001011000 */
0x09, 0x80, /* 000010011000 */
0x09, 0x80, /* 000010011000 */
0x11, 0x80, /* 000100011000 */
0x11, 0x80, /* 000100011000 */
0x21, 0x80, /* 001000011000 */
0x3f, 0xe0, /* 001111111110 */
0x7f, 0xe0, /* 011111111110 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 53 0x35 '5' */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x00, /* 000100000000 */
0x10, 0x00, /* 000100000000 */
0x20, 0x00, /* 001000000000 */
0x3f, 0x80, /* 001111111000 */
0x31, 0xc0, /* 001100011100 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x40, 0x60, /* 010000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 54 0x36 '6' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x60, 0x00, /* 011000000000 */
0x67, 0x80, /* 011001111000 */
0x6f, 0xc0, /* 011011111100 */
0x70, 0xe0, /* 011100001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 55 0x37 '7' */
0x00, 0x00, /* 000000000000 */
0x1f, 0xe0, /* 000111111110 */
0x3f, 0xe0, /* 001111111110 */
0x60, 0x40, /* 011000000100 */
0x00, 0x40, /* 000000000100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x80, /* 000000001000 */
0x00, 0x80, /* 000000001000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x00, /* 000000010000 */
0x01, 0x00, /* 000000010000 */
0x03, 0x00, /* 000000110000 */
0x02, 0x00, /* 000000100000 */
0x02, 0x00, /* 000000100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 56 0x38 '8' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x11, 0x80, /* 000100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x11, 0x80, /* 000100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x18, 0x80, /* 000110001000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 57 0x39 '9' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0xe0, /* 011100001110 */
0x3f, 0x60, /* 001111110110 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x60, /* 000000000110 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x07, 0x00, /* 000001110000 */
0x3c, 0x00, /* 001111000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 58 0x3a ':' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 59 0x3b ';' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x10, 0x00, /* 000100000000 */
0x00, 0x00, /* 000000000000 */
/* 60 0x3c '<' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x01, 0xc0, /* 000000011100 */
0x07, 0x00, /* 000001110000 */
0x1c, 0x00, /* 000111000000 */
0x70, 0x00, /* 011100000000 */
0x70, 0x00, /* 011100000000 */
0x1c, 0x00, /* 000111000000 */
0x07, 0x00, /* 000001110000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x60, /* 000000000110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 61 0x3d '=' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 62 0x3e '>' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0x38, 0x00, /* 001110000000 */
0x0e, 0x00, /* 000011100000 */
0x03, 0x80, /* 000000111000 */
0x00, 0xe0, /* 000000001110 */
0x00, 0xe0, /* 000000001110 */
0x03, 0x80, /* 000000111000 */
0x0e, 0x00, /* 000011100000 */
0x38, 0x00, /* 001110000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 63 0x3f '?' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x39, 0xc0, /* 001110011100 */
0x20, 0xc0, /* 001000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 64 0x40 '@' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x3f, 0xc0, /* 001111111100 */
0x30, 0x60, /* 001100000110 */
0x60, 0x60, /* 011000000110 */
0x67, 0x20, /* 011001110010 */
0x6f, 0xa0, /* 011011111010 */
0x6c, 0xa0, /* 011011001010 */
0x6c, 0xa0, /* 011011001010 */
0x67, 0xe0, /* 011001111110 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x3f, 0xe0, /* 001111111110 */
0x0f, 0xe0, /* 000011111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 65 0x41 'A' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x09, 0x00, /* 000010010000 */
0x11, 0x80, /* 000100011000 */
0x11, 0x80, /* 000100011000 */
0x10, 0x80, /* 000100001000 */
0x3f, 0xc0, /* 001111111100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x40, /* 001000000100 */
0x40, 0x60, /* 010000000110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 66 0x42 'B' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x00, /* 111111110000 */
0x60, 0x80, /* 011000001000 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x61, 0x80, /* 011000011000 */
0x7f, 0x80, /* 011111111000 */
0x60, 0xc0, /* 011000001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0xc0, /* 011000001100 */
0xff, 0x80, /* 111111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 67 0x43 'C' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x60, /* 000100000110 */
0x20, 0x20, /* 001000000010 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x20, 0x00, /* 001000000000 */
0x30, 0x20, /* 001100000010 */
0x18, 0x40, /* 000110000100 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 68 0x44 'D' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x00, /* 111111110000 */
0x61, 0xc0, /* 011000011100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x40, /* 011000000100 */
0x61, 0x80, /* 011000011000 */
0xfe, 0x00, /* 111111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 69 0x45 'E' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xc0, /* 011111111100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x80, /* 001100001000 */
0x3f, 0x80, /* 001111111000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x20, /* 001100000010 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 70 0x46 'F' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xc0, /* 011111111100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x80, /* 001100001000 */
0x3f, 0x80, /* 001111111000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x78, 0x00, /* 011110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 71 0x47 'G' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x60, /* 000100000110 */
0x20, 0x20, /* 001000000010 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x61, 0xf0, /* 011000011111 */
0x60, 0x60, /* 011000000110 */
0x20, 0x60, /* 001000000110 */
0x30, 0x60, /* 001100000110 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 72 0x48 'H' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xf0, /* 111100001111 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0xf0, 0xf0, /* 111100001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 73 0x49 'I' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 74 0x4a 'J' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x38, 0x00, /* 001110000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
/* 75 0x4b 'K' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xe0, /* 111100001110 */
0x61, 0x80, /* 011000011000 */
0x63, 0x00, /* 011000110000 */
0x66, 0x00, /* 011001100000 */
0x6c, 0x00, /* 011011000000 */
0x78, 0x00, /* 011110000000 */
0x78, 0x00, /* 011110000000 */
0x7c, 0x00, /* 011111000000 */
0x6e, 0x00, /* 011011100000 */
0x67, 0x00, /* 011001110000 */
0x63, 0x80, /* 011000111000 */
0x61, 0xc0, /* 011000011100 */
0x60, 0xe0, /* 011000001110 */
0xf0, 0x70, /* 111100000111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 76 0x4c 'L' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x78, 0x00, /* 011110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x20, /* 001100000010 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 77 0x4d 'M' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xe0, 0x70, /* 111000000111 */
0x60, 0xe0, /* 011000001110 */
0x70, 0xe0, /* 011100001110 */
0x70, 0xe0, /* 011100001110 */
0x70, 0xe0, /* 011100001110 */
0x59, 0x60, /* 010110010110 */
0x59, 0x60, /* 010110010110 */
0x59, 0x60, /* 010110010110 */
0x4d, 0x60, /* 010011010110 */
0x4e, 0x60, /* 010011100110 */
0x4e, 0x60, /* 010011100110 */
0x44, 0x60, /* 010001000110 */
0x44, 0x60, /* 010001000110 */
0xe4, 0xf0, /* 111001001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 78 0x4e 'N' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xc0, 0x70, /* 110000000111 */
0x60, 0x20, /* 011000000010 */
0x70, 0x20, /* 011100000010 */
0x78, 0x20, /* 011110000010 */
0x58, 0x20, /* 010110000010 */
0x4c, 0x20, /* 010011000010 */
0x46, 0x20, /* 010001100010 */
0x47, 0x20, /* 010001110010 */
0x43, 0x20, /* 010000110010 */
0x41, 0xa0, /* 010000011010 */
0x40, 0xe0, /* 010000001110 */
0x40, 0xe0, /* 010000001110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0x30, /* 111000000011 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 79 0x4f 'O' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x60, /* 001000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x20, 0x40, /* 001000000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 80 0x50 'P' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0x80, /* 011111111000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0xc0, /* 001100001100 */
0x37, 0x80, /* 001101111000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x78, 0x00, /* 011110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 81 0x51 'Q' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x60, /* 001000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0x40, /* 001100000100 */
0x38, 0x40, /* 001110000100 */
0x1f, 0x80, /* 000111111000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x23, 0x90, /* 001000111001 */
0x01, 0xe0, /* 000000011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 82 0x52 'R' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x00, /* 111111110000 */
0x61, 0x80, /* 011000011000 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0x80, /* 011000001000 */
0x7f, 0x00, /* 011111110000 */
0x7c, 0x00, /* 011111000000 */
0x6e, 0x00, /* 011011100000 */
0x67, 0x00, /* 011001110000 */
0x63, 0x80, /* 011000111000 */
0x61, 0xc0, /* 011000011100 */
0x60, 0xe0, /* 011000001110 */
0xf0, 0x70, /* 111100000111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 83 0x53 'S' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xe0, /* 000111111110 */
0x30, 0x60, /* 001100000110 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x70, 0x00, /* 011100000000 */
0x3c, 0x00, /* 001111000000 */
0x1e, 0x00, /* 000111100000 */
0x07, 0x80, /* 000001111000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0xe0, /* 000000001110 */
0x40, 0x60, /* 010000000110 */
0x40, 0x60, /* 010000000110 */
0x60, 0xc0, /* 011000001100 */
0x7f, 0x80, /* 011111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 84 0x54 'T' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x46, 0x20, /* 010001100010 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 85 0x55 'U' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x70, 0x40, /* 011100000100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 86 0x56 'V' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xe0, 0xe0, /* 111000001110 */
0x60, 0x40, /* 011000000100 */
0x30, 0x80, /* 001100001000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x80, /* 001100001000 */
0x19, 0x00, /* 000110010000 */
0x19, 0x00, /* 000110010000 */
0x19, 0x00, /* 000110010000 */
0x0a, 0x00, /* 000010100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 87 0x57 'W' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xfe, 0xf0, /* 111111101111 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x76, 0x20, /* 011101100010 */
0x77, 0x40, /* 011101110100 */
0x33, 0x40, /* 001100110100 */
0x37, 0x40, /* 001101110100 */
0x3b, 0xc0, /* 001110111100 */
0x3b, 0x80, /* 001110111000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 88 0x58 'X' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x38, 0x80, /* 001110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x11, 0x80, /* 000100011000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 89 0x59 'Y' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 90 0x5a 'Z' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xe0, /* 001111111110 */
0x20, 0xc0, /* 001000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x20, /* 000110000010 */
0x3f, 0xe0, /* 001111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 91 0x5b '[' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 92 0x5c '\' */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x60, /* 000000000110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 93 0x5d ']' */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 94 0x5e '^' */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1b, 0x00, /* 000110110000 */
0x31, 0x80, /* 001100011000 */
0x60, 0xc0, /* 011000001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 95 0x5f '_' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 96 0x60 '`' */
0x00, 0x00, /* 000000000000 */
0x01, 0x00, /* 000000010000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0x80, /* 000001111000 */
0x07, 0x80, /* 000001111000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 97 0x61 'a' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 98 0x62 'b' */
0x00, 0x00, /* 000000000000 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0xe0, 0x00, /* 111000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x67, 0x80, /* 011001111000 */
0x6f, 0xc0, /* 011011111100 */
0x70, 0xe0, /* 011100001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x60, /* 011100000110 */
0x78, 0xc0, /* 011110001100 */
0x4f, 0x80, /* 010011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 99 0x63 'c' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x31, 0xc0, /* 001100011100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x70, 0x40, /* 011100000100 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 100 0x64 'd' */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x0f, 0x60, /* 000011110110 */
0x31, 0xe0, /* 001100011110 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0xe0, /* 011100001110 */
0x39, 0x60, /* 001110010110 */
0x1e, 0x70, /* 000111100111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 101 0x65 'e' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 102 0x66 'f' */
0x00, 0x00, /* 000000000000 */
0x03, 0x80, /* 000000111000 */
0x04, 0xc0, /* 000001001100 */
0x04, 0xc0, /* 000001001100 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x3f, 0x80, /* 001111111000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 103 0x67 'g' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x20, /* 000111110010 */
0x31, 0xe0, /* 001100011110 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x31, 0x80, /* 001100011000 */
0x3f, 0x00, /* 001111110000 */
0x60, 0x00, /* 011000000000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0xe0, /* 001111111110 */
0x20, 0x60, /* 001000000110 */
0x40, 0x20, /* 010000000010 */
0x40, 0x20, /* 010000000010 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x00, 0x00, /* 000000000000 */
/* 104 0x68 'h' */
0x00, 0x00, /* 000000000000 */
0x10, 0x00, /* 000100000000 */
0x30, 0x00, /* 001100000000 */
0x70, 0x00, /* 011100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x37, 0x80, /* 001101111000 */
0x39, 0xc0, /* 001110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 105 0x69 'i' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 106 0x6a 'j' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x03, 0xc0, /* 000000111100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x20, 0xc0, /* 001000001100 */
0x30, 0xc0, /* 001100001100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
/* 107 0x6b 'k' */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0xe0, 0x00, /* 111000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x61, 0xc0, /* 011000011100 */
0x63, 0x00, /* 011000110000 */
0x66, 0x00, /* 011001100000 */
0x7c, 0x00, /* 011111000000 */
0x78, 0x00, /* 011110000000 */
0x7c, 0x00, /* 011111000000 */
0x6e, 0x00, /* 011011100000 */
0x67, 0x00, /* 011001110000 */
0x63, 0x80, /* 011000111000 */
0xf1, 0xe0, /* 111100011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 108 0x6c 'l' */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 109 0x6d 'm' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xdd, 0xc0, /* 110111011100 */
0x6e, 0xe0, /* 011011101110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0xef, 0x70, /* 111011110111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 110 0x6e 'n' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x27, 0x80, /* 001001111000 */
0x79, 0xc0, /* 011110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 111 0x6f 'o' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 112 0x70 'p' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xef, 0x80, /* 111011111000 */
0x71, 0xc0, /* 011100011100 */
0x60, 0xe0, /* 011000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x40, /* 011000000100 */
0x70, 0x80, /* 011100001000 */
0x7f, 0x00, /* 011111110000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0xf0, 0x00, /* 111100000000 */
0x00, 0x00, /* 000000000000 */
/* 113 0x71 'q' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x20, /* 000011110010 */
0x11, 0xe0, /* 000100011110 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x60, /* 011100000110 */
0x38, 0xe0, /* 001110001110 */
0x1f, 0xe0, /* 000111111110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0xf0, /* 000000001111 */
0x00, 0x00, /* 000000000000 */
/* 114 0x72 'r' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x73, 0x80, /* 011100111000 */
0x34, 0xc0, /* 001101001100 */
0x38, 0xc0, /* 001110001100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x78, 0x00, /* 011110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 115 0x73 's' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xc0, /* 000111111100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x40, /* 001100000100 */
0x38, 0x00, /* 001110000000 */
0x1e, 0x00, /* 000111100000 */
0x07, 0x80, /* 000001111000 */
0x01, 0xc0, /* 000000011100 */
0x20, 0xc0, /* 001000001100 */
0x30, 0xc0, /* 001100001100 */
0x3f, 0x80, /* 001111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 116 0x74 't' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x0c, 0x00, /* 000011000000 */
0x7f, 0xc0, /* 011111111100 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x20, /* 000011000010 */
0x0e, 0x40, /* 000011100100 */
0x07, 0x80, /* 000001111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 117 0x75 'u' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 118 0x76 'v' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 119 0x77 'w' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x70, /* 111111110111 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x37, 0x40, /* 001101110100 */
0x3b, 0x40, /* 001110110100 */
0x3b, 0x40, /* 001110110100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 120 0x78 'x' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf8, 0xf0, /* 111110001111 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1d, 0x00, /* 000111010000 */
0x0e, 0x00, /* 000011100000 */
0x07, 0x00, /* 000001110000 */
0x0b, 0x80, /* 000010111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0xf1, 0xf0, /* 111100011111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 121 0x79 'y' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xf0, /* 111100001111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x0c, 0x00, /* 000011000000 */
0x08, 0x00, /* 000010000000 */
0x78, 0x00, /* 011110000000 */
0x70, 0x00, /* 011100000000 */
0x00, 0x00, /* 000000000000 */
/* 122 0x7a 'z' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0xe0, /* 011000001110 */
0x41, 0xc0, /* 010000011100 */
0x03, 0x80, /* 000000111000 */
0x07, 0x00, /* 000001110000 */
0x0e, 0x00, /* 000011100000 */
0x1c, 0x00, /* 000111000000 */
0x38, 0x20, /* 001110000010 */
0x70, 0x60, /* 011100000110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 123 0x7b '{' */
0x00, 0x00, /* 000000000000 */
0x03, 0x80, /* 000000111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x38, 0x00, /* 001110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x80, /* 000000111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 124 0x7c '|' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
/* 125 0x7d '}' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x00, /* 000111000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0xc0, /* 000000011100 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1c, 0x00, /* 000111000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 126 0x7e '~' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1c, 0x20, /* 000111000010 */
0x3e, 0x60, /* 001111100110 */
0x67, 0xc0, /* 011001111100 */
0x43, 0x80, /* 010000111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 127 0x7f '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
/* 128 0x80 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x60, /* 000100000110 */
0x20, 0x20, /* 001000000010 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x20, 0x00, /* 001000000000 */
0x30, 0x20, /* 001100000010 */
0x18, 0x40, /* 000110000100 */
0x0f, 0x80, /* 000011111000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 129 0x81 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 130 0x82 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 131 0x83 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 132 0x84 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 133 0x85 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 134 0x86 '.' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x07, 0x00, /* 000001110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 135 0x87 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x31, 0xc0, /* 001100011100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x70, 0x40, /* 011100000100 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 136 0x88 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 137 0x89 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 138 0x8a '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 139 0x8b '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 140 0x8c '.' */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1b, 0x00, /* 000110110000 */
0x31, 0x80, /* 001100011000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 141 0x8d '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 142 0x8e '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x19, 0x80, /* 000110011000 */
0x11, 0x80, /* 000100011000 */
0x3f, 0xc0, /* 001111111100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x60, /* 011000000110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 143 0x8f '.' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x04, 0x00, /* 000001000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x19, 0x80, /* 000110011000 */
0x11, 0x80, /* 000100011000 */
0x3f, 0xc0, /* 001111111100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x60, /* 011000000110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 144 0x90 '.' */
0x00, 0x00, /* 000000000000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x08, 0x00, /* 000010000000 */
0x7f, 0xe0, /* 011111111110 */
0x30, 0x20, /* 001100000010 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x80, /* 001100001000 */
0x3f, 0x80, /* 001111111000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x20, /* 001100000010 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 145 0x91 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3d, 0xe0, /* 001111011110 */
0x66, 0x30, /* 011001100011 */
0x46, 0x30, /* 010001100011 */
0x06, 0x30, /* 000001100011 */
0x3f, 0xf0, /* 001111111111 */
0x66, 0x00, /* 011001100000 */
0xc6, 0x00, /* 110001100000 */
0xc6, 0x00, /* 110001100000 */
0xe7, 0x30, /* 111001110011 */
0x7d, 0xe0, /* 011111011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 146 0x92 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x03, 0xf0, /* 000000111111 */
0x07, 0x10, /* 000001110001 */
0x07, 0x10, /* 000001110001 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x20, /* 000010110010 */
0x13, 0xe0, /* 000100111110 */
0x13, 0x20, /* 000100110010 */
0x3f, 0x00, /* 001111110000 */
0x23, 0x00, /* 001000110000 */
0x23, 0x00, /* 001000110000 */
0x43, 0x10, /* 010000110001 */
0x43, 0x10, /* 010000110001 */
0xe7, 0xf0, /* 111001111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 147 0x93 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 148 0x94 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 149 0x95 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 150 0x96 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 151 0x97 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 152 0x98 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xf0, /* 111100001111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x0c, 0x00, /* 000011000000 */
0x08, 0x00, /* 000010000000 */
0x78, 0x00, /* 011110000000 */
0x70, 0x00, /* 011100000000 */
0x00, 0x00, /* 000000000000 */
/* 153 0x99 '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x60, /* 001000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x20, 0x40, /* 001000000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 154 0x9a '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0xe0, 0x30, /* 111000000011 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x70, 0x40, /* 011100000100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 155 0x9b '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x36, 0xc0, /* 001101101100 */
0x26, 0xc0, /* 001001101100 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x76, 0x40, /* 011101100100 */
0x36, 0xc0, /* 001101101100 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 156 0x9c '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x1c, 0xc0, /* 000111001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x7e, 0x00, /* 011111100000 */
0x7e, 0x00, /* 011111100000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x3e, 0x20, /* 001111100010 */
0x7f, 0xe0, /* 011111111110 */
0x61, 0xc0, /* 011000011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 157 0x9d '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 158 0x9e '.' */
0x00, 0x00, /* 000000000000 */
0x7f, 0x80, /* 011111111000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0xc0, /* 001100001100 */
0x37, 0x80, /* 001101111000 */
0x30, 0x00, /* 001100000000 */
0x33, 0x00, /* 001100110000 */
0x37, 0x80, /* 001101111000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x30, /* 001100110011 */
0x31, 0xe0, /* 001100011110 */
0x78, 0xc0, /* 011110001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 159 0x9f '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0xc0, /* 000000001100 */
0x01, 0xe0, /* 000000011110 */
0x03, 0x30, /* 000000110011 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x3f, 0xe0, /* 001111111110 */
0x7f, 0xc0, /* 011111111100 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xcc, 0x00, /* 110011000000 */
0x78, 0x00, /* 011110000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
/* 160 0xa0 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 161 0xa1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 162 0xa2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 163 0xa3 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 164 0xa4 '.' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x40, /* 000111000100 */
0x3f, 0xc0, /* 001111111100 */
0x23, 0x80, /* 001000111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x27, 0x80, /* 001001111000 */
0x79, 0xc0, /* 011110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 165 0xa5 '.' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x40, /* 000111000100 */
0x3f, 0xc0, /* 001111111100 */
0x23, 0x80, /* 001000111000 */
0xc0, 0x70, /* 110000000111 */
0x60, 0x20, /* 011000000010 */
0x70, 0x20, /* 011100000010 */
0x78, 0x20, /* 011110000010 */
0x5c, 0x20, /* 010111000010 */
0x4e, 0x20, /* 010011100010 */
0x47, 0x20, /* 010001110010 */
0x43, 0xa0, /* 010000111010 */
0x41, 0xe0, /* 010000011110 */
0x40, 0xe0, /* 010000001110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0x30, /* 111000000011 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 166 0xa6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x01, 0x80, /* 000000011000 */
0x07, 0x80, /* 000001111000 */
0x19, 0x80, /* 000110011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x33, 0x80, /* 001100111000 */
0x1d, 0xc0, /* 000111011100 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 167 0xa7 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x19, 0x80, /* 000110011000 */
0x10, 0xc0, /* 000100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x80, /* 001100001000 */
0x19, 0x80, /* 000110011000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 168 0xa8 '.' */
0x00, 0x00, /* 000000000000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x40, /* 001100000100 */
0x39, 0xc0, /* 001110011100 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 169 0xa9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 170 0xaa '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 171 0xab '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x10, 0x00, /* 000100000000 */
0x30, 0x00, /* 001100000000 */
0x10, 0x00, /* 000100000000 */
0x10, 0x40, /* 000100000100 */
0x10, 0x80, /* 000100001000 */
0x11, 0x00, /* 000100010000 */
0x3a, 0x00, /* 001110100000 */
0x05, 0xc0, /* 000001011100 */
0x0a, 0x20, /* 000010100010 */
0x10, 0x20, /* 000100000010 */
0x20, 0xc0, /* 001000001100 */
0x41, 0x00, /* 010000010000 */
0x02, 0x00, /* 000000100000 */
0x03, 0xe0, /* 000000111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 172 0xac '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x10, 0x00, /* 000100000000 */
0x30, 0x00, /* 001100000000 */
0x10, 0x00, /* 000100000000 */
0x10, 0x40, /* 000100000100 */
0x10, 0x80, /* 000100001000 */
0x11, 0x00, /* 000100010000 */
0x3a, 0x40, /* 001110100100 */
0x04, 0xc0, /* 000001001100 */
0x09, 0x40, /* 000010010100 */
0x12, 0x40, /* 000100100100 */
0x24, 0x40, /* 001001000100 */
0x47, 0xe0, /* 010001111110 */
0x00, 0x40, /* 000000000100 */
0x00, 0x40, /* 000000000100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 173 0xad '.' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 174 0xae '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x60, /* 000001100110 */
0x0c, 0xc0, /* 000011001100 */
0x19, 0x80, /* 000110011000 */
0x33, 0x00, /* 001100110000 */
0x66, 0x00, /* 011001100000 */
0x33, 0x00, /* 001100110000 */
0x19, 0x80, /* 000110011000 */
0x0c, 0xc0, /* 000011001100 */
0x06, 0x60, /* 000001100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 175 0xaf '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x66, 0x00, /* 011001100000 */
0x33, 0x00, /* 001100110000 */
0x19, 0x80, /* 000110011000 */
0x0c, 0xc0, /* 000011001100 */
0x06, 0x60, /* 000001100110 */
0x0c, 0xc0, /* 000011001100 */
0x19, 0x80, /* 000110011000 */
0x33, 0x00, /* 001100110000 */
0x66, 0x00, /* 011001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 176 0xb0 '.' */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
/* 177 0xb1 '.' */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
0x88, 0x80, /* 100010001000 */
0x22, 0x20, /* 001000100010 */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
0x88, 0x80, /* 100010001000 */
0x22, 0x20, /* 001000100010 */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
0x88, 0x80, /* 100010001000 */
0x22, 0x20, /* 001000100010 */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
/* 178 0xb2 '.' */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
/* 179 0xb3 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 180 0xb4 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 181 0xb5 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 182 0xb6 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 183 0xb7 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 184 0xb8 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 185 0xb9 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x01, 0x80, /* 000000011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 186 0xba '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 187 0xbb '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x01, 0x80, /* 000000011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 188 0xbc '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x01, 0x80, /* 000000011000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 189 0xbd '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 190 0xbe '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 191 0xbf '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 192 0xc0 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 193 0xc1 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 194 0xc2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 195 0xc3 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 196 0xc4 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 197 0xc5 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 198 0xc6 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 199 0xc7 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 200 0xc8 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0c, 0x00, /* 000011000000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 201 0xc9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x0c, 0x00, /* 000011000000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 202 0xca '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 203 0xcb '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 204 0xcc '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0c, 0x00, /* 000011000000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 205 0xcd '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 206 0xce '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x00, 0x00, /* 000000000000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 207 0xcf '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 208 0xd0 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 209 0xd1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 210 0xd2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 211 0xd3 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 212 0xd4 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 213 0xd5 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 214 0xd6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 215 0xd7 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 216 0xd8 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 217 0xd9 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 218 0xda '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 219 0xdb '.' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 220 0xdc '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 221 0xdd '.' */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
/* 222 0xde '.' */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
/* 223 0xdf '.' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 224 0xe0 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x60, /* 000011110110 */
0x13, 0xe0, /* 000100111110 */
0x21, 0xc0, /* 001000011100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x70, 0x80, /* 011100001000 */
0x39, 0xc0, /* 001110011100 */
0x1f, 0x60, /* 000111110110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 225 0xe1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x31, 0x80, /* 001100011000 */
0x37, 0x80, /* 001101111000 */
0x31, 0x80, /* 001100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x31, 0x80, /* 001100011000 */
0x77, 0x00, /* 011101110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 226 0xe2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xe0, /* 001111111110 */
0x3f, 0xe0, /* 001111111110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 227 0xe3 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 228 0xe4 '.' */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x60, /* 011000000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x00, /* 001100000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x60, /* 001100000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 229 0xe5 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0xe0, /* 000001111110 */
0x0f, 0xe0, /* 000011111110 */
0x13, 0x80, /* 000100111000 */
0x21, 0xc0, /* 001000011100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x70, 0x80, /* 011100001000 */
0x39, 0x00, /* 001110010000 */
0x1e, 0x00, /* 000111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 230 0xe6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x36, 0xe0, /* 001101101110 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 231 0xe7 '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x3f, 0xc0, /* 001111111100 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 232 0xe8 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 233 0xe9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 234 0xea '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0xd9, 0xb0, /* 110110011011 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 235 0xeb '.' */
0x00, 0x00, /* 000000000000 */
0x07, 0x80, /* 000001111000 */
0x0c, 0xc0, /* 000011001100 */
0x18, 0x60, /* 000110000110 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 236 0xec '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x39, 0xc0, /* 001110011100 */
0x6f, 0x60, /* 011011110110 */
0x66, 0x60, /* 011001100110 */
0xc6, 0x30, /* 110001100011 */
0xc6, 0x30, /* 110001100011 */
0x66, 0x60, /* 011001100110 */
0x6f, 0x60, /* 011011110110 */
0x39, 0xc0, /* 001110011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 237 0xed '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x3b, 0xc0, /* 001110111100 */
0x6f, 0x60, /* 011011110110 */
0x66, 0x60, /* 011001100110 */
0xc6, 0x30, /* 110001100011 */
0xc6, 0x30, /* 110001100011 */
0x66, 0x60, /* 011001100110 */
0x6f, 0x60, /* 011011110110 */
0x3d, 0xc0, /* 001111011100 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 238 0xee '.' */
0x00, 0x00, /* 000000000000 */
0x01, 0xc0, /* 000000011100 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x1f, 0xc0, /* 000111111100 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 239 0xef '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x39, 0xc0, /* 001110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 240 0xf0 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 241 0xf1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 242 0xf2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0x38, 0x00, /* 001110000000 */
0x0e, 0x00, /* 000011100000 */
0x03, 0x80, /* 000000111000 */
0x00, 0xe0, /* 000000001110 */
0x00, 0xe0, /* 000000001110 */
0x03, 0x80, /* 000000111000 */
0x0e, 0x00, /* 000011100000 */
0x38, 0x00, /* 001110000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 243 0xf3 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x01, 0xc0, /* 000000011100 */
0x07, 0x00, /* 000001110000 */
0x1c, 0x00, /* 000111000000 */
0x70, 0x00, /* 011100000000 */
0x70, 0x00, /* 011100000000 */
0x1c, 0x00, /* 000111000000 */
0x07, 0x00, /* 000001110000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x60, /* 000000000110 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 244 0xf4 '.' */
0x00, 0x00, /* 000000000000 */
0x03, 0x80, /* 000000111000 */
0x07, 0xc0, /* 000001111100 */
0x0c, 0x60, /* 000011000110 */
0x0c, 0x60, /* 000011000110 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
/* 245 0xf5 '.' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x00, /* 000111000000 */
0x3e, 0x00, /* 001111100000 */
0x63, 0x00, /* 011000110000 */
0x63, 0x00, /* 011000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
/* 246 0xf6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 247 0xf7 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x38, 0x00, /* 001110000000 */
0x6c, 0x00, /* 011011000000 */
0x06, 0x30, /* 000001100011 */
0x03, 0x60, /* 000000110110 */
0x39, 0xc0, /* 001110011100 */
0x6c, 0x00, /* 011011000000 */
0x06, 0x30, /* 000001100011 */
0x03, 0x60, /* 000000110110 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 248 0xf8 '.' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 249 0xf9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1c, 0x00, /* 000111000000 */
0x3e, 0x00, /* 001111100000 */
0x3e, 0x00, /* 001111100000 */
0x3e, 0x00, /* 001111100000 */
0x1c, 0x00, /* 000111000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 250 0xfa '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x3c, 0x00, /* 001111000000 */
0x3c, 0x00, /* 001111000000 */
0x18, 0x00, /* 000110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 251 0xfb '.' */
0x00, 0x00, /* 000000000000 */
0x07, 0xe0, /* 000001111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xc6, 0x00, /* 110001100000 */
0x66, 0x00, /* 011001100000 */
0x36, 0x00, /* 001101100000 */
0x1e, 0x00, /* 000111100000 */
0x0e, 0x00, /* 000011100000 */
0x06, 0x00, /* 000001100000 */
0x02, 0x00, /* 000000100000 */
0x00, 0x00, /* 000000000000 */
/* 252 0xfc '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x13, 0x80, /* 000100111000 */
0x3d, 0xc0, /* 001111011100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x3d, 0xe0, /* 001111011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 253 0xfd '.' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x31, 0x80, /* 001100011000 */
0x21, 0x80, /* 001000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x40, /* 000110000100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 254 0xfe '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 255 0xff '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
} };
const struct font_desc font_sun_12x22 = {
.idx = SUN12x22_IDX,
.name = "SUN12x22",
.width = 12,
.height = 22,
.charcount = 256,
.data = fontdata_sun12x22.data,
#ifdef __sparc__
.pref = 5,
#else
.pref = -1,
#endif
};
| linux-master | lib/fonts/font_sun12x22.c |
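The glyph table above stores each 12x22 character as 22 rows of two bytes, with the 12 pixel bits left-justified in the big-endian byte pair (the binary comments beside each row show exactly those 12 bits). The sketch below is a minimal, standalone userspace illustration of that decoding; it is not kernel code, and the `demo_glyph` array and helper names are purely illustrative. In the kernel itself, console drivers typically obtain this descriptor by name (e.g. via `find_font("SUN12x22")`) rather than indexing the bitmap directly.

```c
/*
 * Minimal userspace sketch (not kernel code) showing how one row of a
 * 12x22 glyph is decoded: each row occupies two bytes, and pixel 0 is
 * the most significant bit of the 16-bit big-endian pair, matching the
 * binary comments in fontdata_sun12x22 (22 rows per glyph).
 */
#include <stdio.h>
#include <stdint.h>

#define GLYPH_HEIGHT  22
#define GLYPH_WIDTH   12
#define BYTES_PER_ROW  2

/* Illustrative data: the first six rows of glyph 0xfe copied from above. */
static const uint8_t demo_glyph[GLYPH_HEIGHT * BYTES_PER_ROW] = {
	0x00, 0x00,
	0x00, 0x00,
	0x00, 0x00,
	0x00, 0x00,
	0x00, 0x00,
	0x3f, 0xc0,
	/* remaining rows left zero for brevity */
};

/* Return nonzero if the pixel at (row, col) is set in the glyph bitmap. */
static int pixel_set(const uint8_t *glyph, int row, int col)
{
	uint16_t bits = (uint16_t)(glyph[row * BYTES_PER_ROW] << 8) |
			 glyph[row * BYTES_PER_ROW + 1];

	return (bits >> (15 - col)) & 1;
}

int main(void)
{
	/* Render the first six rows as ASCII art ('#' = set pixel). */
	for (int row = 0; row < 6; row++) {
		for (int col = 0; col < GLYPH_WIDTH; col++)
			putchar(pixel_set(demo_glyph, row, col) ? '#' : '.');
		putchar('\n');
	}
	return 0;
}
```

The sixth printed row comes out as `..########..`, matching the `001111111100` comment on the corresponding source line, which confirms the left-justified bit layout assumed here.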