| func (string, lengths 0–484k) | target (int64, 0–1) | cwe (list, lengths 0–4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64, ~1.22e24–3.40e29) | size (int64, 1–24k) | message (string, lengths 0–13.3k) |
---|---|---|---|---|---|---|---|
**/
CImgList<T>& operator=(const char *const filename) {
return assign(filename);
}
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 325,514,307,192,906,800,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static void update_ple_window_actual_max(void)
{
ple_window_actual_max =
__shrink_ple_window(max(ple_window_max, ple_window),
ple_window_grow, INT_MIN);
}
| 0 |
[] |
kvm
|
a642fc305053cc1c6e47e4f4df327895747ab485
| 95,293,415,682,203,680,000,000,000,000,000,000,000 | 6 |
kvm: vmx: handle invvpid vm exit gracefully
On systems with invvpid instruction support (the corresponding bit in
the IA32_VMX_EPT_VPID_CAP MSR is set), guest invocation of invvpid
causes a vm exit, which is currently not handled and results in
propagation of an unknown exit to userspace.
Fix this by installing an invvpid vm exit handler.
This is CVE-2014-3646.
Cc: [email protected]
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
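The shape of such a handler is small. A hedged sketch of what the message describes (close to, but not guaranteed to be, the verbatim upstream fix): since this VMX implementation does not expose invvpid to guests, the handler reflects #UD back instead of punting an unknown exit to userspace.
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	/* nested invvpid is not supported here, so the architecturally
	 * correct response is to inject #UD into the guest */
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;	/* exit handled, resume the guest */
}
/* ...wired into the exit dispatch table: */
[EXIT_REASON_INVVPID] = handle_invvpid,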
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
&aesni_128_cbc_hmac_sha1_cipher : NULL);
}
| 0 |
[
"CWE-310"
] |
openssl
|
4159f311671cf3bac03815e5de44681eb758304a
| 332,309,149,628,991,450,000,000,000,000,000,000,000 | 5 |
Check that we have enough padding characters.
Reviewed-by: Emilia Käsper <[email protected]>
CVE-2016-2107
MR: #2572
|
void CLASS process_Sony_0x940e(uchar *buf, ushort len, unsigned id)
{
if (((imgdata.makernotes.sony.SonyCameraType != LIBRAW_SONY_SLT) &&
(imgdata.makernotes.sony.SonyCameraType != LIBRAW_SONY_ILCA)) ||
(id == 280) ||
(id == 281) ||
(id == 285))
return;
if (imgdata.makernotes.sony.SonyCameraType == LIBRAW_SONY_ILCA)
{
if (len >= 0x06)
{
imgdata.shootinginfo.FocusMode = SonySubstitution[buf[0x05]];
}
if (len >= 0x0051)
{
imgdata.makernotes.sony.AFMicroAdjValue = SonySubstitution[buf[0x0050]];
}
}
else
{
if (len >= 0x0c)
{
imgdata.shootinginfo.FocusMode = SonySubstitution[buf[0x0b]];
}
if (len >= 0x017e)
{
imgdata.makernotes.sony.AFMicroAdjValue = SonySubstitution[buf[0x017d]];
}
}
if (imgdata.makernotes.sony.AFMicroAdjValue != 0)
imgdata.makernotes.sony.AFMicroAdjOn = 1;
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
LibRaw
|
e47384546b43d0fd536e933249047bc397a4d88b
| 321,438,698,447,478,900,000,000,000,000,000,000,000 | 35 |
Secunia Advisory SA83050: possible infinite loop in parse_minolta()
|
static int l2cap_get_mode(struct l2cap_chan *chan)
{
switch (chan->mode) {
case L2CAP_MODE_BASIC:
return BT_MODE_BASIC;
case L2CAP_MODE_ERTM:
return BT_MODE_ERTM;
case L2CAP_MODE_STREAMING:
return BT_MODE_STREAMING;
case L2CAP_MODE_LE_FLOWCTL:
return BT_MODE_LE_FLOWCTL;
case L2CAP_MODE_EXT_FLOWCTL:
return BT_MODE_EXT_FLOWCTL;
}
return -EINVAL;
}
| 0 |
[
"CWE-787"
] |
linux
|
1bff51ea59a9afb67d2dd78518ab0582a54a472c
| 106,154,525,530,720,960,000,000,000,000,000,000,000 | 17 |
Bluetooth: fix use-after-free error in lock_sock_nested()
use-after-free error in lock_sock_nested is reported:
[ 179.140137][ T3731] =====================================================
[ 179.142675][ T3731] BUG: KMSAN: use-after-free in lock_sock_nested+0x280/0x2c0
[ 179.145494][ T3731] CPU: 4 PID: 3731 Comm: kworker/4:2 Not tainted 5.12.0-rc6+ #54
[ 179.148432][ T3731] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
[ 179.151806][ T3731] Workqueue: events l2cap_chan_timeout
[ 179.152730][ T3731] Call Trace:
[ 179.153301][ T3731] dump_stack+0x24c/0x2e0
[ 179.154063][ T3731] kmsan_report+0xfb/0x1e0
[ 179.154855][ T3731] __msan_warning+0x5c/0xa0
[ 179.155579][ T3731] lock_sock_nested+0x280/0x2c0
[ 179.156436][ T3731] ? kmsan_get_metadata+0x116/0x180
[ 179.157257][ T3731] l2cap_sock_teardown_cb+0xb8/0x890
[ 179.158154][ T3731] ? __msan_metadata_ptr_for_load_8+0x10/0x20
[ 179.159141][ T3731] ? kmsan_get_metadata+0x116/0x180
[ 179.159994][ T3731] ? kmsan_get_shadow_origin_ptr+0x84/0xb0
[ 179.160959][ T3731] ? l2cap_sock_recv_cb+0x420/0x420
[ 179.161834][ T3731] l2cap_chan_del+0x3e1/0x1d50
[ 179.162608][ T3731] ? kmsan_get_metadata+0x116/0x180
[ 179.163435][ T3731] ? kmsan_get_shadow_origin_ptr+0x84/0xb0
[ 179.164406][ T3731] l2cap_chan_close+0xeea/0x1050
[ 179.165189][ T3731] ? kmsan_internal_unpoison_shadow+0x42/0x70
[ 179.166180][ T3731] l2cap_chan_timeout+0x1da/0x590
[ 179.167066][ T3731] ? __msan_metadata_ptr_for_load_8+0x10/0x20
[ 179.168023][ T3731] ? l2cap_chan_create+0x560/0x560
[ 179.168818][ T3731] process_one_work+0x121d/0x1ff0
[ 179.169598][ T3731] worker_thread+0x121b/0x2370
[ 179.170346][ T3731] kthread+0x4ef/0x610
[ 179.171010][ T3731] ? process_one_work+0x1ff0/0x1ff0
[ 179.171828][ T3731] ? kthread_blkcg+0x110/0x110
[ 179.172587][ T3731] ret_from_fork+0x1f/0x30
[ 179.173348][ T3731]
[ 179.173752][ T3731] Uninit was created at:
[ 179.174409][ T3731] kmsan_internal_poison_shadow+0x5c/0xf0
[ 179.175373][ T3731] kmsan_slab_free+0x76/0xc0
[ 179.176060][ T3731] kfree+0x3a5/0x1180
[ 179.176664][ T3731] __sk_destruct+0x8af/0xb80
[ 179.177375][ T3731] __sk_free+0x812/0x8c0
[ 179.178032][ T3731] sk_free+0x97/0x130
[ 179.178686][ T3731] l2cap_sock_release+0x3d5/0x4d0
[ 179.179457][ T3731] sock_close+0x150/0x450
[ 179.180117][ T3731] __fput+0x6bd/0xf00
[ 179.180787][ T3731] ____fput+0x37/0x40
[ 179.181481][ T3731] task_work_run+0x140/0x280
[ 179.182219][ T3731] do_exit+0xe51/0x3e60
[ 179.182930][ T3731] do_group_exit+0x20e/0x450
[ 179.183656][ T3731] get_signal+0x2dfb/0x38f0
[ 179.184344][ T3731] arch_do_signal_or_restart+0xaa/0xe10
[ 179.185266][ T3731] exit_to_user_mode_prepare+0x2d2/0x560
[ 179.186136][ T3731] syscall_exit_to_user_mode+0x35/0x60
[ 179.186984][ T3731] do_syscall_64+0xc5/0x140
[ 179.187681][ T3731] entry_SYSCALL_64_after_hwframe+0x44/0xae
[ 179.188604][ T3731] =====================================================
In our case, there are two threads, A and B:
Context: Thread A:            Context: Thread B:
l2cap_chan_timeout()          __se_sys_shutdown()
 l2cap_chan_close()            l2cap_sock_shutdown()
  l2cap_chan_del()              l2cap_chan_close()
   l2cap_sock_teardown_cb()      l2cap_sock_teardown_cb()
Once l2cap_sock_teardown_cb() has executed, this sock is marked as SOCK_ZAPPED
and can be treated as killable in l2cap_sock_kill() if sock_orphan() has
executed. At this point, closing the sock through sock_close() ends up calling
l2cap_sock_kill(), as in Thread C:
Context: Thread C:
sock_close()
l2cap_sock_release()
sock_orphan()
l2cap_sock_kill() #free sock if refcnt is 1
If C has completed, then once A or B reaches l2cap_sock_teardown_cb() again,
a use-after-free occurs.
We should set chan->data to NULL when the sock is destructed, to signal that
the teardown operation is no longer allowed in l2cap_sock_teardown_cb(), and
we should also avoid killing an already-killed socket in l2cap_sock_close_cb().
Signed-off-by: Wang ShaoBo <[email protected]>
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
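A minimal sketch of the two-sided fix the message describes (simplified, not the verbatim patch): the socket destructor clears chan->data, and the teardown callback treats NULL as "socket already gone".
static void l2cap_sock_destruct(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	/* the sock is being freed: tell later callbacks it is gone */
	if (chan)
		chan->data = NULL;
	skb_queue_purge(&sk->sk_receive_queue);
}
static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->data;
	if (!sk)	/* already destructed, teardown not allowed */
		return;
	lock_sock(sk);	/* sk is guaranteed live here */
	/* normal teardown work goes here */
	release_sock(sk);
}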
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
struct xt_table_info *info = NULL;
size_t sz = sizeof(*info) + size;
if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
return NULL;
info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
if (!info)
return NULL;
memset(info, 0, sizeof(*info));
info->size = size;
return info;
}
| 0 |
[] |
linux
|
175e476b8cdf2a4de7432583b49c871345e4f8a1
| 68,997,305,442,879,370,000,000,000,000,000,000,000 | 16 |
netfilter: x_tables: Use correct memory barriers.
When a new table value was assigned, it was followed by a write memory
barrier. This ensured that all writes before this point would complete
before any writes after this point. However, to determine whether the
rules are unused, the sequence counter is read. To ensure that all
writes have been done before these reads, a full memory barrier is
needed, not just a write memory barrier. The same argument applies when
incrementing the counter, before the rules are read.
Changing to using smp_mb() instead of smp_wmb() fixes the kernel panic
reported in cc00bcaa5899 (which is still present), while still
maintaining the same speed of replacing tables.
The smb_mb() barriers potentially slow the packet path, however testing
has shown no measurable change in performance on a 4-core MIPS64
platform.
Fixes: 7f5c6d4f665b ("netfilter: get rid of atomic ops in fast path")
Signed-off-by: Mark Tomlinson <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
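An illustrative sketch of the ordering argument (simplified; wait_for_old_readers() is a hypothetical helper, not a kernel API): the writer stores the new table pointer and then *reads* per-cpu sequence counters, so a store-store barrier is insufficient.
	table->private = newinfo;	/* publish the new ruleset */
	/* smp_wmb() only orders this store against later stores;
	 * the loop below reads the per-cpu seqcounts, so a full
	 * barrier is required to order the store before the loads */
	smp_mb();
	for_each_possible_cpu(cpu)
		wait_for_old_readers(cpu);	/* hypothetical helper */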
static void add_protected_variable(char *varname) /* {{{ */
{
normalize_protected_variable(varname);
zend_hash_str_add_empty_element(&PG(rfc1867_protected_variables), varname, strlen(varname));
}
| 0 |
[
"CWE-190"
] |
php-src
|
a3924ab6542a358a3099de992b63b932a9570add
| 206,884,969,552,898,540,000,000,000,000,000,000,000 | 5 |
Merge branch 'PHP-7.3' into PHP-7.4
* PHP-7.3:
Fix #78876: Long variables cause OOM and temp files are not cleaned
Fix #78875: Long filenames cause OOM and temp files are not cleaned
Update NEWS for 7.2.31
Update CREDITS for PHP 7.2.30
Update NEWS for PHP 7.2.30
|
get_locale_langs_from_localed_dbus (GDBusProxy *proxy, GPtrArray *langs)
{
g_autoptr(GVariant) locale_variant = NULL;
g_autofree const gchar **strv = NULL;
gsize i, j;
locale_variant = g_dbus_proxy_get_cached_property (proxy, "Locale");
if (locale_variant == NULL)
return;
strv = g_variant_get_strv (locale_variant, NULL);
for (i = 0; strv[i]; i++)
{
const gchar *locale = NULL;
g_autofree char *lang = NULL;
/* See locale(7) for these categories */
const char* const categories[] = { "LANG=", "LC_ALL=", "LC_MESSAGES=", "LC_ADDRESS=",
"LC_COLLATE=", "LC_CTYPE=", "LC_IDENTIFICATION=",
"LC_MONETARY=", "LC_MEASUREMENT=", "LC_NAME=",
"LC_NUMERIC=", "LC_PAPER=", "LC_TELEPHONE=",
"LC_TIME=", NULL };
for (j = 0; categories[j]; j++)
{
if (g_str_has_prefix (strv[i], categories[j]))
{
locale = strv[i] + strlen (categories[j]);
break;
}
}
if (locale == NULL || strcmp (locale, "") == 0)
continue;
lang = flatpak_get_lang_from_locale (locale);
if (lang != NULL && !flatpak_g_ptr_array_contains_string (langs, lang))
g_ptr_array_add (langs, g_steal_pointer (&lang));
}
}
| 0 |
[
"CWE-668"
] |
flatpak
|
cd2142888fc4c199723a0dfca1f15ea8788a5483
| 4,062,612,450,017,180,000,000,000,000,000,000,000 | 41 |
Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this.
|
static int slc_open(struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
if (sl->tty == NULL)
return -ENODEV;
sl->flags &= (1 << SLF_INUSE);
netif_start_queue(dev);
return 0;
}
| 0 |
[
"CWE-200",
"CWE-909",
"CWE-908"
] |
linux
|
b9258a2cece4ec1f020715fe3554bc2e360f6264
| 240,719,623,047,720,360,000,000,000,000,000,000,000 | 11 |
slcan: Don't transmit uninitialized stack data in padding
struct can_frame contains some padding which is not explicitly zeroed in
slc_bump. This uninitialized data will then be transmitted if the stack
initialization hardening feature is not enabled (CONFIG_INIT_STACK_ALL).
This commit just zeroes the whole struct including the padding.
Signed-off-by: Richard Palethorpe <[email protected]>
Fixes: a1044e36e457 ("can: add slcan driver for serial/USB-serial CAN adapters")
Reviewed-by: Kees Cook <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Acked-by: Marc Kleine-Budde <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
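The fix idea in miniature, a sketch assuming the usual <linux/can.h> frame layout: zero the whole struct, implicit padding included, before populating it, so no stack garbage can leak onto the wire.
	struct can_frame cf;
	memset(&cf, 0, sizeof(cf));	/* clears implicit padding too */
	cf.can_id  = id;
	cf.can_dlc = dlc;
	memcpy(cf.data, payload, dlc);	/* only now fill the real fields */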
int getnet( unsigned char* capa, int filter, int force)
{
unsigned char *bssid;
if(opt.nodetect)
return 0;
if(filter)
bssid = opt.f_bssid;
else
bssid = opt.r_bssid;
if( memcmp(bssid, NULL_MAC, 6) )
{
PCT; printf("Waiting for beacon frame (BSSID: %02X:%02X:%02X:%02X:%02X:%02X) on channel %d\n",
bssid[0],bssid[1],bssid[2],bssid[3],bssid[4],bssid[5],wi_get_channel(_wi_in));
}
else if(strlen(opt.r_essid) > 0)
{
PCT; printf("Waiting for beacon frame (ESSID: %s) on channel %d\n", opt.r_essid,wi_get_channel(_wi_in));
}
else if(force)
{
PCT;
if(filter)
{
printf("Please specify at least a BSSID (-b) or an ESSID (-e)\n");
}
else
{
printf("Please specify at least a BSSID (-a) or an ESSID (-e)\n");
}
return( 1 );
}
else
return 0;
if( attack_check(bssid, opt.r_essid, capa, _wi_in) != 0)
{
if(memcmp(bssid, NULL_MAC, 6))
{
if( strlen(opt.r_essid) == 0 || opt.r_essid[0] < 32)
{
printf( "Please specify an ESSID (-e).\n" );
}
}
if(!memcmp(bssid, NULL_MAC, 6))
{
if(strlen(opt.r_essid) > 0)
{
printf( "Please specify a BSSID (-a).\n" );
}
}
return( 1 );
}
return 0;
}
| 0 |
[
"CWE-787"
] |
aircrack-ng
|
091b153f294b9b695b0b2831e65936438b550d7b
| 7,257,231,617,996,819,000,000,000,000,000,000,000 | 60 |
Aireplay-ng: Fixed tcp_test stack overflow (Closes #14 on GitHub).
git-svn-id: http://svn.aircrack-ng.org/trunk@2417 28c6078b-6c39-48e3-add9-af49d547ecab
|
Item* Item::set_expr_cache(THD *thd)
{
DBUG_ENTER("Item::set_expr_cache");
Item_cache_wrapper *wrapper;
if ((wrapper= new Item_cache_wrapper(this)) &&
!wrapper->fix_fields(thd, (Item**)&wrapper))
{
if (wrapper->set_cache(thd))
DBUG_RETURN(NULL);
DBUG_RETURN(wrapper);
}
DBUG_RETURN(NULL);
}
| 0 |
[] |
server
|
b000e169562697aa072600695d4f0c0412f94f4f
| 145,429,882,611,552,090,000,000,000,000,000,000,000 | 13 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped in an ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared using an empty comparator.
The fix is to disable the caching of Item_name_const item.
|
static int netlbl_cipsov4_add_common(struct genl_info *info,
struct cipso_v4_doi *doi_def)
{
struct nlattr *nla;
int nla_rem;
u32 iter = 0;
doi_def->doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]);
if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_TAGLST],
NLBL_CIPSOV4_A_MAX,
netlbl_cipsov4_genl_policy) != 0)
return -EINVAL;
nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem)
if (nla->nla_type == NLBL_CIPSOV4_A_TAG) {
if (iter >= CIPSO_V4_TAG_MAXCNT)
return -EINVAL;
doi_def->tags[iter++] = nla_get_u8(nla);
}
while (iter < CIPSO_V4_TAG_MAXCNT)
doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID;
return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
2a2f11c227bdf292b3a2900ad04139d301b56ac4
| 329,880,734,398,743,400,000,000,000,000,000,000,000 | 25 |
NetLabel: correct CIPSO tag handling when adding new DOI definitions
The current netlbl_cipsov4_add_common() function has two problems which are
fixed with this patch. The first is an off-by-one bug where it is possible to
overflow the doi_def->tags[] array. The second is a bug where the same
doi_def->tags[] array was not always fully initialized, which caused sporadic
failures.
Signed-off-by: Paul Moore <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
static void calc_sha256_hash(h2_push_diary *diary, apr_uint64_t *phash, h2_push *push)
{
SHA256_CTX sha256;
apr_uint64_t val;
unsigned char hash[SHA256_DIGEST_LENGTH];
int i;
SHA256_Init(&sha256);
sha256_update(&sha256, push->req->scheme);
sha256_update(&sha256, "://");
sha256_update(&sha256, push->req->authority);
sha256_update(&sha256, push->req->path);
SHA256_Final(hash, &sha256);
val = 0;
for (i = 0; i != sizeof(val); ++i)
val = val * 256 + hash[i];
*phash = val >> (64 - diary->mask_bits);
}
| 0 |
[
"CWE-444"
] |
mod_h2
|
b8a8c5061eada0ce3339b24ba1d587134552bc0c
| 229,024,054,413,110,100,000,000,000,000,000,000,000 | 19 |
* Removing support for abandoned draft of http-wg regarding cache-digests.
|
static int do_directive(Token *tline, Token **output)
{
enum preproc_token op;
int j;
bool err;
enum nolist_flags nolist;
bool casesense;
int k, m;
int offset;
const char *p;
char *q, *qbuf;
const char *found_path;
const char *mname;
struct ppscan pps;
Include *inc;
Context *ctx;
Cond *cond;
MMacro *mmac, **mmhead;
Token *t = NULL, *tt, *macro_start, *last, *origline;
Line *l;
struct tokenval tokval;
expr *evalresult;
int64_t count;
size_t len;
errflags severity;
const char *dname; /* Name of directive, for messages */
*output = NULL; /* No output generated */
origline = tline;
if (tok_is(tline, '#')) {
/* cpp-style line directive */
if (!tok_white(tline->next))
return NO_DIRECTIVE_FOUND;
dname = tok_text(tline);
goto pp_line;
}
tline = skip_white(tline);
if (!tline || !tok_type(tline, TOK_PREPROC_ID))
return NO_DIRECTIVE_FOUND;
dname = tok_text(tline);
if (dname[1] == '%')
return NO_DIRECTIVE_FOUND;
op = pp_token_hash(dname);
casesense = true;
if (PP_HAS_CASE(op) & PP_INSENSITIVE(op)) {
casesense = false;
op--;
}
/*
* %line directives are always processed immediately and
* unconditionally, as they are intended to reflect position
* in externally preprocessed sources.
*/
if (op == PP_LINE) {
pp_line:
/*
* Syntax is `%line nnn[+mmm] [filename]'
*/
if (pp_noline || istk->mstk.mstk)
goto done;
tline = tline->next;
tline = skip_white(tline);
if (!tok_type(tline, TOK_NUMBER)) {
nasm_nonfatal("`%s' expects line number", dname);
goto done;
}
k = readnum(tok_text(tline), &err);
m = 1;
tline = tline->next;
if (tok_is(tline, '+') || tok_is(tline, '-')) {
bool minus = tok_is(tline, '-');
tline = tline->next;
if (!tok_type(tline, TOK_NUMBER)) {
nasm_nonfatal("`%s' expects line increment", dname);
goto done;
}
m = readnum(tok_text(tline), &err);
if (minus)
m = -m;
tline = tline->next;
}
tline = skip_white(tline);
if (tline) {
if (tline->type == TOK_STRING) {
if (dname[0] == '#') {
/* cpp version: treat double quotes like NASM backquotes */
char *txt = tok_text_buf(tline);
if (txt[0] == '"') {
txt[0] = '`';
txt[tline->len - 1] = '`';
}
}
src_set_fname(unquote_token(tline));
/*
* Anything after the string is ignored by design (for cpp
* compatibility and future extensions.)
*/
} else {
char *fname = detoken(tline, false);
src_set_fname(fname);
nasm_free(fname);
}
}
src_set_linnum(k);
istk->where = src_where();
istk->lineinc = m;
goto done;
}
/*
* If we're in a non-emitting branch of a condition construct,
* or walking to the end of an already terminated %rep block,
* we should ignore all directives except for condition
* directives.
*/
if (((istk->conds && !emitting(istk->conds->state)) ||
(istk->mstk.mstk && !istk->mstk.mstk->in_progress)) &&
!is_condition(op)) {
return NO_DIRECTIVE_FOUND;
}
/*
* If we're defining a macro or reading a %rep block, we should
* ignore all directives except for %macro/%imacro (which nest),
* %endm/%endmacro, %line and (only if we're in a %rep block) %endrep.
* If we're in a %rep block, another %rep nests, so should be let through.
*/
if (defining && op != PP_MACRO && op != PP_RMACRO &&
op != PP_ENDMACRO && op != PP_ENDM &&
(defining->name || (op != PP_ENDREP && op != PP_REP))) {
return NO_DIRECTIVE_FOUND;
}
if (defining) {
if (op == PP_MACRO || op == PP_RMACRO) {
nested_mac_count++;
return NO_DIRECTIVE_FOUND;
} else if (nested_mac_count > 0) {
if (op == PP_ENDMACRO) {
nested_mac_count--;
return NO_DIRECTIVE_FOUND;
}
}
if (!defining->name) {
if (op == PP_REP) {
nested_rep_count++;
return NO_DIRECTIVE_FOUND;
} else if (nested_rep_count > 0) {
if (op == PP_ENDREP) {
nested_rep_count--;
return NO_DIRECTIVE_FOUND;
}
}
}
}
switch (op) {
default:
nasm_nonfatal("unknown preprocessor directive `%s'", dname);
return NO_DIRECTIVE_FOUND; /* didn't get it */
case PP_PRAGMA:
/*
* %pragma namespace options...
*
* The namespace "preproc" is reserved for the preprocessor;
* all other namespaces generate a [pragma] assembly directive.
*
* Invalid %pragmas are ignored and may have different
* meaning in future versions of NASM.
*/
t = tline;
tline = tline->next;
t->next = NULL;
tline = zap_white(expand_smacro(tline));
if (tok_type(tline, TOK_ID)) {
if (!nasm_stricmp(tok_text(tline), "preproc")) {
/* Preprocessor pragma */
do_pragma_preproc(tline);
free_tlist(tline);
} else {
/* Build the assembler directive */
/* Append bracket to the end of the output */
for (t = tline; t->next; t = t->next)
;
t->next = make_tok_char(NULL, ']');
/* Prepend "[pragma " */
t = new_White(tline);
t = new_Token(t, TOK_ID, "pragma", 6);
t = make_tok_char(t, '[');
tline = t;
*output = tline;
}
}
break;
case PP_STACKSIZE:
{
const char *arg;
/* Directive to tell NASM what the default stack size is. The
* default is for a 16-bit stack, and this can be overriden with
* %stacksize large.
*/
tline = skip_white(tline->next);
if (!tline || tline->type != TOK_ID) {
nasm_nonfatal("`%s' missing size parameter", dname);
break;
}
arg = tok_text(tline);
if (nasm_stricmp(arg, "flat") == 0) {
/* All subsequent ARG directives are for a 32-bit stack */
StackSize = 4;
StackPointer = "ebp";
ArgOffset = 8;
LocalOffset = 0;
} else if (nasm_stricmp(arg, "flat64") == 0) {
/* All subsequent ARG directives are for a 64-bit stack */
StackSize = 8;
StackPointer = "rbp";
ArgOffset = 16;
LocalOffset = 0;
} else if (nasm_stricmp(arg, "large") == 0) {
/* All subsequent ARG directives are for a 16-bit stack,
* far function call.
*/
StackSize = 2;
StackPointer = "bp";
ArgOffset = 4;
LocalOffset = 0;
} else if (nasm_stricmp(arg, "small") == 0) {
/* All subsequent ARG directives are for a 16-bit stack,
* far function call. We don't support near functions.
*/
StackSize = 2;
StackPointer = "bp";
ArgOffset = 6;
LocalOffset = 0;
} else {
nasm_nonfatal("`%s' invalid size type", dname);
}
break;
}
case PP_ARG:
/* TASM like ARG directive to define arguments to functions, in
* the following form:
*
* ARG arg1:WORD, arg2:DWORD, arg4:QWORD
*/
offset = ArgOffset;
do {
const char *arg;
char directive[256];
int size = StackSize;
/* Find the argument name */
tline = skip_white(tline->next);
if (!tline || tline->type != TOK_ID) {
nasm_nonfatal("`%s' missing argument parameter", dname);
goto done;
}
arg = tok_text(tline);
/* Find the argument size type */
tline = tline->next;
if (!tok_is(tline, ':')) {
nasm_nonfatal("syntax error processing `%s' directive", dname);
goto done;
}
tline = tline->next;
if (!tok_type(tline, TOK_ID)) {
nasm_nonfatal("`%s' missing size type parameter", dname);
goto done;
}
/* Allow macro expansion of type parameter */
tt = tokenize(tok_text(tline));
tt = expand_smacro(tt);
size = parse_size(tok_text(tt));
if (!size) {
nasm_nonfatal("invalid size type for `%s' missing directive", dname);
free_tlist(tt);
goto done;
}
free_tlist(tt);
/* Round up to even stack slots */
size = ALIGN(size, StackSize);
/* Now define the macro for the argument */
snprintf(directive, sizeof(directive), "%%define %s (%s+%d)",
arg, StackPointer, offset);
do_directive(tokenize(directive), output);
offset += size;
/* Move to the next argument in the list */
tline = skip_white(tline->next);
} while (tok_is(tline, ','));
ArgOffset = offset;
break;
case PP_LOCAL:
/* TASM like LOCAL directive to define local variables for a
* function, in the following form:
*
* LOCAL local1:WORD, local2:DWORD, local4:QWORD = LocalSize
*
* The '= LocalSize' at the end is ignored by NASM, but is
* required by TASM to define the local parameter size (and used
* by the TASM macro package).
*/
offset = LocalOffset;
do {
const char *local;
char directive[256];
int size = StackSize;
/* Find the argument name */
tline = skip_white(tline->next);
if (!tline || tline->type != TOK_ID) {
nasm_nonfatal("`%s' missing argument parameter", dname);
goto done;
}
local = tok_text(tline);
/* Find the argument size type */
tline = tline->next;
if (!tok_is(tline, ':')) {
nasm_nonfatal("syntax error processing `%s' directive", dname);
goto done;
}
tline = tline->next;
if (!tok_type(tline, TOK_ID)) {
nasm_nonfatal("`%s' missing size type parameter", dname);
goto done;
}
/* Allow macro expansion of type parameter */
tt = tokenize(tok_text(tline));
tt = expand_smacro(tt);
size = parse_size(tok_text(tt));
if (!size) {
nasm_nonfatal("invalid size type for `%s' missing directive", dname);
free_tlist(tt);
goto done;
}
free_tlist(tt);
/* Round up to even stack slots */
size = ALIGN(size, StackSize);
offset += size; /* Negative offset, increment before */
/* Now define the macro for the argument */
snprintf(directive, sizeof(directive), "%%define %s (%s-%d)",
local, StackPointer, offset);
do_directive(tokenize(directive), output);
/* Now define the assign to setup the enter_c macro correctly */
snprintf(directive, sizeof(directive),
"%%assign %%$localsize %%$localsize+%d", size);
do_directive(tokenize(directive), output);
/* Move to the next argument in the list */
tline = skip_white(tline->next);
} while (tok_is(tline, ','));
LocalOffset = offset;
break;
case PP_CLEAR:
{
bool context = false;
t = tline->next = expand_smacro(tline->next);
t = skip_white(t);
if (!t) {
/* Emulate legacy behavior */
do_clear(CLEAR_DEFINE|CLEAR_MMACRO, false);
} else {
while ((t = skip_white(t)) && t->type == TOK_ID) {
const char *txt = tok_text(t);
if (!nasm_stricmp(txt, "all")) {
do_clear(CLEAR_ALL, context);
} else if (!nasm_stricmp(txt, "define") ||
!nasm_stricmp(txt, "def") ||
!nasm_stricmp(txt, "smacro")) {
do_clear(CLEAR_DEFINE, context);
} else if (!nasm_stricmp(txt, "defalias") ||
!nasm_stricmp(txt, "alias") ||
!nasm_stricmp(txt, "salias")) {
do_clear(CLEAR_DEFALIAS, context);
} else if (!nasm_stricmp(txt, "alldef") ||
!nasm_stricmp(txt, "alldefine")) {
do_clear(CLEAR_ALLDEFINE, context);
} else if (!nasm_stricmp(txt, "macro") ||
!nasm_stricmp(txt, "mmacro")) {
do_clear(CLEAR_MMACRO, context);
} else if (!nasm_stricmp(txt, "context") ||
!nasm_stricmp(txt, "ctx")) {
context = true;
} else if (!nasm_stricmp(txt, "global")) {
context = false;
} else if (!nasm_stricmp(txt, "nothing") ||
!nasm_stricmp(txt, "none") ||
!nasm_stricmp(txt, "ignore") ||
!nasm_stricmp(txt, "-") ||
!nasm_stricmp(txt, "--")) {
/* Do nothing */
} else {
nasm_nonfatal("invalid option to %s: %s", dname, txt);
t = NULL;
}
}
}
t = skip_white(t);
if (t)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
break;
}
case PP_DEPEND:
t = tline->next = expand_smacro(tline->next);
t = skip_white(t);
if (!t || (t->type != TOK_STRING &&
t->type != TOK_INTERNAL_STRING)) {
nasm_nonfatal("`%s' expects a file name", dname);
goto done;
}
if (t->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
strlist_add(deplist, unquote_token_cstr(t));
goto done;
case PP_INCLUDE:
t = tline->next = expand_smacro(tline->next);
t = skip_white(t);
if (!t || (t->type != TOK_STRING &&
t->type != TOK_INTERNAL_STRING)) {
nasm_nonfatal("`%s' expects a file name", dname);
goto done;
}
if (t->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
p = unquote_token_cstr(t);
nasm_new(inc);
inc->next = istk;
found_path = NULL;
inc->fp = inc_fopen(p, deplist, &found_path,
(pp_mode == PP_DEPS)
? INC_OPTIONAL : INC_NEEDED, NF_TEXT);
if (!inc->fp) {
/* -MG given but file not found */
nasm_free(inc);
} else {
inc->where = src_where();
inc->lineinc = 1;
inc->nolist = istk->nolist;
inc->noline = istk->noline;
if (!inc->noline)
src_set(0, found_path ? found_path : p);
istk = inc;
lfmt->uplevel(LIST_INCLUDE, 0);
}
break;
case PP_USE:
{
const struct use_package *pkg;
const char *name;
pkg = get_use_pkg(tline->next, dname, &name);
if (!name)
goto done;
if (!pkg) {
nasm_nonfatal("unknown `%s' package: `%s'", dname, name);
} else if (!use_loaded[pkg->index]) {
/*
* Not already included, go ahead and include it.
* Treat it as an include file for the purpose of
* producing a listing.
*/
use_loaded[pkg->index] = true;
stdmacpos = pkg->macros;
nasm_new(inc);
inc->next = istk;
inc->nolist = istk->nolist + !list_option('b');
inc->noline = istk->noline;
if (!inc->noline)
src_set(0, NULL);
istk = inc;
lfmt->uplevel(LIST_INCLUDE, 0);
}
break;
}
case PP_PUSH:
case PP_REPL:
case PP_POP:
tline = tline->next;
tline = skip_white(tline);
tline = expand_id(tline);
if (tline) {
if (!tok_type(tline, TOK_ID)) {
nasm_nonfatal("`%s' expects a context identifier", dname);
goto done;
}
if (tline->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored",
dname);
p = tok_text(tline);
} else {
p = NULL; /* Anonymous */
}
if (op == PP_PUSH) {
nasm_new(ctx);
ctx->depth = cstk ? cstk->depth + 1 : 1;
ctx->next = cstk;
ctx->name = p ? nasm_strdup(p) : NULL;
ctx->number = unique++;
cstk = ctx;
} else {
/* %pop or %repl */
if (!cstk) {
nasm_nonfatal("`%s': context stack is empty", dname);
} else if (op == PP_POP) {
if (p && (!cstk->name || nasm_stricmp(p, cstk->name)))
nasm_nonfatal("`%s' in wrong context: %s, "
"expected %s",
dname, cstk->name ? cstk->name : "anonymous", p);
else
ctx_pop();
} else {
/* op == PP_REPL */
nasm_free((char *)cstk->name);
cstk->name = p ? nasm_strdup(p) : NULL;
p = NULL;
}
}
break;
case PP_FATAL:
severity = ERR_FATAL;
goto issue_error;
case PP_ERROR:
severity = ERR_NONFATAL|ERR_PASS2;
goto issue_error;
case PP_WARNING:
/*!
*!user [on] %warning directives
*! controls output of \c{%warning} directives (see \k{pperror}).
*/
severity = ERR_WARNING|WARN_USER|ERR_PASS2;
goto issue_error;
issue_error:
{
/* Only error out if this is the final pass */
tline->next = expand_smacro(tline->next);
tline = tline->next;
tline = skip_white(tline);
t = tline ? tline->next : NULL;
t = skip_white(t);
if (tok_type(tline, TOK_STRING) && !t) {
/* The line contains only a quoted string */
p = unquote_token(tline); /* Ignore NUL character truncation */
nasm_error(severity, "%s", p);
} else {
/* Not a quoted string, or more than a quoted string */
q = detoken(tline, false);
nasm_error(severity, "%s", q);
nasm_free(q);
}
break;
}
CASE_PP_IF:
if (istk->conds && !emitting(istk->conds->state))
j = COND_NEVER;
else {
j = if_condition(tline->next, op);
tline->next = NULL; /* it got freed */
}
cond = nasm_malloc(sizeof(Cond));
cond->next = istk->conds;
cond->state = j;
istk->conds = cond;
if(istk->mstk.mstk)
istk->mstk.mstk->condcnt++;
break;
CASE_PP_ELIF:
if (!istk->conds)
nasm_fatal("`%s': no matching `%%if'", dname);
switch(istk->conds->state) {
case COND_IF_TRUE:
istk->conds->state = COND_DONE;
break;
case COND_DONE:
case COND_NEVER:
break;
case COND_ELSE_TRUE:
case COND_ELSE_FALSE:
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"`%%elif' after `%%else' ignored");
istk->conds->state = COND_NEVER;
break;
case COND_IF_FALSE:
/*
* IMPORTANT: In the case of %if, we will already have
* called expand_mmac_params(); however, if we're
* processing an %elif we must have been in a
* non-emitting mode, which would have inhibited
* the normal invocation of expand_mmac_params().
* Therefore, we have to do it explicitly here.
*/
j = if_condition(expand_mmac_params(tline->next), op);
tline->next = NULL; /* it got freed */
istk->conds->state = j;
break;
}
break;
case PP_ELSE:
if (tline->next)
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"trailing garbage after `%%else' ignored");
if (!istk->conds)
nasm_fatal("`%%else: no matching `%%if'");
switch(istk->conds->state) {
case COND_IF_TRUE:
case COND_DONE:
istk->conds->state = COND_ELSE_FALSE;
break;
case COND_NEVER:
break;
case COND_IF_FALSE:
istk->conds->state = COND_ELSE_TRUE;
break;
case COND_ELSE_TRUE:
case COND_ELSE_FALSE:
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"`%%else' after `%%else' ignored.");
istk->conds->state = COND_NEVER;
break;
}
break;
case PP_ENDIF:
if (tline->next)
nasm_warn(WARN_OTHER|ERR_PP_PRECOND,
"trailing garbage after `%%endif' ignored");
if (!istk->conds)
nasm_fatal("`%%endif': no matching `%%if'");
cond = istk->conds;
istk->conds = cond->next;
nasm_free(cond);
if(istk->mstk.mstk)
istk->mstk.mstk->condcnt--;
break;
case PP_RMACRO:
case PP_MACRO:
{
MMacro *def;
nasm_assert(!defining);
nasm_new(def);
def->casesense = casesense;
/*
* dstk.mstk points to the previous definition bracket,
* whereas dstk.mmac points to the topmost mmacro, which
* in this case is the one we are just starting to create.
*/
def->dstk.mstk = defining;
def->dstk.mmac = def;
if (op == PP_RMACRO)
def->max_depth = nasm_limit[LIMIT_MACRO_LEVELS];
if (!parse_mmacro_spec(tline, def, dname)) {
nasm_free(def);
goto done;
}
defining = def;
defining->where = istk->where;
mmac = (MMacro *) hash_findix(&mmacros, defining->name);
while (mmac) {
if (!strcmp(mmac->name, defining->name) &&
(mmac->nparam_min <= defining->nparam_max
|| defining->plus)
&& (defining->nparam_min <= mmac->nparam_max
|| mmac->plus)) {
nasm_warn(WARN_OTHER, "redefining multi-line macro `%s'",
defining->name);
break;
}
mmac = mmac->next;
}
break;
}
case PP_ENDM:
case PP_ENDMACRO:
if (!(defining && defining->name)) {
nasm_nonfatal("`%s': not defining a macro", tok_text(tline));
goto done;
}
mmhead = (MMacro **) hash_findi_add(&mmacros, defining->name);
defining->next = *mmhead;
*mmhead = defining;
defining = NULL;
break;
case PP_EXITMACRO:
/*
* We must search along istk->expansion until we hit a
* macro-end marker for a macro with a name. Then we
* bypass all lines between exitmacro and endmacro.
*/
list_for_each(l, istk->expansion)
if (l->finishes && l->finishes->name)
break;
if (l) {
/*
* Remove all conditional entries relative to this
* macro invocation. (safe to do in this context)
*/
for ( ; l->finishes->condcnt > 0; l->finishes->condcnt --) {
cond = istk->conds;
istk->conds = cond->next;
nasm_free(cond);
}
istk->expansion = l;
} else {
nasm_nonfatal("`%%exitmacro' not within `%%macro' block");
}
break;
case PP_UNIMACRO:
casesense = false;
/* fall through */
case PP_UNMACRO:
{
MMacro **mmac_p;
MMacro spec;
nasm_zero(spec);
spec.casesense = casesense;
if (!parse_mmacro_spec(tline, &spec, dname)) {
goto done;
}
mmac_p = (MMacro **) hash_findi(&mmacros, spec.name, NULL);
while (mmac_p && *mmac_p) {
mmac = *mmac_p;
if (mmac->casesense == spec.casesense &&
!mstrcmp(mmac->name, spec.name, spec.casesense) &&
mmac->nparam_min == spec.nparam_min &&
mmac->nparam_max == spec.nparam_max &&
mmac->plus == spec.plus) {
*mmac_p = mmac->next;
free_mmacro(mmac);
} else {
mmac_p = &mmac->next;
}
}
free_tlist(spec.dlist);
break;
}
case PP_ROTATE:
while (tok_white(tline->next))
tline = tline->next;
if (!tline->next) {
free_tlist(origline);
nasm_nonfatal("`%%rotate' missing rotate count");
return DIRECTIVE_FOUND;
}
t = expand_smacro(tline->next);
tline->next = NULL;
pps.tptr = tline = t;
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
evalresult =
evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
free_tlist(tline);
if (!evalresult)
return DIRECTIVE_FOUND;
if (tokval.t_type)
nasm_warn(WARN_OTHER, "trailing garbage after expression ignored");
if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%%rotate'");
return DIRECTIVE_FOUND;
}
mmac = istk->mstk.mmac;
if (!mmac) {
nasm_nonfatal("`%%rotate' invoked outside a macro call");
} else if (mmac->nparam == 0) {
nasm_nonfatal("`%%rotate' invoked within macro without parameters");
} else {
int rotate = mmac->rotate + reloc_value(evalresult);
rotate %= (int)mmac->nparam;
if (rotate < 0)
rotate += mmac->nparam;
mmac->rotate = rotate;
}
break;
case PP_REP:
{
MMacro *tmp_defining;
nolist = 0;
tline = skip_white(tline->next);
if (tok_type(tline, TOK_ID) && tline->len == 7 &&
!nasm_memicmp(tline->text.a, ".nolist", 7)) {
if (!list_option('f'))
nolist |= NL_LIST; /* ... but update line numbers */
tline = skip_white(tline->next);
}
if (tline) {
pps.tptr = expand_smacro(tline);
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
/* XXX: really critical?! */
evalresult =
evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
if (!evalresult)
goto done;
if (tokval.t_type)
nasm_warn(WARN_OTHER, "trailing garbage after expression ignored");
if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%%rep'");
goto done;
}
count = reloc_value(evalresult);
if (count > nasm_limit[LIMIT_REP]) {
nasm_nonfatal("`%%rep' count %"PRId64" exceeds limit (currently %"PRId64")",
count, nasm_limit[LIMIT_REP]);
count = 0;
} else if (count < 0) {
/*!
*!negative-rep [on] negative %rep count
*! warns about negative counts given to the \c{%rep}
*! preprocessor directive.
*/
nasm_warn(ERR_PASS2|WARN_NEGATIVE_REP,
"negative `%%rep' count: %"PRId64, count);
count = 0;
} else {
count++;
}
} else {
nasm_nonfatal("`%%rep' expects a repeat count");
count = 0;
}
tmp_defining = defining;
nasm_new(defining);
defining->nolist = nolist;
defining->in_progress = count;
defining->mstk = istk->mstk;
defining->dstk.mstk = tmp_defining;
defining->dstk.mmac = tmp_defining ? tmp_defining->dstk.mmac : NULL;
defining->where = istk->where;
break;
}
case PP_ENDREP:
if (!defining || defining->name) {
nasm_nonfatal("`%%endrep': no matching `%%rep'");
goto done;
}
/*
* Now we have a "macro" defined - although it has no name
* and we won't be entering it in the hash tables - we must
* push a macro-end marker for it on to istk->expansion.
* After that, it will take care of propagating itself (a
* macro-end marker line for a macro which is really a %rep
* block will cause the macro to be re-expanded, complete
* with another macro-end marker to ensure the process
* continues) until the whole expansion is forcibly removed
* from istk->expansion by a %exitrep.
*/
nasm_new(l);
l->next = istk->expansion;
l->finishes = defining;
l->first = NULL;
l->where = src_where();
istk->expansion = l;
istk->mstk.mstk = defining;
/* A loop does not change istk->noline */
istk->nolist += !!(defining->nolist & NL_LIST);
if (!istk->nolist)
lfmt->uplevel(LIST_MACRO, 0);
defining = defining->dstk.mstk;
break;
case PP_EXITREP:
/*
* We must search along istk->expansion until we hit a
* macro-end marker for a macro with no name. Then we set
* its `in_progress' flag to 0.
*/
list_for_each(l, istk->expansion)
if (l->finishes && !l->finishes->name)
break;
if (l)
l->finishes->in_progress = 0;
else
nasm_nonfatal("`%%exitrep' not within `%%rep' block");
break;
case PP_DEFINE:
case PP_XDEFINE:
case PP_DEFALIAS:
{
SMacro tmpl;
Token **lastp;
int nparam;
if (!(mname = get_id(&tline, dname)))
goto done;
nasm_zero(tmpl);
lastp = &tline->next;
nparam = parse_smacro_template(&lastp, &tmpl);
tline = *lastp;
*lastp = NULL;
if (unlikely(op == PP_DEFALIAS)) {
macro_start = tline;
if (!is_macro_id(macro_start)) {
nasm_nonfatal("`%s' expects a macro identifier to alias",
dname);
goto done;
}
tt = macro_start->next;
macro_start->next = NULL;
tline = tline->next;
tline = skip_white(tline);
if (tline && tline->type) {
nasm_warn(WARN_OTHER,
"trailing garbage after aliasing identifier ignored");
}
free_tlist(tt);
tmpl.alias = true;
} else {
if (op == PP_XDEFINE) {
/* Protect macro parameter tokens */
if (nparam)
mark_smac_params(tline, &tmpl, TOK_XDEF_PARAM);
tline = expand_smacro(tline);
}
/* NB: Does this still make sense? */
macro_start = reverse_tokens(tline);
}
/*
* Good. We now have a macro name, a parameter count, and a
* token list (in reverse order) for an expansion. We ought
* to be OK just to create an SMacro, store it, and let
* free_tlist have the rest of the line (which we have
* carefully re-terminated after chopping off the expansion
* from the end).
*/
define_smacro(mname, casesense, macro_start, &tmpl);
break;
}
case PP_UNDEF:
case PP_UNDEFALIAS:
if (!(mname = get_id(&tline, dname)))
goto done;
if (tline->next)
nasm_warn(WARN_OTHER, "trailing garbage after macro name ignored");
undef_smacro(mname, op == PP_UNDEFALIAS);
break;
case PP_DEFSTR:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
tline = zap_white(tline);
q = detoken(tline, false);
macro_start = make_tok_qstr(NULL, q);
nasm_free(q);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a string token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
break;
case PP_DEFTOK:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
t = skip_white(tline);
/* t should now point to the string */
if (!tok_type(t, TOK_STRING)) {
nasm_nonfatal("`%s' requires string as second parameter", dname);
free_tlist(tline);
goto done;
}
/*
* Convert the string to a token stream. Note that smacros
* are stored with the token stream reversed, so we have to
* reverse the output of tokenize().
*/
macro_start = reverse_tokens(tokenize(unquote_token_cstr(t)));
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
case PP_PATHSEARCH:
{
const char *found_path;
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
t = skip_white(tline);
if (!t || (t->type != TOK_STRING &&
t->type != TOK_INTERNAL_STRING)) {
nasm_nonfatal("`%s' expects a file name", dname);
free_tlist(tline);
goto done;
}
if (t->next)
nasm_warn(WARN_OTHER, "trailing garbage after `%s' ignored", dname);
p = unquote_token_cstr(t);
inc_fopen(p, NULL, &found_path, INC_PROBE, NF_BINARY);
if (!found_path)
found_path = p;
macro_start = make_tok_qstr(NULL, found_path);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a string token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
}
case PP_STRLEN:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
t = skip_white(tline);
/* t should now point to the string */
if (!tok_type(t, TOK_STRING)) {
nasm_nonfatal("`%s' requires string as second parameter", dname);
free_tlist(tline);
free_tlist(origline);
return DIRECTIVE_FOUND;
}
unquote_token(t);
macro_start = make_tok_num(NULL, t->len);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
free_tlist(origline);
return DIRECTIVE_FOUND;
case PP_STRCAT:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
len = 0;
list_for_each(t, tline) {
switch (t->type) {
case TOK_WHITESPACE:
break;
case TOK_STRING:
unquote_token(t);
len += t->len;
break;
case TOK_OTHER:
if (tok_is(t, ',')) /* permit comma separators */
break;
/* else fall through */
default:
nasm_nonfatal("non-string passed to `%s': %s", dname,
tok_text(t));
free_tlist(tline);
goto done;
}
}
q = qbuf = nasm_malloc(len+1);
list_for_each(t, tline) {
if (t->type == TOK_INTERNAL_STRING)
q = mempcpy(q, tok_text(t), t->len);
}
*q = '\0';
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
macro_start = make_tok_qstr_len(NULL, qbuf, len);
nasm_free(qbuf);
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
case PP_SUBSTR:
{
int64_t start, count;
const char *txt;
size_t len;
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
if (tline) /* skip expanded id */
t = tline->next;
t = skip_white(t);
/* t should now point to the string */
if (!tok_type(t, TOK_STRING)) {
nasm_nonfatal("`%s' requires string as second parameter", dname);
free_tlist(tline);
goto done;
}
pps.tptr = t->next;
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
evalresult = evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
if (!evalresult) {
free_tlist(tline);
goto done;
} else if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%s'", dname);
free_tlist(tline);
goto done;
}
start = evalresult->value - 1;
pps.tptr = skip_white(pps.tptr);
if (!pps.tptr) {
count = 1; /* Backwards compatibility: one character */
} else {
tokval.t_type = TOKEN_INVALID;
evalresult = evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
if (!evalresult) {
free_tlist(tline);
goto done;
} else if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%s'", dname);
free_tlist(tline);
goto done;
}
count = evalresult->value;
}
unquote_token(t);
len = t->len;
/* make start and count being in range */
if (start < 0)
start = 0;
if (count < 0)
count = len + count + 1 - start;
if (start + count > (int64_t)len)
count = len - start;
if (!len || count < 0 || start >=(int64_t)len)
start = -1, count = 0; /* empty string */
txt = (start < 0) ? "" : tok_text(t) + start;
len = count;
macro_start = make_tok_qstr_len(NULL, txt, len);
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
free_tlist(tline);
break;
}
case PP_ASSIGN:
if (!(mname = get_id(&tline, dname)))
goto done;
last = tline;
tline = expand_smacro(tline->next);
last->next = NULL;
pps.tptr = tline;
pps.ntokens = -1;
tokval.t_type = TOKEN_INVALID;
evalresult = evaluate(ppscan, &pps, &tokval, NULL, true, NULL);
free_tlist(tline);
if (!evalresult)
goto done;
if (tokval.t_type)
nasm_warn(WARN_OTHER, "trailing garbage after expression ignored");
if (!is_simple(evalresult)) {
nasm_nonfatal("non-constant value given to `%s'", dname);
free_tlist(origline);
return DIRECTIVE_FOUND;
}
macro_start = make_tok_num(NULL, reloc_value(evalresult));
/*
* We now have a macro name, an implicit parameter count of
* zero, and a numeric token to use as an expansion. Create
* and store an SMacro.
*/
define_smacro(mname, casesense, macro_start, NULL);
break;
case PP_ALIASES:
tline = tline->next;
tline = expand_smacro(tline);
ppopt.noaliases = !pp_get_boolean_option(tline, !ppopt.noaliases);
break;
case PP_LINE:
nasm_panic("`%s' directive not preprocessed early", dname);
break;
case PP_NULL:
/* Goes nowhere, does nothing... */
break;
}
done:
free_tlist(origline);
return DIRECTIVE_FOUND;
}
| 0 |
[] |
nasm
|
6299a3114ce0f3acd55d07de201a8ca2f0a83059
| 212,028,324,981,934,300,000,000,000,000,000,000,000 | 1,314 |
BR 3392708: fix NULL pointer reference for invalid %stacksize
After issuing an error message for a missing %stacksize argument, we need
to quit rather than continue trying to access the pointer.
Fold uses of tok_text() while we are at it.
Reported-by: Suhwan <[email protected]>
Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
|
file_flush (struct rw *rw)
{
struct rw_file *rwf = (struct rw_file *)rw;
if (fsync (rwf->fd) == -1) {
perror (rw->name);
exit (EXIT_FAILURE);
}
}
| 0 |
[
"CWE-252"
] |
libnbd
|
8d444b41d09a700c7ee6f9182a649f3f2d325abb
| 330,881,470,375,758,700,000,000,000,000,000,000,000 | 9 |
copy: CVE-2022-0485: Fail nbdcopy if NBD read or write fails
nbdcopy has a nasty bug when performing multi-threaded copies using
asynchronous nbd calls - it was blindly treating the completion of an
asynchronous command as successful, rather than checking the *error
parameter. This can result in the silent creation of a corrupted
image in two different ways: when a read fails, we blindly wrote
garbage to the destination; when a write fails, we did not flag that
the destination was not written.
Since nbdcopy already calls exit() on a synchronous read or write
failure to a file, doing the same for an asynchronous op to an NBD
server is the simplest solution. A nicer solution, but more invasive
to code and thus not done here, might be to allow up to N retries of
the transaction (in case the read or write failure was transient), or
even having a mode where as much data is copied as possible (portions
of the copy that failed would be logged on stderr, and nbdcopy would
still fail with a non-zero exit status, but this would copy more than
just stopping at the first error, as can be done with rsync or
ddrescue).
Note that since we rely on auto-retiring and do NOT call
nbd_aio_command_completed, our completion callbacks must always return
1 (if they do not exit() first), even when acting on *error, so as not to
leave the command allocated until nbd_close. As such, there is no
sane way to return an error to a manual caller of the callback, and
therefore we can drop dead code that calls perror() and exit() if the
callback "failed". It is also worth documenting the contract on when
we must manually call the callback during the asynch_zero callback, so
that we do not leak or double-free the command; thankfully, all the
existing code paths were correct.
The added testsuite script demonstrates several scenarios, some of
which fail without the rest of this patch in place, and others which
showcase ways in which sparse images can bypass errors.
Once backports are complete, a followup patch on the main branch will
edit docs/libnbd-security.pod with the mailing list announcement of
the stable branch commit ids and release versions that incorporate
this fix.
Reported-by: Nir Soffer <[email protected]>
Fixes: bc896eec4d ("copy: Implement multi-conn, multiple threads, multiple requests in flight.", v1.5.6)
Fixes: https://bugzilla.redhat.com/2046194
Message-Id: <[email protected]>
Acked-by: Richard W.M. Jones <[email protected]>
Acked-by: Nir Soffer <[email protected]>
[eblake: fix error message per Nir, tweak requires lines in unit test per Rich]
Reviewed-by: Laszlo Ersek <[email protected]>
|
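A hedged sketch of the completion-callback pattern the message describes (finished_read is a hypothetical name, not libnbd API, though the signature matches nbd_completion_callback): inspect *error instead of assuming success, and always return 1 so auto-retirement still frees the command.
static int
finished_read (void *user_data, int *error)
{
  if (*error) {
    errno = *error;
    perror ("read failed");
    exit (EXIT_FAILURE);	/* matches the synchronous-path policy */
  }
  /* hand the completed buffer to the writer side here */
  return 1;			/* always retire, even on error */
}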
static void annotate_state_free(annotate_state_t **statep)
{
annotate_state_t *state = *statep;
if (!state)
return;
annotate_state_finish(state);
annotate_state_unset_scope(state);
free(state);
*statep = NULL;
}
| 0 |
[
"CWE-732"
] |
cyrus-imapd
|
621f9e41465b521399f691c241181300fab55995
| 14,420,633,829,371,620,000,000,000,000,000,000,000 | 12 |
annotate: don't allow everyone to write shared server entries
|
static int dx2_decode_slice_565(GetBitContext *gb, int width, int height,
uint8_t *dst, int stride, int is_565)
{
int x, y;
int r, g, b;
uint8_t lru[3][8];
memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
memcpy(lru[1], is_565 ? def_lru_565 : def_lru_555, 8 * sizeof(*def_lru));
memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
b = decode_sym_565(gb, lru[0], 5);
g = decode_sym_565(gb, lru[1], is_565 ? 6 : 5);
r = decode_sym_565(gb, lru[2], 5);
dst[x * 3 + 0] = (r << 3) | (r >> 2);
dst[x * 3 + 1] = is_565 ? (g << 2) | (g >> 4) : (g << 3) | (g >> 2);
dst[x * 3 + 2] = (b << 3) | (b >> 2);
}
dst += stride;
}
return 0;
}
| 0 |
[
"CWE-190"
] |
FFmpeg
|
a392bf657015c9a79a5a13adfbfb15086c1943b9
| 107,320,138,734,556,650,000,000,000,000,000,000,000 | 26 |
avcodec/dxtory: fix src size checks
Fixes integer overflow
Fixes out of array read
Fixes: d104661bb59b202df7671fb19a00ca6c-asan_heap-oob_d6429d_5066_cov_1729501105_dxtory_mic.avi
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]>
|
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
return __ipv6_addr_type(addr) & 0xffff;
}
| 0 |
[
"CWE-416",
"CWE-284",
"CWE-264"
] |
linux
|
45f6fad84cc305103b28d73482b344d7f5b76f39
| 11,650,241,627,607,222,000,000,000,000,000,000,000 | 4 |
ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems :
UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions
while socket is not locked : Other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program desmonstrating
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
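A minimal sketch of the access pattern (the real patch converts many call sites): take one RCU-protected snapshot of np->opt, and duplicate it if it must outlive the read-side critical section.
	struct ipv6_txoptions *opt;
	rcu_read_lock();
	opt = rcu_dereference(np->opt);		/* read the pointer once */
	if (opt)
		opt = ipv6_dup_options(sk, opt);	/* private copy */
	rcu_read_unlock();
	/* 'opt' is now safe to use without holding the RCU lock */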
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
unsigned int udc_cnt, unsigned int hooknr, char *base)
{
int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
const struct ebt_entry *e = (struct ebt_entry *)chain->data;
const struct ebt_entry_target *t;
while (pos < nentries || chain_nr != -1) {
/* end of udc, go back one 'recursion' step */
if (pos == nentries) {
/* put back values of the time when this chain was called */
e = cl_s[chain_nr].cs.e;
if (cl_s[chain_nr].from != -1)
nentries =
cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
else
nentries = chain->nentries;
pos = cl_s[chain_nr].cs.n;
/* make sure we won't see a loop that isn't one */
cl_s[chain_nr].cs.n = 0;
chain_nr = cl_s[chain_nr].from;
if (pos == nentries)
continue;
}
t = (struct ebt_entry_target *)
(((char *)e) + e->target_offset);
if (strcmp(t->u.name, EBT_STANDARD_TARGET))
goto letscontinue;
if (e->target_offset + sizeof(struct ebt_standard_target) >
e->next_offset) {
BUGPRINT("Standard target size too big\n");
return -1;
}
verdict = ((struct ebt_standard_target *)t)->verdict;
if (verdict >= 0) { /* jump to another chain */
struct ebt_entries *hlp2 =
(struct ebt_entries *)(base + verdict);
for (i = 0; i < udc_cnt; i++)
if (hlp2 == cl_s[i].cs.chaininfo)
break;
/* bad destination or loop */
if (i == udc_cnt) {
BUGPRINT("bad destination\n");
return -1;
}
if (cl_s[i].cs.n) {
BUGPRINT("loop\n");
return -1;
}
if (cl_s[i].hookmask & (1 << hooknr))
goto letscontinue;
/* this can't be 0, so the loop test is correct */
cl_s[i].cs.n = pos + 1;
pos = 0;
cl_s[i].cs.e = ebt_next_entry(e);
e = (struct ebt_entry *)(hlp2->data);
nentries = hlp2->nentries;
cl_s[i].from = chain_nr;
chain_nr = i;
/* this udc is accessible from the base chain for hooknr */
cl_s[i].hookmask |= (1 << hooknr);
continue;
}
letscontinue:
e = ebt_next_entry(e);
pos++;
}
return 0;
}
| 0 |
[
"CWE-20"
] |
linux
|
d846f71195d57b0bbb143382647c2c6638b04c5a
| 294,186,162,094,309,600,000,000,000,000,000,000,000 | 69 |
bridge: netfilter: fix information leak
Struct tmp is copied from userspace. It is not checked whether the "name"
field is NULL terminated. This may lead to buffer overflow and passing
contents of kernel stack as a module name to try_then_request_module() and,
consequently, to modprobe commandline. It would be seen by all userspace
processes.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|
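The corresponding style of fix, sketched (not the verbatim patch): force NUL-termination of the userspace-supplied name before it can reach try_then_request_module().
	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;
	tmp.name[sizeof(tmp.name) - 1] = '\0';	/* stop the stack leak */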
return err;
}
static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
u64 ep_handle)
{
struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep;
if (!transport->ep_disconnect)
return -EINVAL;
ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
return -EINVAL;
conn = ep->conn;
if (conn) {
mutex_lock(&conn->ep_mutex);
conn->ep = NULL;
mutex_unlock(&conn->ep_mutex);
}
| 0 |
[
"CWE-787"
] |
linux
|
ec98ea7070e94cc25a422ec97d1421e28d97b7ee
| 191,127,705,575,968,400,000,000,000,000,000,000,000 | 22 |
scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE
As the iSCSI parameters are exported back through sysfs, it should be
enforcing that they never are more than PAGE_SIZE (which should be more
than enough) before accepting updates through netlink.
Change all iSCSI sysfs attributes to use sysfs_emit().
Cc: [email protected]
Reported-by: Adam Nichols <[email protected]>
Reviewed-by: Lee Duncan <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Reviewed-by: Mike Christie <[email protected]>
Signed-off-by: Chris Leech <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
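A sketch of the attribute-show pattern the message refers to (the attribute and the conn_param_string() helper are illustrative, not the driver's real names): sysfs_emit() writes into the single page sysfs provides and enforces the PAGE_SIZE bound that a bare sprintf() does not.
static ssize_t
show_conn_param(struct device *dev, struct device_attribute *attr, char *buf)
{
	/* sysfs hands us exactly one page; sysfs_emit() refuses to
	 * write past it, unlike an unbounded sprintf() */
	return sysfs_emit(buf, "%s\n", conn_param_string(dev));
}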
RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) {
ReplaceWith(dimensions_count, dims_data);
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
8ee24e7949a203d234489f9da2c5bf45a7d5157d
| 146,175,229,728,474,400,000,000,000,000,000,000,000 | 3 |
[tflite] Ensure `MatchingDim` does not allow buffer overflow.
We check in `MatchingDim` that both arguments have the same dimensionality, however that is a `DCHECK` only enabled if building in debug mode. Hence, it could be possible to cause buffer overflows by passing in a tensor with larger dimensions as the second argument. To fix, we now make `MatchingDim` return the minimum of the two sizes.
A much better fix would be to return a status object but that requires refactoring a large part of the codebase for minor benefits.
PiperOrigin-RevId: 332526127
Change-Id: If627d0d2c80a685217b6e0d1e64b0872dbf1c5e4
|
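The hardened MatchingDim described above can be sketched in plain C: the equality check is debug-only (like DCHECK), so the returned extent is clamped to the smaller argument either way. Names are illustrative:

#include <assert.h>

/* Debug builds still catch mismatched shapes; release builds can no longer
 * index past the smaller buffer because the minimum is returned. */
static int matching_dim(int dim1, int dim2)
{
    assert(dim1 == dim2);              /* compiled out with -DNDEBUG */
    return dim1 < dim2 ? dim1 : dim2;  /* safe bound in all builds */
}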
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
ssize_t retval;
unsigned long seg;
size_t count;
loff_t *ppos = &iocb->ki_pos;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
for (seg = 0; seg < nr_segs; seg++) {
read_descriptor_t desc;
desc.written = 0;
desc.arg.buf = iov[seg].iov_base;
desc.count = iov[seg].iov_len;
if (desc.count == 0)
continue;
desc.error = 0;
do_shmem_file_read(filp, ppos, &desc, file_read_actor);
retval += desc.written;
if (desc.error) {
retval = retval ?: desc.error;
break;
}
if (desc.count > 0)
break;
}
return retval;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
14fcc23fdc78e9d32372553ccf21758a9bd56fa1
| 60,485,845,794,091,200,000,000,000,000,000,000,000 | 33 |
tmpfs: fix kernel BUG in shmem_delete_inode
SuSE's insserv initscript ordering program hits kernel BUG at mm/shmem.c:814
on 2.6.26. It's using posix_fadvise on directories, and the shmem_readpage
method added in 2.6.23 is letting POSIX_FADV_WILLNEED allocate useless pages
to a tmpfs directory, incrementing i_blocks count but never decrementing it.
Fix this by assigning shmem_aops (pointing to readpage and writepage and
set_page_dirty) only when it's needed, on a regular file or a long symlink.
Many thanks to Kel for outstanding bugreport and steps to reproduce it.
Reported-by: Kel Modderman <[email protected]>
Tested-by: Kel Modderman <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Cc: <[email protected]> [2.6.25.x, 2.6.26.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int netlink_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"sk Eth Pid Groups "
"Rmem Wmem Dump Locks Drops Inode\n");
} else {
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
);
}
return 0;
}
| 0 |
[
"CWE-362",
"CWE-415"
] |
linux
|
92964c79b357efd980812c4de5c1fd2ec8bb5520
| 201,008,805,693,688,300,000,000,000,000,000,000,000 | 26 |
netlink: Fix dump skb leak/double free
When we free cb->skb after a dump, we do it after releasing the
lock. This means that a new dump could have started in the meantime
and we'll end up freeing its skb instead of ours.
This patch saves the skb and module before we unlock so we free
the right memory.
Fixes: 16b304f3404f ("netlink: Eliminate kmalloc in netlink dump operation.")
Reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Acked-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
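The race-fix pattern in this message — snapshot what must be freed while still holding the lock, then release it after unlocking — can be sketched generically; the dump_ctx type and field names here are illustrative, not the kernel's:

#include <pthread.h>
#include <stdlib.h>

struct dump_ctx { void *buf; };  /* illustrative stand-in for cb->skb */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Take ownership of the buffer under the lock and clear the shared field,
 * so a dump that starts after the unlock can never have its buffer freed
 * out from under it by this thread. */
static void finish_dump(struct dump_ctx *cb)
{
    void *mine;

    pthread_mutex_lock(&lock);
    mine = cb->buf;
    cb->buf = NULL;
    pthread_mutex_unlock(&lock);

    free(mine);  /* frees only the snapshot we own */
}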
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
{
struct ip6t_entry *iter;
void *loc_cpu_entry;
int ret;
if (!newinfo || !info)
return -EINVAL;
/* we dont care about newinfo->entries[] */
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
xt_compat_init_offsets(AF_INET6, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
return ret;
}
return 0;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
6a8ab060779779de8aea92ce3337ca348f973f54
| 57,351,211,326,927,150,000,000,000,000,000,000,000 | 22 |
ipv6: netfilter: ip6_tables: fix infoleak to userspace
Structures ip6t_replace, compat_ip6t_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as arguments
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via the arguments of the spawned
modprobe process.
The first bug was introduced before the git epoch; the second was
introduced in 3bc3fe5e (v2.6.25-rc1); the third is introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|
static void ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl,
unsigned char *buf,
size_t *olen )
{
unsigned char *p = buf;
const mbedtls_ssl_ciphersuite_t *suite = NULL;
const mbedtls_cipher_info_t *cipher = NULL;
if( ssl->session_negotiate->encrypt_then_mac == MBEDTLS_SSL_EXTENDED_MS_DISABLED ||
ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
{
*olen = 0;
return;
}
/*
* RFC 7366: "If a server receives an encrypt-then-MAC request extension
* from a client and then selects a stream or Authenticated Encryption
* with Associated Data (AEAD) ciphersuite, it MUST NOT send an
* encrypt-then-MAC response extension back to the client."
*/
if( ( suite = mbedtls_ssl_ciphersuite_from_id(
ssl->session_negotiate->ciphersuite ) ) == NULL ||
( cipher = mbedtls_cipher_info_from_type( suite->cipher ) ) == NULL ||
cipher->mode != MBEDTLS_MODE_CBC )
{
*olen = 0;
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "server hello, adding encrypt then mac extension" ) );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_ENCRYPT_THEN_MAC ) & 0xFF );
*p++ = 0x00;
*p++ = 0x00;
*olen = 4;
}
| 0 |
[
"CWE-20",
"CWE-190"
] |
mbedtls
|
83c9f495ffe70c7dd280b41fdfd4881485a3bc28
| 45,896,836,471,743,870,000,000,000,000,000,000,000 | 40 |
Prevent bounds check bypass through overflow in PSK identity parsing
The check `if( *p + n > end )` in `ssl_parse_client_psk_identity` is
unsafe because `*p + n` might overflow, thus bypassing the check. As
`n` is a user-specified value up to 65K, this is relevant if the
library happens to be located in the last 65K of virtual memory.
This commit replaces the check with a safe version.
|
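The safe form of the bounds check this commit describes compares the requested length against the remaining space rather than computing *p + n, which can wrap near the top of the address space. A minimal sketch, assuming p <= end:

#include <stddef.h>

/* Overflow-safe replacement for `if (p + n > end)`: the subtraction cannot
 * wrap as long as p <= end, so a 65K attacker-chosen n is handled safely. */
static int have_room(const unsigned char *p, const unsigned char *end, size_t n)
{
    return n <= (size_t)(end - p);
}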
xmlXPathNodeLeadingSorted (xmlNodeSetPtr nodes, xmlNodePtr node) {
int i, l;
xmlNodePtr cur;
xmlNodeSetPtr ret;
if (node == NULL)
return(nodes);
ret = xmlXPathNodeSetCreate(NULL);
if (ret == NULL)
return(ret);
if (xmlXPathNodeSetIsEmpty(nodes) ||
(!xmlXPathNodeSetContains(nodes, node)))
return(ret);
l = xmlXPathNodeSetGetLength(nodes);
for (i = 0; i < l; i++) {
cur = xmlXPathNodeSetItem(nodes, i);
if (cur == node)
break;
if (xmlXPathNodeSetAddUnique(ret, cur) < 0)
break;
}
return(ret);
}
| 0 |
[] |
libxml2
|
03c6723043775122313f107695066e5744189a08
| 9,400,880,435,176,862,000,000,000,000,000,000,000 | 25 |
Handling of XPath function arguments in error case
The XPath engine tries to guarantee that every XPath function can pop
'nargs' non-NULL values off the stack. libxslt, for example, relies on
this assumption. But the check isn't thorough enough if there are errors
during the evaluation of arguments. This can lead to segfaults:
https://mail.gnome.org/archives/xslt/2013-December/msg00005.html
This commit makes the handling of function arguments more robust.
* Bail out early when evaluation of XPath function arguments fails.
* Make sure that there are 'nargs' arguments in the current call frame.
|
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
0c319d3a144d4b8f1ea2047fd614d2149b68f889
| 212,816,763,342,145,670,000,000,000,000,000,000,000 | 5 |
nvmet-fc: ensure target queue id within range.
When searching for queue id's ensure they are within the expected range.
Signed-off-by: James Smart <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
int err;
struct net_device *netdev;
struct netfront_info *np;
netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
if (!netdev)
return ERR_PTR(-ENOMEM);
np = netdev_priv(netdev);
np->xbdev = dev;
np->queues = NULL;
err = -ENOMEM;
np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->rx_stats == NULL)
goto exit;
np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->tx_stats == NULL)
goto exit;
netdev->netdev_ops = &xennet_netdev_ops;
netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_GSO_ROBUST;
netdev->hw_features = NETIF_F_SG |
NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
/*
* Assume that all hw features are available for now. This set
* will be adjusted by the call to netdev_update_features() in
* xennet_connect() which is the earliest point where we can
* negotiate with the backend regarding supported features.
*/
netdev->features |= netdev->hw_features;
netdev->ethtool_ops = &xennet_ethtool_ops;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
SET_NETDEV_DEV(netdev, &dev->dev);
np->netdev = netdev;
np->netfront_xdp_enabled = false;
netif_carrier_off(netdev);
do {
xenbus_switch_state(dev, XenbusStateInitialising);
err = wait_event_timeout(module_wq,
xenbus_read_driver_state(dev->otherend) !=
XenbusStateClosed &&
xenbus_read_driver_state(dev->otherend) !=
XenbusStateUnknown, XENNET_TIMEOUT);
} while (!err);
return netdev;
exit:
xennet_free_netdev(netdev);
return ERR_PTR(err);
}
| 0 |
[] |
linux
|
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
| 239,323,061,223,347,600,000,000,000,000,000,000,000 | 64 |
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
|
void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
{
scr_writew(val, org);
if ((unsigned long)org == vc->vc_pos) {
softcursor_original = -1;
add_softcursor(vc);
}
}
| 0 |
[
"CWE-125"
] |
linux
|
3c4e0dff2095c579b142d5a0693257f1c58b4804
| 325,369,417,797,297,200,000,000,000,000,000,000,000 | 8 |
vt: Disable KD_FONT_OP_COPY
It's buggy:
On Fri, Nov 06, 2020 at 10:30:08PM +0800, Minh Yuan wrote:
> We recently discovered a slab-out-of-bounds read in fbcon in the latest
> kernel ( v5.10-rc2 for now ). The root cause of this vulnerability is that
> "fbcon_do_set_font" did not handle "vc->vc_font.data" and
> "vc->vc_font.height" correctly, and the patch
> <https://lkml.org/lkml/2020/9/27/223> for VT_RESIZEX can't handle this
> issue.
>
> Specifically, we use KD_FONT_OP_SET to set a small font.data for tty6, and
> use KD_FONT_OP_SET again to set a large font.height for tty1. After that,
> we use KD_FONT_OP_COPY to assign tty6's vc_font.data to tty1's vc_font.data
> in "fbcon_do_set_font", while tty1 retains the original larger
> height. Obviously, this will cause an out-of-bounds read, because we can
> access a smaller vc_font.data with a larger vc_font.height.
Further there was only one user ever.
- Android's loadfont, busybox and console-tools only ever use OP_GET
and OP_SET
- fbset documentation only mentions the kernel cmdline font: option,
not anything else.
- systemd used OP_COPY before release 232 published in Nov 2016
Now unfortunately the crucial report seems to have gone down with
gmane, and the commit message doesn't say much. But the pull request
hints at OP_COPY being broken
https://github.com/systemd/systemd/pull/3651
So in other words, this never worked, and the only project which
foolishly ever tried to use it realized that rather quickly too.
Instead of trying to fix security issues here on dead code by adding
missing checks, fix the entire thing by removing the functionality.
Note that systemd code using the OP_COPY function ignored the return
value, so it doesn't matter what we're doing here really - just in
case a lone server somewhere happens to be extremely unlucky and
running an affected old version of systemd. The relevant code from
font_copy_to_all_vcs() in systemd was:
/* copy font from active VT, where the font was uploaded to */
cfo.op = KD_FONT_OP_COPY;
cfo.height = vcs.v_active-1; /* tty1 == index 0 */
(void) ioctl(vcfd, KDFONTOP, &cfo);
Note this just disables the ioctl, garbage collecting the now unused
callbacks is left for -next.
v2: Tetsuo found the old mail, which allowed me to find it on another
archive. Add the link too.
Acked-by: Peilin Ye <[email protected]>
Reported-by: Minh Yuan <[email protected]>
References: https://lists.freedesktop.org/archives/systemd-devel/2016-June/036935.html
References: https://github.com/systemd/systemd/pull/3651
Cc: Greg KH <[email protected]>
Cc: Peilin Ye <[email protected]>
Cc: Tetsuo Handa <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
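The out-of-bounds read described in the report comes from pairing one console's font data with another console's larger height. A sketch of the consistency check that would have been needed (the struct is illustrative, not the kernel's):

struct font_desc {              /* illustrative fields only */
    unsigned char *data;
    unsigned int height;
    unsigned int charcount;
};

/* Reading src->data with dst's geometry runs past the source allocation
 * whenever dst is taller or has more glyphs than src. */
static int font_copy_ok(const struct font_desc *src,
                        const struct font_desc *dst)
{
    return dst->height <= src->height && dst->charcount <= src->charcount;
}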
WebContents::~WebContents() {
// clear out objects that have been granted permissions so that when
// WebContents::RenderFrameDeleted is called as a result of WebContents
// destruction it doesn't try to clear out a granted_devices_
// on a destructed object.
granted_devices_.clear();
if (!inspectable_web_contents_) {
WebContentsDestroyed();
return;
}
inspectable_web_contents_->GetView()->SetDelegate(nullptr);
// This event is only for internal use, which is emitted when WebContents is
// being destroyed.
Emit("will-destroy");
// For guest view based on OOPIF, the WebContents is released by the embedder
// frame, and we need to clear the reference to the memory.
bool not_owned_by_this = IsGuest() && attached_;
#if BUILDFLAG(ENABLE_ELECTRON_EXTENSIONS)
// And background pages are owned by extensions::ExtensionHost.
if (type_ == Type::kBackgroundPage)
not_owned_by_this = true;
#endif
if (not_owned_by_this) {
inspectable_web_contents_->ReleaseWebContents();
WebContentsDestroyed();
}
// InspectableWebContents will be automatically destroyed.
}
| 0 |
[] |
electron
|
e9fa834757f41c0b9fe44a4dffe3d7d437f52d34
| 99,068,758,749,886,600,000,000,000,000,000,000,000 | 33 |
fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]>
Co-authored-by: Samuel Attard <[email protected]>
|
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
struct mm_struct *mm;
int err;
err = mutex_lock_killable(&task->signal->cred_guard_mutex);
if (err)
return ERR_PTR(err);
mm = get_task_mm(task);
if (mm && mm != current->mm &&
!ptrace_may_access(task, mode)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
mutex_unlock(&task->signal->cred_guard_mutex);
return mm;
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
linux
|
2b7e8665b4ff51c034c55df3cff76518d1a9ee3a
| 46,181,294,860,787,840,000,000,000,000,000,000,000 | 19 |
fork: fix incorrect fput of ->exe_file causing use-after-free
Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for
write killable") made it possible to kill a forking task while it is
waiting to acquire its ->mmap_sem for write, in dup_mmap().
However, it was overlooked that this introduced an new error path before
a reference is taken on the mm_struct's ->exe_file. Since the
->exe_file of the new mm_struct was already set to the old ->exe_file by
the memcpy() in dup_mm(), it was possible for the mmput() in the error
path of dup_mm() to drop a reference to ->exe_file which was never
taken.
This caused the struct file to later be freed prematurely.
Fix it by updating mm_init() to NULL out the ->exe_file, in the same
place it clears other things like the list of mmaps.
This bug was found by syzkaller. It can be reproduced using the
following C program:
#define _GNU_SOURCE
#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
static void *mmap_thread(void *_arg)
{
for (;;) {
mmap(NULL, 0x1000000, PROT_READ,
MAP_POPULATE|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
}
static void *fork_thread(void *_arg)
{
usleep(rand() % 10000);
fork();
}
int main(void)
{
fork();
fork();
fork();
for (;;) {
if (fork() == 0) {
pthread_t t;
pthread_create(&t, NULL, mmap_thread, NULL);
pthread_create(&t, NULL, fork_thread, NULL);
usleep(rand() % 10000);
syscall(__NR_exit_group, 0);
}
wait(NULL);
}
}
No special kernel config options are needed. It usually causes a NULL
pointer dereference in __remove_shared_vm_struct() during exit, or in
dup_mmap() (which is usually inlined into copy_process()) during fork.
Both are due to a vm_area_struct's ->vm_file being used after it's
already been freed.
Google Bug Id: 64772007
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable")
Signed-off-by: Eric Biggers <[email protected]>
Tested-by: Mark Rutland <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]> [v4.7+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
void DecodeRegisterPerfCounters(DecodeThreadVars *dtv, ThreadVars *tv)
{
/* register counters */
dtv->counter_pkts = StatsRegisterCounter("decoder.pkts", tv);
dtv->counter_bytes = StatsRegisterCounter("decoder.bytes", tv);
dtv->counter_invalid = StatsRegisterCounter("decoder.invalid", tv);
dtv->counter_ipv4 = StatsRegisterCounter("decoder.ipv4", tv);
dtv->counter_ipv6 = StatsRegisterCounter("decoder.ipv6", tv);
dtv->counter_eth = StatsRegisterCounter("decoder.ethernet", tv);
dtv->counter_raw = StatsRegisterCounter("decoder.raw", tv);
dtv->counter_null = StatsRegisterCounter("decoder.null", tv);
dtv->counter_sll = StatsRegisterCounter("decoder.sll", tv);
dtv->counter_tcp = StatsRegisterCounter("decoder.tcp", tv);
dtv->counter_udp = StatsRegisterCounter("decoder.udp", tv);
dtv->counter_sctp = StatsRegisterCounter("decoder.sctp", tv);
dtv->counter_icmpv4 = StatsRegisterCounter("decoder.icmpv4", tv);
dtv->counter_icmpv6 = StatsRegisterCounter("decoder.icmpv6", tv);
dtv->counter_ppp = StatsRegisterCounter("decoder.ppp", tv);
dtv->counter_pppoe = StatsRegisterCounter("decoder.pppoe", tv);
dtv->counter_gre = StatsRegisterCounter("decoder.gre", tv);
dtv->counter_vlan = StatsRegisterCounter("decoder.vlan", tv);
dtv->counter_vlan_qinq = StatsRegisterCounter("decoder.vlan_qinq", tv);
dtv->counter_ieee8021ah = StatsRegisterCounter("decoder.ieee8021ah", tv);
dtv->counter_teredo = StatsRegisterCounter("decoder.teredo", tv);
dtv->counter_ipv4inipv6 = StatsRegisterCounter("decoder.ipv4_in_ipv6", tv);
dtv->counter_ipv6inipv6 = StatsRegisterCounter("decoder.ipv6_in_ipv6", tv);
dtv->counter_mpls = StatsRegisterCounter("decoder.mpls", tv);
dtv->counter_avg_pkt_size = StatsRegisterAvgCounter("decoder.avg_pkt_size", tv);
dtv->counter_max_pkt_size = StatsRegisterMaxCounter("decoder.max_pkt_size", tv);
dtv->counter_erspan = StatsRegisterMaxCounter("decoder.erspan", tv);
dtv->counter_flow_memcap = StatsRegisterCounter("flow.memcap", tv);
dtv->counter_flow_tcp = StatsRegisterCounter("flow.tcp", tv);
dtv->counter_flow_udp = StatsRegisterCounter("flow.udp", tv);
dtv->counter_flow_icmp4 = StatsRegisterCounter("flow.icmpv4", tv);
dtv->counter_flow_icmp6 = StatsRegisterCounter("flow.icmpv6", tv);
dtv->counter_defrag_ipv4_fragments =
StatsRegisterCounter("defrag.ipv4.fragments", tv);
dtv->counter_defrag_ipv4_reassembled =
StatsRegisterCounter("defrag.ipv4.reassembled", tv);
dtv->counter_defrag_ipv4_timeouts =
StatsRegisterCounter("defrag.ipv4.timeouts", tv);
dtv->counter_defrag_ipv6_fragments =
StatsRegisterCounter("defrag.ipv6.fragments", tv);
dtv->counter_defrag_ipv6_reassembled =
StatsRegisterCounter("defrag.ipv6.reassembled", tv);
dtv->counter_defrag_ipv6_timeouts =
StatsRegisterCounter("defrag.ipv6.timeouts", tv);
dtv->counter_defrag_max_hit =
StatsRegisterCounter("defrag.max_frag_hits", tv);
for (int i = 0; i < DECODE_EVENT_MAX; i++) {
BUG_ON(i != (int)DEvents[i].code);
if (i <= DECODE_EVENT_PACKET_MAX && !stats_decoder_events)
continue;
if (i > DECODE_EVENT_PACKET_MAX && !stats_stream_events)
continue;
dtv->counter_engine_events[i] = StatsRegisterCounter(
DEvents[i].event_name, tv);
}
return;
}
| 0 |
[
"CWE-20"
] |
suricata
|
11f3659f64a4e42e90cb3c09fcef66894205aefe
| 66,777,041,382,772,960,000,000,000,000,000,000,000 | 66 |
teredo: be stricter on what to consider valid teredo
Invalid Teredo can lead to valid DNS traffic (or other UDP traffic)
being misdetected as Teredo. This leads to false negatives in the
UDP payload inspection.
Make the teredo code only consider a packet teredo if the encapsulated
data was decoded without any 'invalid' events being set.
Bug #2736.
|
virtual bool find_function_processor (uchar *arg)
{
return FALSE;
}
| 0 |
[] |
mysql-server
|
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
| 186,388,260,548,853,320,000,000,000,000,000,000,000 | 4 |
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
A NAME_CONST item can return the NULL_ITEM type when given incorrect arguments.
NULL_ITEM gets special processing in the Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since the NAME_CONST function has the NULL_ITEM type, the
corresponding array element is left empty. NAME_CONST is then wrapped in an
ITEM_CACHE. ITEM_CACHE cannot return the proper type (NULL_ITEM) in
Item_func_in::val_int(), so the NULL_ITEM ends up being compared against an
empty comparator.
The fix is to disable caching of the Item_name_const item.
|
static int cgm_get(const char *filename, char *value, size_t len, const char *name, const char *lxcpath)
{
pid_t pid;
int p[2], ret, newlen, readlen;
if (pipe(p) < 0)
return -1;
if ((pid = fork()) < 0) {
close(p[0]);
close(p[1]);
return -1;
}
if (!pid) // do_cgm_get exits
do_cgm_get(name, lxcpath, filename, p[1], len && value);
close(p[1]);
ret = read(p[0], &newlen, sizeof(newlen));
if (ret != sizeof(newlen)) {
close(p[0]);
ret = -1;
goto out;
}
if (!len || !value) {
close(p[0]);
ret = newlen;
goto out;
}
memset(value, 0, len);
if (newlen < 0) { // child is reporting an error
close(p[0]);
ret = -1;
goto out;
}
if (newlen == 0) { // empty read
close(p[0]);
ret = 0;
goto out;
}
readlen = newlen > len ? len : newlen;
ret = read(p[0], value, readlen);
close(p[0]);
if (ret != readlen) {
ret = -1;
goto out;
}
if (newlen >= len) {
value[len-1] = '\0';
newlen = len-1;
} else if (newlen+1 < len) {
// cgmanager doesn't add eol to last entry
value[newlen++] = '\n';
value[newlen] = '\0';
}
ret = newlen;
out:
if (wait_for_pid(pid))
WARN("do_cgm_get exited with error");
return ret;
}
| 0 |
[
"CWE-59",
"CWE-61"
] |
lxc
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
| 159,418,671,456,914,600,000,000,000,000,000,000,000 | 58 |
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's initial fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
|
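The component-by-component walk safe_mount() performs can be sketched as follows: each path element is opened relative to its parent with O_NOFOLLOW, so a symlink planted inside the container aborts the walk instead of diverting it. This is a simplified sketch with trimmed error handling, not the lxc implementation:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Returns an fd on the final component, reached without ever following a
 * symlink; the caller can then mount onto /proc/self/fd/<fd>. */
static int open_without_symlinks(int rootfd, char *relpath)
{
    int fd = dup(rootfd);
    char *comp, *save = NULL;

    for (comp = strtok_r(relpath, "/", &save); comp && fd >= 0;
         comp = strtok_r(NULL, "/", &save)) {
        int next = openat(fd, comp, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
        close(fd);
        fd = next;  /* -1 (with ELOOP) if comp was a symlink */
    }
    return fd;
}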
static void __sk_free(struct sock *sk)
{
struct sk_filter *filter;
if (sk->sk_destruct)
sk->sk_destruct(sk);
filter = rcu_dereference_check(sk->sk_filter,
atomic_read(&sk->sk_wmem_alloc) == 0);
if (filter) {
sk_filter_uncharge(sk, filter);
RCU_INIT_POINTER(sk->sk_filter, NULL);
}
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
if (atomic_read(&sk->sk_omem_alloc))
printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_omem_alloc));
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
put_net(sock_net(sk));
sk_prot_free(sk->sk_prot_creator, sk);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
82981930125abfd39d7c8378a9cfdf5e1be2002b
| 191,481,393,890,039,680,000,000,000,000,000,000,000 | 26 |
net: cleanups in sock_setsockopt()
Use min_t()/max_t() macros, reformat two comments, use !!test_bit() to
match !!sock_flag()
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int ssl_prepare_clienthello_tlsext(SSL *s)
{
#ifndef OPENSSL_NO_EC
/* If we are client and using an elliptic curve cryptography cipher suite, send the point formats
* and elliptic curves we support.
*/
int using_ecc = 0;
int i;
unsigned char *j;
unsigned long alg_k, alg_a;
STACK_OF(SSL_CIPHER) *cipher_stack = SSL_get_ciphers(s);
for (i = 0; i < sk_SSL_CIPHER_num(cipher_stack); i++)
{
SSL_CIPHER *c = sk_SSL_CIPHER_value(cipher_stack, i);
alg_k = c->algorithm_mkey;
alg_a = c->algorithm_auth;
if ((alg_k & (SSL_kEECDH|SSL_kECDHr|SSL_kECDHe) || (alg_a & SSL_aECDSA)))
{
using_ecc = 1;
break;
}
}
using_ecc = using_ecc && (s->version >= TLS1_VERSION);
if (using_ecc)
{
if (s->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->tlsext_ecpointformatlist);
if ((s->tlsext_ecpointformatlist = OPENSSL_malloc(3)) == NULL)
{
SSLerr(SSL_F_SSL_PREPARE_CLIENTHELLO_TLSEXT,ERR_R_MALLOC_FAILURE);
return -1;
}
s->tlsext_ecpointformatlist_length = 3;
s->tlsext_ecpointformatlist[0] = TLSEXT_ECPOINTFORMAT_uncompressed;
s->tlsext_ecpointformatlist[1] = TLSEXT_ECPOINTFORMAT_ansiX962_compressed_prime;
s->tlsext_ecpointformatlist[2] = TLSEXT_ECPOINTFORMAT_ansiX962_compressed_char2;
/* we support all named elliptic curves in draft-ietf-tls-ecc-12 */
if (s->tlsext_ellipticcurvelist != NULL) OPENSSL_free(s->tlsext_ellipticcurvelist);
s->tlsext_ellipticcurvelist_length = sizeof(nid_list)/sizeof(nid_list[0]) * 2;
if ((s->tlsext_ellipticcurvelist = OPENSSL_malloc(s->tlsext_ellipticcurvelist_length)) == NULL)
{
s->tlsext_ellipticcurvelist_length = 0;
SSLerr(SSL_F_SSL_PREPARE_CLIENTHELLO_TLSEXT,ERR_R_MALLOC_FAILURE);
return -1;
}
for (i = 1, j = s->tlsext_ellipticcurvelist; (unsigned int)i <=
sizeof(nid_list)/sizeof(nid_list[0]); i++)
s2n(i,j);
}
#endif /* OPENSSL_NO_EC */
#ifdef TLSEXT_TYPE_opaque_prf_input
{
int r = 1;
if (s->ctx->tlsext_opaque_prf_input_callback != 0)
{
r = s->ctx->tlsext_opaque_prf_input_callback(s, NULL, 0, s->ctx->tlsext_opaque_prf_input_callback_arg);
if (!r)
return -1;
}
if (s->tlsext_opaque_prf_input != NULL)
{
if (s->s3->client_opaque_prf_input != NULL) /* shouldn't really happen */
OPENSSL_free(s->s3->client_opaque_prf_input);
if (s->tlsext_opaque_prf_input_len == 0)
s->s3->client_opaque_prf_input = OPENSSL_malloc(1); /* dummy byte just to get non-NULL */
else
s->s3->client_opaque_prf_input = BUF_memdup(s->tlsext_opaque_prf_input, s->tlsext_opaque_prf_input_len);
if (s->s3->client_opaque_prf_input == NULL)
{
SSLerr(SSL_F_SSL_PREPARE_CLIENTHELLO_TLSEXT,ERR_R_MALLOC_FAILURE);
return -1;
}
s->s3->client_opaque_prf_input_len = s->tlsext_opaque_prf_input_len;
}
if (r == 2)
/* at callback's request, insist on receiving an appropriate server opaque PRF input */
s->s3->server_opaque_prf_input_len = s->tlsext_opaque_prf_input_len;
}
#endif
return 1;
}
| 0 |
[] |
openssl
|
edc032b5e3f3ebb1006a9c89e0ae00504f47966f
| 329,552,131,962,255,160,000,000,000,000,000,000,000 | 89 |
Add SRP support.
|
xmlSchemaTypeFixupWhitespace(xmlSchemaTypePtr type)
{
/*
* Evaluate the whitespace-facet value.
*/
if (WXS_IS_LIST(type)) {
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_COLLAPSE;
return (0);
} else if (WXS_IS_UNION(type))
return (0);
if (type->facetSet != NULL) {
xmlSchemaFacetLinkPtr lin;
for (lin = type->facetSet; lin != NULL; lin = lin->next) {
if (lin->facet->type == XML_SCHEMA_FACET_WHITESPACE) {
switch (lin->facet->whitespace) {
case XML_SCHEMAS_FACET_PRESERVE:
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_PRESERVE;
break;
case XML_SCHEMAS_FACET_REPLACE:
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_REPLACE;
break;
case XML_SCHEMAS_FACET_COLLAPSE:
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_COLLAPSE;
break;
default:
return (-1);
}
return (0);
}
}
}
/*
* For all `atomic` datatypes other than string (and types `derived`
* by `restriction` from it) the value of whiteSpace is fixed to
* collapse
*/
{
xmlSchemaTypePtr anc;
for (anc = type->baseType; anc != NULL &&
anc->builtInType != XML_SCHEMAS_ANYTYPE;
anc = anc->baseType) {
if (anc->type == XML_SCHEMA_TYPE_BASIC) {
if (anc->builtInType == XML_SCHEMAS_NORMSTRING) {
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_REPLACE;
} else if ((anc->builtInType == XML_SCHEMAS_STRING) ||
(anc->builtInType == XML_SCHEMAS_ANYSIMPLETYPE)) {
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_PRESERVE;
} else
type->flags |= XML_SCHEMAS_TYPE_WHITESPACE_COLLAPSE;
break;
}
}
}
return (0);
}
| 0 |
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
| 36,320,209,088,540,340,000,000,000,000,000,000,000 | 63 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
|
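The decoration this commit adds boils down to a printf-style format attribute, which lets the compiler verify every call site's format string against its arguments. A sketch of the macro and one decorated prototype (the prototype is illustrative):

#ifdef __GNUC__
#define LIBXML_ATTR_FORMAT(fmt, args) \
    __attribute__((__format__(__printf__, (fmt), (args))))
#else
#define LIBXML_ATTR_FORMAT(fmt, args)
#endif

/* The compiler now warns if a caller passes e.g. user data as `msg`
 * or mismatches "%s"/"%d" with the variadic arguments. */
void xmlSchemaPErr(void *ctxt, int error, const char *msg, ...)
    LIBXML_ATTR_FORMAT(3, 4);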
hb_ot_layout_feature_get_lookup_count (hb_ot_layout_t *layout,
hb_ot_layout_table_type_t table_type,
unsigned int feature_index)
{
const GSUBGPOS &g = get_gsubgpos_table (layout, table_type);
const Feature &f = g.get_feature (feature_index);
return f.get_lookup_count ();
}
| 0 |
[] |
pango
|
336bb3201096bdd0494d29926dd44e8cca8bed26
| 190,102,262,738,329,530,000,000,000,000,000,000,000 | 9 |
[HB] Remove all references to the old code!
|
_zip_read_eocd64(zip_source_t *src, zip_buffer_t *buffer, zip_uint64_t buf_offset, unsigned int flags, zip_error_t *error)
{
zip_cdir_t *cd;
zip_uint64_t offset;
zip_uint8_t eocd[EOCD64LEN];
zip_uint64_t eocd_offset;
zip_uint64_t size, nentry, i, eocdloc_offset;
bool free_buffer;
zip_uint32_t num_disks, num_disks64, eocd_disk, eocd_disk64;
eocdloc_offset = _zip_buffer_offset(buffer);
_zip_buffer_get(buffer, 4); /* magic already verified */
num_disks = _zip_buffer_get_16(buffer);
eocd_disk = _zip_buffer_get_16(buffer);
eocd_offset = _zip_buffer_get_64(buffer);
if (eocd_offset > ZIP_INT64_MAX || eocd_offset + EOCD64LEN < eocd_offset) {
zip_error_set(error, ZIP_ER_SEEK, EFBIG);
return NULL;
}
if (eocd_offset + EOCD64LEN > eocdloc_offset + buf_offset) {
zip_error_set(error, ZIP_ER_INCONS, 0);
return NULL;
}
if (eocd_offset >= buf_offset && eocd_offset + EOCD64LEN <= buf_offset + _zip_buffer_size(buffer)) {
_zip_buffer_set_offset(buffer, eocd_offset - buf_offset);
free_buffer = false;
}
else {
if (zip_source_seek(src, (zip_int64_t)eocd_offset, SEEK_SET) < 0) {
_zip_error_set_from_source(error, src);
return NULL;
}
if ((buffer = _zip_buffer_new_from_source(src, EOCD64LEN, eocd, error)) == NULL) {
return NULL;
}
free_buffer = true;
}
if (memcmp(_zip_buffer_get(buffer, 4), EOCD64_MAGIC, 4) != 0) {
zip_error_set(error, ZIP_ER_INCONS, 0);
if (free_buffer) {
_zip_buffer_free(buffer);
}
return NULL;
}
size = _zip_buffer_get_64(buffer);
if ((flags & ZIP_CHECKCONS) && size + eocd_offset + 12 != buf_offset + eocdloc_offset) {
zip_error_set(error, ZIP_ER_INCONS, 0);
if (free_buffer) {
_zip_buffer_free(buffer);
}
return NULL;
}
_zip_buffer_get(buffer, 4); /* skip version made by/needed */
num_disks64 = _zip_buffer_get_32(buffer);
eocd_disk64 = _zip_buffer_get_32(buffer);
/* if eocd values are 0xffff, we have to use eocd64 values.
otherwise, if the values are not the same, it's inconsistent;
in any case, if the value is not 0, we don't support it */
if (num_disks == 0xffff) {
num_disks = num_disks64;
}
if (eocd_disk == 0xffff) {
eocd_disk = eocd_disk64;
}
if ((flags & ZIP_CHECKCONS) && (eocd_disk != eocd_disk64 || num_disks != num_disks64)) {
zip_error_set(error, ZIP_ER_INCONS, 0);
if (free_buffer) {
_zip_buffer_free(buffer);
}
return NULL;
}
if (num_disks != 0 || eocd_disk != 0) {
zip_error_set(error, ZIP_ER_MULTIDISK, 0);
if (free_buffer) {
_zip_buffer_free(buffer);
}
return NULL;
}
nentry = _zip_buffer_get_64(buffer);
i = _zip_buffer_get_64(buffer);
if (nentry != i) {
zip_error_set(error, ZIP_ER_MULTIDISK, 0);
if (free_buffer) {
_zip_buffer_free(buffer);
}
return NULL;
}
size = _zip_buffer_get_64(buffer);
offset = _zip_buffer_get_64(buffer);
if (!_zip_buffer_ok(buffer)) {
zip_error_set(error, ZIP_ER_INTERNAL, 0);
if (free_buffer) {
_zip_buffer_free(buffer);
}
return NULL;
}
if (free_buffer) {
_zip_buffer_free(buffer);
}
if (offset > ZIP_INT64_MAX || offset+size < offset) {
zip_error_set(error, ZIP_ER_SEEK, EFBIG);
return NULL;
}
if ((flags & ZIP_CHECKCONS) && offset+size != eocd_offset) {
zip_error_set(error, ZIP_ER_INCONS, 0);
return NULL;
}
if ((cd=_zip_cdir_new(nentry, error)) == NULL)
return NULL;
cd->is_zip64 = true;
cd->size = size;
cd->offset = offset;
return cd;
}
| 1 |
[
"CWE-119",
"CWE-770",
"CWE-787"
] |
libzip
|
9b46957ec98d85a572e9ef98301247f39338a3b5
| 58,085,678,989,821,840,000,000,000,000,000,000,000 | 134 |
Make eocd checks more consistent between zip and zip64 cases.
|
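The consistency this commit aims for is that both the EOCD and EOCD64 paths apply the same overflow-safe range test before trusting a central-directory location. A minimal sketch of such a shared check (an assumption about the fix's shape, not the libzip patch itself):

#include <stdint.h>

/* One check usable by both zip and zip64 paths: the central directory must
 * fit below the EOCD record, and offset+size must not wrap. */
static int cd_range_ok(uint64_t offset, uint64_t size, uint64_t eocd_offset)
{
    if (offset > INT64_MAX || size > (uint64_t)INT64_MAX - offset)
        return 0;                       /* would overflow a signed seek */
    return offset + size <= eocd_offset;
}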
Linker* PackLinuxElf32armBe::newLinker() const
{
return new ElfLinkerArmBE();
}
| 0 |
[
"CWE-476"
] |
upx
|
ef336dbcc6dc8344482f8cf6c909ae96c3286317
| 218,782,371,260,626,830,000,000,000,000,000,000,000 | 4 |
Protect against badly crafted input.
https://github.com/upx/upx/issues/128
modified: p_lx_elf.cpp
|
flatpak_run_add_wayland_args (FlatpakBwrap *bwrap)
{
const char *wayland_display;
g_autofree char *user_runtime_dir = flatpak_get_real_xdg_runtime_dir ();
g_autofree char *wayland_socket = NULL;
g_autofree char *sandbox_wayland_socket = NULL;
gboolean res = FALSE;
struct stat statbuf;
wayland_display = g_getenv ("WAYLAND_DISPLAY");
if (!wayland_display)
wayland_display = "wayland-0";
wayland_socket = g_build_filename (user_runtime_dir, wayland_display, NULL);
sandbox_wayland_socket = g_strdup_printf ("/run/user/%d/%s", getuid (), wayland_display);
if (stat (wayland_socket, &statbuf) == 0 &&
(statbuf.st_mode & S_IFMT) == S_IFSOCK)
{
res = TRUE;
flatpak_bwrap_add_args (bwrap,
"--ro-bind", wayland_socket, sandbox_wayland_socket,
NULL);
}
return res;
}
| 0 |
[
"CWE-20"
] |
flatpak
|
a9107feeb4b8275b78965b36bf21b92d5724699e
| 254,393,103,430,019,600,000,000,000,000,000,000,000 | 26 |
run: Only compare the lowest 32 ioctl arg bits for TIOCSTI
Closes #2782.
Closes: #2783
Approved by: alexlarsson
|
suck_data(char *data, int *store_len, Gif_Reader *grr)
{
uint8_t len = gifgetbyte(grr);
int total_len = 0;
while (len > 0) {
Gif_ReArray(data, char, total_len + len + 1);
if (!data) return 0;
gifgetblock((uint8_t *)data + total_len, len, grr);
total_len += len;
data[total_len] = 0;
len = gifgetbyte(grr);
}
if (store_len) *store_len = total_len;
return data;
}
| 0 |
[
"CWE-416"
] |
gifsicle
|
81fd7823f6d9c85ab598bc850e40382068361185
| 155,405,374,022,019,220,000,000,000,000,000,000,000 | 19 |
Fix use-after-free problems reported in #114.
|
int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt)
{
MOVMuxContext *mov = s->priv_data;
AVIOContext *pb = s->pb;
MOVTrack *trk = &mov->tracks[pkt->stream_index];
AVCodecParameters *par = trk->par;
unsigned int samples_in_chunk = 0;
int size = pkt->size, ret = 0;
uint8_t *reformatted_data = NULL;
ret = check_pkt(s, pkt);
if (ret < 0)
return ret;
if (mov->flags & FF_MOV_FLAG_FRAGMENT) {
int ret;
if (mov->moov_written || mov->flags & FF_MOV_FLAG_EMPTY_MOOV) {
if (mov->frag_interleave && mov->fragments > 0) {
if (trk->entry - trk->entries_flushed >= mov->frag_interleave) {
if ((ret = mov_flush_fragment_interleaving(s, trk)) < 0)
return ret;
}
}
if (!trk->mdat_buf) {
if ((ret = avio_open_dyn_buf(&trk->mdat_buf)) < 0)
return ret;
}
pb = trk->mdat_buf;
} else {
if (!mov->mdat_buf) {
if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0)
return ret;
}
pb = mov->mdat_buf;
}
}
if (par->codec_id == AV_CODEC_ID_AMR_NB) {
/* We must find out how many AMR blocks there are in one packet */
static const uint16_t packed_size[16] =
{13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 1};
int len = 0;
while (len < size && samples_in_chunk < 100) {
len += packed_size[(pkt->data[len] >> 3) & 0x0F];
samples_in_chunk++;
}
if (samples_in_chunk > 1) {
av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, implement a AVParser for it\n");
return -1;
}
} else if (par->codec_id == AV_CODEC_ID_ADPCM_MS ||
par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) {
samples_in_chunk = trk->par->frame_size;
} else if (trk->sample_size)
samples_in_chunk = size / trk->sample_size;
else
samples_in_chunk = 1;
if (samples_in_chunk < 1) {
av_log(s, AV_LOG_ERROR, "fatal error, input packet contains no samples\n");
return AVERROR_PATCHWELCOME;
}
/* copy extradata if it exists */
if (trk->vos_len == 0 && par->extradata_size > 0 &&
!TAG_IS_AVCI(trk->tag) &&
(par->codec_id != AV_CODEC_ID_DNXHD)) {
trk->vos_len = par->extradata_size;
trk->vos_data = av_malloc(trk->vos_len);
if (!trk->vos_data) {
ret = AVERROR(ENOMEM);
goto err;
}
memcpy(trk->vos_data, par->extradata, trk->vos_len);
}
if (par->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 &&
(AV_RB16(pkt->data) & 0xfff0) == 0xfff0) {
if (!s->streams[pkt->stream_index]->nb_frames) {
av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: "
"use the audio bitstream filter 'aac_adtstoasc' to fix it "
"('-bsf:a aac_adtstoasc' option with ffmpeg)\n");
return -1;
}
av_log(s, AV_LOG_WARNING, "aac bitstream error\n");
}
if (par->codec_id == AV_CODEC_ID_H264 && trk->vos_len > 0 && *(uint8_t *)trk->vos_data != 1 && !TAG_IS_AVCI(trk->tag)) {
/* from x264 or from bytestream H.264 */
/* NAL reformatting needed */
if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) {
ff_avc_parse_nal_units_buf(pkt->data, &reformatted_data,
&size);
avio_write(pb, reformatted_data, size);
} else {
if (trk->cenc.aes_ctr) {
size = ff_mov_cenc_avc_parse_nal_units(&trk->cenc, pb, pkt->data, size);
if (size < 0) {
ret = size;
goto err;
}
} else {
size = ff_avc_parse_nal_units(pb, pkt->data, pkt->size);
}
}
} else if (par->codec_id == AV_CODEC_ID_HEVC && trk->vos_len > 6 &&
(AV_RB24(trk->vos_data) == 1 || AV_RB32(trk->vos_data) == 1)) {
/* extradata is Annex B, assume the bitstream is too and convert it */
if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) {
ff_hevc_annexb2mp4_buf(pkt->data, &reformatted_data, &size, 0, NULL);
avio_write(pb, reformatted_data, size);
} else {
size = ff_hevc_annexb2mp4(pb, pkt->data, pkt->size, 0, NULL);
}
#if CONFIG_AC3_PARSER
} else if (par->codec_id == AV_CODEC_ID_EAC3) {
size = handle_eac3(mov, pkt, trk);
if (size < 0)
return size;
else if (!size)
goto end;
avio_write(pb, pkt->data, size);
#endif
} else {
if (trk->cenc.aes_ctr) {
if (par->codec_id == AV_CODEC_ID_H264 && par->extradata_size > 4) {
int nal_size_length = (par->extradata[4] & 0x3) + 1;
ret = ff_mov_cenc_avc_write_nal_units(s, &trk->cenc, nal_size_length, pb, pkt->data, size);
} else {
ret = ff_mov_cenc_write_packet(&trk->cenc, pb, pkt->data, size);
}
if (ret) {
goto err;
}
} else {
avio_write(pb, pkt->data, size);
}
}
if ((par->codec_id == AV_CODEC_ID_DNXHD ||
par->codec_id == AV_CODEC_ID_AC3) && !trk->vos_len) {
/* copy frame to create needed atoms */
trk->vos_len = size;
trk->vos_data = av_malloc(size);
if (!trk->vos_data) {
ret = AVERROR(ENOMEM);
goto err;
}
memcpy(trk->vos_data, pkt->data, size);
}
if (trk->entry >= trk->cluster_capacity) {
unsigned new_capacity = 2 * (trk->entry + MOV_INDEX_CLUSTER_SIZE);
if (av_reallocp_array(&trk->cluster, new_capacity,
sizeof(*trk->cluster))) {
ret = AVERROR(ENOMEM);
goto err;
}
trk->cluster_capacity = new_capacity;
}
trk->cluster[trk->entry].pos = avio_tell(pb) - size;
trk->cluster[trk->entry].samples_in_chunk = samples_in_chunk;
trk->cluster[trk->entry].chunkNum = 0;
trk->cluster[trk->entry].size = size;
trk->cluster[trk->entry].entries = samples_in_chunk;
trk->cluster[trk->entry].dts = pkt->dts;
trk->cluster[trk->entry].pts = pkt->pts;
if (!trk->entry && trk->start_dts != AV_NOPTS_VALUE) {
if (!trk->frag_discont) {
/* First packet of a new fragment. We already wrote the duration
* of the last packet of the previous fragment based on track_duration,
* which might not exactly match our dts. Therefore adjust the dts
* of this packet to be what the previous packets duration implies. */
trk->cluster[trk->entry].dts = trk->start_dts + trk->track_duration;
/* We also may have written the pts and the corresponding duration
* in sidx/tfrf/tfxd tags; make sure the sidx pts and duration match up with
* the next fragment. This means the cts of the first sample must
* be the same in all fragments, unless end_pts was updated by
* the packet causing the fragment to be written. */
if ((mov->flags & FF_MOV_FLAG_DASH && !(mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)) ||
mov->mode == MODE_ISM)
pkt->pts = pkt->dts + trk->end_pts - trk->cluster[trk->entry].dts;
} else {
/* New fragment, but discontinuous from previous fragments.
* Pretend the duration sum of the earlier fragments is
* pkt->dts - trk->start_dts. */
trk->frag_start = pkt->dts - trk->start_dts;
trk->end_pts = AV_NOPTS_VALUE;
trk->frag_discont = 0;
}
}
if (!trk->entry && trk->start_dts == AV_NOPTS_VALUE && !mov->use_editlist &&
s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
/* Not using edit lists and shifting the first track to start from zero.
* If the other streams start from a later timestamp, we won't be able
* to signal the difference in starting time without an edit list.
* Thus move the timestamp for this first sample to 0, increasing
* its duration instead. */
trk->cluster[trk->entry].dts = trk->start_dts = 0;
}
if (trk->start_dts == AV_NOPTS_VALUE) {
trk->start_dts = pkt->dts;
if (trk->frag_discont) {
if (mov->use_editlist) {
/* Pretend the whole stream started at pts=0, with earlier fragments
* already written. If the stream started at pts=0, the duration sum
* of earlier fragments would have been pkt->pts. */
trk->frag_start = pkt->pts;
trk->start_dts = pkt->dts - pkt->pts;
} else {
/* Pretend the whole stream started at dts=0, with earlier fragments
* already written, with a duration summing up to pkt->dts. */
trk->frag_start = pkt->dts;
trk->start_dts = 0;
}
trk->frag_discont = 0;
} else if (pkt->dts && mov->moov_written)
av_log(s, AV_LOG_WARNING,
"Track %d starts with a nonzero dts %"PRId64", while the moov "
"already has been written. Set the delay_moov flag to handle "
"this case.\n",
pkt->stream_index, pkt->dts);
}
trk->track_duration = pkt->dts - trk->start_dts + pkt->duration;
trk->last_sample_is_subtitle_end = 0;
if (pkt->pts == AV_NOPTS_VALUE) {
av_log(s, AV_LOG_WARNING, "pts has no value\n");
pkt->pts = pkt->dts;
}
if (pkt->dts != pkt->pts)
trk->flags |= MOV_TRACK_CTTS;
trk->cluster[trk->entry].cts = pkt->pts - pkt->dts;
trk->cluster[trk->entry].flags = 0;
if (trk->start_cts == AV_NOPTS_VALUE)
trk->start_cts = pkt->pts - pkt->dts;
if (trk->end_pts == AV_NOPTS_VALUE)
trk->end_pts = trk->cluster[trk->entry].dts +
trk->cluster[trk->entry].cts + pkt->duration;
else
trk->end_pts = FFMAX(trk->end_pts, trk->cluster[trk->entry].dts +
trk->cluster[trk->entry].cts +
pkt->duration);
if (par->codec_id == AV_CODEC_ID_VC1) {
mov_parse_vc1_frame(pkt, trk);
} else if (pkt->flags & AV_PKT_FLAG_KEY) {
if (mov->mode == MODE_MOV && par->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
trk->entry > 0) { // force sync sample for the first key frame
mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags);
if (trk->cluster[trk->entry].flags & MOV_PARTIAL_SYNC_SAMPLE)
trk->flags |= MOV_TRACK_STPS;
} else {
trk->cluster[trk->entry].flags = MOV_SYNC_SAMPLE;
}
if (trk->cluster[trk->entry].flags & MOV_SYNC_SAMPLE)
trk->has_keyframes++;
}
if (pkt->flags & AV_PKT_FLAG_DISPOSABLE) {
trk->cluster[trk->entry].flags |= MOV_DISPOSABLE_SAMPLE;
trk->has_disposable++;
}
trk->entry++;
trk->sample_count += samples_in_chunk;
mov->mdat_size += size;
if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams)
ff_mov_add_hinted_packet(s, pkt, trk->hint_track, trk->entry,
reformatted_data, size);
end:
err:
av_free(reformatted_data);
return ret;
}
| 0 |
[
"CWE-369"
] |
FFmpeg
|
fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
| 79,865,082,450,035,550,000,000,000,000,000,000,000 | 280 |
avformat/movenc: Write version 2 of audio atom if channels is not known
Version 1 needs the channel count and would otherwise divide by 0.
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
static inline void qxl_push_free_res(PCIQXLDevice *d, int flush)
{
QXLReleaseRing *ring = &d->ram->release_ring;
uint64_t *item;
int notify;
#define QXL_FREE_BUNCH_SIZE 32
if (ring->prod - ring->cons + 1 == ring->num_items) {
/* ring full -- can't push */
return;
}
if (!flush && d->oom_running) {
/* collect everything from oom handler before pushing */
return;
}
if (!flush && d->num_free_res < QXL_FREE_BUNCH_SIZE) {
/* collect a bit more before pushing */
return;
}
SPICE_RING_PUSH(ring, notify);
dprint(d, 2, "free: push %d items, notify %s, ring %d/%d [%d,%d]\n",
d->num_free_res, notify ? "yes" : "no",
ring->prod - ring->cons, ring->num_items,
ring->prod, ring->cons);
if (notify) {
qxl_send_events(d, QXL_INTERRUPT_DISPLAY);
}
SPICE_RING_PROD_ITEM(ring, item);
*item = 0;
d->num_free_res = 0;
d->last_release = NULL;
qxl_ring_set_dirty(d);
}
| 0 |
[] |
qemu-kvm
|
5ff4e36c804157bd84af43c139f8cd3a59722db9
| 131,113,636,278,945,200,000,000,000,000,000,000,000 | 35 |
qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processses the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]>
|
SV*
Perl__new_invlist(pTHX_ IV initial_size)
{
/* Return a pointer to a newly constructed inversion list, with enough
* space to store 'initial_size' elements. If that number is negative, a
* system default is used instead */
SV* new_list;
if (initial_size < 0) {
initial_size = 10;
}
new_list = newSV_type(SVt_INVLIST);
initialize_invlist_guts(new_list, initial_size);
return new_list;
| 0 |
[
"CWE-190",
"CWE-787"
] |
perl5
|
897d1f7fd515b828e4b198d8b8bef76c6faf03ed
| 8,298,253,907,633,624,000,000,000,000,000,000,000 | 18 |
regcomp.c: Prevent integer overflow from nested regex quantifiers.
(CVE-2020-10543) On 32bit systems the size calculations for nested regular
expression quantifiers could overflow causing heap memory corruption.
Fixes: Perl/perl5-security#125
(cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71)
|
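The class of bug fixed here — 32-bit size arithmetic wrapping when quantifiers are nested — is avoided by checking the multiplication before performing it. A generic sketch (not the perl patch itself):

#include <stdint.h>

/* Refuse a size product that would wrap a 32-bit quantity instead of
 * letting the wrapped value undersize a heap allocation. */
static int checked_mul_u32(uint32_t a, uint32_t b, uint32_t *out)
{
    if (b != 0 && a > UINT32_MAX / b)
        return -1;   /* overflow: reject the pattern */
    *out = a * b;
    return 0;
}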
struct Cookie *Curl_cookie_getlist(struct CookieInfo *c,
const char *host, const char *path,
bool secure)
{
struct Cookie *newco;
struct Cookie *co;
struct Cookie *mainco = NULL;
size_t matches = 0;
bool is_ip;
const size_t myhash = cookiehash(host);
if(!c || !c->cookies[myhash])
return NULL; /* no cookie struct or no cookies in the struct */
/* at first, remove expired cookies */
remove_expired(c);
/* check if host is an IP(v4|v6) address */
is_ip = Curl_host_is_ipnum(host);
co = c->cookies[myhash];
while(co) {
/* if the cookie requires we're secure we must only continue if we are! */
if(co->secure?secure:TRUE) {
/* now check if the domain is correct */
if(!co->domain ||
(co->tailmatch && !is_ip && tailmatch(co->domain, host)) ||
((!co->tailmatch || is_ip) && strcasecompare(host, co->domain)) ) {
/*
* the right part of the host matches the domain stuff in the
* cookie data
*/
/*
* now check the left part of the path with the cookies path
* requirement
*/
if(!co->spath || pathmatch(co->spath, path) ) {
/*
* and now, we know this is a match and we should create an
* entry for the return-linked-list
*/
newco = dup_cookie(co);
if(newco) {
/* then modify our next */
newco->next = mainco;
/* point the main to us */
mainco = newco;
matches++;
}
else
goto fail;
}
}
}
co = co->next;
}
if(matches) {
/*
* Now we need to make sure that if there is a name appearing more than
* once, the longest specified path version comes first. To make this
* the swiftest way, we just sort them all based on path length.
*/
struct Cookie **array;
size_t i;
/* alloc an array and store all cookie pointers */
array = malloc(sizeof(struct Cookie *) * matches);
if(!array)
goto fail;
co = mainco;
for(i = 0; co; co = co->next)
array[i++] = co;
/* now sort the cookie pointers in path length order */
qsort(array, matches, sizeof(struct Cookie *), cookie_sort);
/* remake the linked list order according to the new order */
mainco = array[0]; /* start here */
for(i = 0; i<matches-1; i++)
array[i]->next = array[i + 1];
array[matches-1]->next = NULL; /* terminate the list */
free(array); /* remove the temporary data again */
}
return mainco; /* return the new list */
fail:
/* failure, clear up the allocated chain and return NULL */
Curl_cookie_freelist(mainco);
return NULL;
}
| 1 |
[] |
curl
|
48d7064a49148f03942380967da739dcde1cdc24
| 252,504,344,232,936,600,000,000,000,000,000,000,000 | 103 |
cookie: apply limits
- Send no more than 150 cookies per request
- Cap the max length used for a cookie: header to 8K
- Cap the max number of received Set-Cookie: headers to 50
Bug: https://curl.se/docs/CVE-2022-32205.html
CVE-2022-32205
Reported-by: Harry Sintonen
Closes #9048
|
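The limits listed in the message reduce to two admission checks while collecting matches. A minimal sketch with the constants taken from the message (the helper name is illustrative):

#include <stddef.h>

#define MAX_COOKIES_PER_REQUEST 150   /* from the commit message */
#define MAX_COOKIE_HEADER_LEN   8192  /* 8K cap on the Cookie: header */

/* Admit another matched cookie only while both caps still hold. */
static int may_add_cookie(size_t matches, size_t header_len, size_t cookie_len)
{
    return matches < MAX_COOKIES_PER_REQUEST &&
           header_len + cookie_len <= MAX_COOKIE_HEADER_LEN;
}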
int find_slot_by_number_and_label(pkcs11_handle_t *h,
int wanted_slot_id,
const char *wanted_token_label,
unsigned int *slot_num)
{
unsigned int slot_index;
int rv;
const char *token_label = NULL;
/* we want a specific slot id, or we don't care about the label */
if ((wanted_token_label == NULL) || (wanted_slot_id != 0)) {
rv = find_slot_by_number(h, wanted_slot_id, slot_num);
/* if we don't care about the label, or we failed, we're done */
if ((wanted_token_label == NULL) || (rv != 0)) {
return rv;
}
/* verify it's the label we want */
token_label = h->slots[*slot_num].label;
if ((token_label != NULL) &&
(strcmp (wanted_token_label, token_label) == 0)) {
return 0;
}
return -1;
}
/* look up the slot by its label from the list */
for (slot_index = 0; slot_index < h->slot_count; slot_index++) {
if (h->slots[slot_index].token_present) {
token_label = h->slots[slot_index].label;
if ((token_label != NULL) &&
(strcmp (wanted_token_label, token_label) == 0)) {
*slot_num = slot_index;
return 0;
}
}
}
return -1;
}
| 0 |
[] |
pam_pkcs11
|
cc51b3e2720ea862d500cab2ea517518ff39a497
| 18,833,593,976,741,366,000,000,000,000,000,000,000 | 41 |
verify using a nonce from the system, not the card
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problem.
|
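The point of "a nonce from the system, not the card" is that the challenge being signed must originate from the host's RNG; otherwise a malicious token can present a precomputed signature over data it chose itself. A sketch of sourcing the nonce from the host (POSIX, simplified):

#include <stdio.h>

/* Fill the challenge from the host RNG so the token cannot predict or
 * choose the data it must sign during login. */
static int get_system_nonce(unsigned char *buf, size_t len)
{
    FILE *f = fopen("/dev/urandom", "rb");
    size_t got = 0;

    if (f) {
        got = fread(buf, 1, len, f);
        fclose(f);
    }
    return got == len ? 0 : -1;
}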
agoo_ws_add_headers(agooReq req, agooText t) {
int klen = 0;
const char *key;
t = agoo_text_append(t, up_con, sizeof(up_con) - 1);
if (NULL != (key = agoo_con_header_value(req->header.start, req->header.len, "Sec-WebSocket-Key", &klen)) &&
klen + sizeof(ws_magic) < MAX_KEY_LEN) {
char buf[MAX_KEY_LEN];
unsigned char sha[32];
int len = klen + sizeof(ws_magic) - 1;
strncpy(buf, key, klen);
strcpy(buf + klen, ws_magic);
sha1((unsigned char*)buf, len, sha);
len = b64_to(sha, SHA1_DIGEST_SIZE, buf);
t = agoo_text_append(t, ws_accept, sizeof(ws_accept) - 1);
t = agoo_text_append(t, buf, len);
t = agoo_text_append(t, "\r\n", 2);
}
if (NULL != (key = agoo_con_header_value(req->header.start, req->header.len, "Sec-WebSocket-Protocol", &klen))) {
t = agoo_text_append(t, ws_protocol, sizeof(ws_protocol) - 1);
t = agoo_text_append(t, key, klen);
t = agoo_text_append(t, "\r\n", 2);
}
return t;
}
| 0 |
[
"CWE-444",
"CWE-61"
] |
agoo
|
23d03535cf7b50d679a60a953a0cae9519a4a130
| 295,312,727,521,629,900,000,000,000,000,000,000,000 | 27 |
Remote addr (#99)
* REMOTE_ADDR added
* Ready for merge
|
parser_parse_with_statement_start (parser_context_t *context_p) /**< context */
{
parser_with_statement_t with_statement;
if (context_p->status_flags & PARSER_IS_STRICT)
{
parser_raise_error (context_p, PARSER_ERR_WITH_NOT_ALLOWED);
}
parser_parse_enclosed_expr (context_p);
#ifndef JERRY_NDEBUG
PARSER_PLUS_EQUAL_U16 (context_p->context_stack_depth, PARSER_WITH_CONTEXT_STACK_ALLOCATION);
#endif /* !JERRY_NDEBUG */
uint8_t inside_with = (context_p->status_flags & PARSER_INSIDE_WITH) != 0;
context_p->status_flags |= PARSER_INSIDE_WITH;
parser_emit_cbc_ext_forward_branch (context_p,
CBC_EXT_WITH_CREATE_CONTEXT,
&with_statement.branch);
parser_stack_push (context_p, &with_statement, sizeof (parser_with_statement_t));
parser_stack_push_uint8 (context_p, inside_with);
parser_stack_push_uint8 (context_p, PARSER_STATEMENT_WITH);
parser_stack_iterator_init (context_p, &context_p->last_statement);
} /* parser_parse_with_statement_start */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 120,884,003,196,308,800,000,000,000,000,000,000,000 | 27 |
Improve parse_identifier (#4691)
ASCII string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
static int show_programs(WriterContext *w, InputFile *ifile)
{
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
int i, ret = 0;
writer_print_section_header(w, SECTION_ID_PROGRAMS);
for (i = 0; i < fmt_ctx->nb_programs; i++) {
AVProgram *program = fmt_ctx->programs[i];
if (!program)
continue;
ret = show_program(w, ifile, program);
if (ret < 0)
break;
}
writer_print_section_footer(w);
return ret;
}
| 0 |
[
"CWE-476"
] |
FFmpeg
|
837cb4325b712ff1aab531bf41668933f61d75d2
| 202,759,217,933,099,500,000,000,000,000,000,000,000 | 17 |
ffprobe: Fix null pointer dereference with color primaries
Found-by: AD-lab of venustech
Signed-off-by: Michael Niedermayer <[email protected]>
|
static QIOChannel *nbd_negotiate_handle_starttls(NBDClient *client,
uint32_t length)
{
QIOChannel *ioc;
QIOChannelTLS *tioc;
struct NBDTLSHandshakeData data = { 0 };
TRACE("Setting up TLS");
ioc = client->ioc;
if (length) {
if (nbd_drop(ioc, length, NULL) < 0) {
return NULL;
}
nbd_negotiate_send_rep_err(ioc, NBD_REP_ERR_INVALID, NBD_OPT_STARTTLS,
"OPT_STARTTLS should not have length");
return NULL;
}
if (nbd_negotiate_send_rep(client->ioc, NBD_REP_ACK,
NBD_OPT_STARTTLS) < 0) {
return NULL;
}
tioc = qio_channel_tls_new_server(ioc,
client->tlscreds,
client->tlsaclname,
NULL);
if (!tioc) {
return NULL;
}
qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-server-tls");
TRACE("Starting TLS handshake");
data.loop = g_main_loop_new(g_main_context_default(), FALSE);
qio_channel_tls_handshake(tioc,
nbd_tls_handshake,
&data,
NULL);
if (!data.complete) {
g_main_loop_run(data.loop);
}
g_main_loop_unref(data.loop);
if (data.error) {
object_unref(OBJECT(tioc));
error_free(data.error);
return NULL;
}
return QIO_CHANNEL(tioc);
}
| 0 |
[
"CWE-20"
] |
qemu
|
2b0bbc4f8809c972bad134bc1a2570dbb01dea0b
| 3,280,951,607,259,039,600,000,000,000,000,000,000 | 51 |
nbd/server: get rid of nbd_negotiate_read and friends
Functions nbd_negotiate_{read,write,drop_sync} were introduced in
1a6245a5b, when nbd_rwv (was nbd_wr_sync) was working through
qemu_co_sendv_recvv (the path is nbd_wr_sync -> qemu_co_{recv/send} ->
qemu_co_send_recv -> qemu_co_sendv_recvv), which just yields, without
setting any handlers. But starting from ff82911cd nbd_rwv (was
nbd_wr_syncv) works through qio_channel_yield() which sets handlers, so
watchers are redundant in nbd_negotiate_{read,write,drop_sync}, then,
let's just use nbd_{read,write,drop} functions.
Functions nbd_{read,write,drop} has errp parameter, which is unused in
this patch. This will be fixed later.
Signed-off-by: Vladimir Sementsov-Ogievskiy <[email protected]>
Reviewed-by: Eric Blake <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
Http::Stream::packRange(StoreIOBuffer const &source, MemBuf *mb)
{
HttpHdrRangeIter * i = &http->range_iter;
Range<int64_t> available(source.range());
char const *buf = source.data;
while (i->currentSpec() && available.size()) {
const size_t copy_sz = lengthToSend(available);
if (copy_sz) {
// intersection of "have" and "need" ranges must not be empty
assert(http->out.offset < i->currentSpec()->offset + i->currentSpec()->length);
assert(http->out.offset + (int64_t)available.size() > i->currentSpec()->offset);
/*
* put boundary and headers at the beginning of a range in a
* multi-range
*/
if (http->multipartRangeRequest() && i->debt() == i->currentSpec()->length) {
assert(http->memObject());
clientPackRangeHdr(
http->memObject()->getReply(), /* original reply */
i->currentSpec(), /* current range */
i->boundary, /* boundary, the same for all */
mb);
}
// append content
debugs(33, 3, "appending " << copy_sz << " bytes");
noteSentBodyBytes(copy_sz);
mb->append(buf, copy_sz);
// update offsets
available.start += copy_sz;
buf += copy_sz;
}
if (!canPackMoreRanges()) {
debugs(33, 3, "Returning because !canPackMoreRanges.");
if (i->debt() == 0)
// put terminating boundary for multiparts
clientPackTermBound(i->boundary, mb);
return;
}
int64_t nextOffset = getNextRangeOffset();
assert(nextOffset >= http->out.offset);
int64_t skip = nextOffset - http->out.offset;
/* adjust for bytes that will not be transmitted */
http->out.offset = nextOffset;
if (available.size() <= (uint64_t)skip)
return;
available.start += skip;
buf += skip;
if (copy_sz == 0)
return;
}
}
| 0 |
[
"CWE-20"
] |
squid
|
1e05a85bd28c22c9ca5d3ac9f5e86d6269ec0a8c
| 78,367,761,728,905,780,000,000,000,000,000,000,000 | 60 |
Handle more partial responses (#791)
|
const char *av_get_profile_name(const AVCodec *codec, int profile)
{
const AVProfile *p;
if (profile == FF_PROFILE_UNKNOWN || !codec->profiles)
return NULL;
for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
if (p->profile == profile)
return p->name;
return NULL;
}
| 0 |
[
"CWE-703"
] |
FFmpeg
|
e5c7229999182ad1cef13b9eca050dba7a5a08da
| 252,371,760,693,763,100,000,000,000,000,000,000,000 | 12 |
avcodec/utils: set AVFrame format unconditionally
Fixes inconsistency and out of array accesses
Fixes: 10cdd7e63e7f66e3e66273939e0863dd-asan_heap-oob_1a4ff32_7078_cov_4056274555_mov_h264_aac__mp4box_frag.mp4
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]>
|
TEST(BitTestMatchExpression, DoesNotMatchIntegerWithBitMask) {
long long bitMaskSet = 118;
long long bitMaskClear = 11;
BSONObj match1 = fromjson("{a: NumberInt(54)}");
BSONObj match2 = fromjson("{a: NumberLong(54)}");
BSONObj match3 = fromjson("{a: 54.0}");
BitsAllSetMatchExpression balls;
BitsAllClearMatchExpression ballc;
BitsAnySetMatchExpression banys;
BitsAnyClearMatchExpression banyc;
ASSERT_OK(balls.init("a", bitMaskSet));
ASSERT_OK(ballc.init("a", bitMaskClear));
ASSERT_OK(banys.init("a", bitMaskSet));
ASSERT_OK(banyc.init("a", bitMaskClear));
ASSERT(!balls.matchesSingleElement(match1["a"]));
ASSERT(!balls.matchesSingleElement(match2["a"]));
ASSERT(!balls.matchesSingleElement(match3["a"]));
ASSERT(!ballc.matchesSingleElement(match1["a"]));
ASSERT(!ballc.matchesSingleElement(match2["a"]));
ASSERT(!ballc.matchesSingleElement(match3["a"]));
ASSERT(banys.matchesSingleElement(match1["a"]));
ASSERT(banys.matchesSingleElement(match2["a"]));
ASSERT(banys.matchesSingleElement(match3["a"]));
ASSERT(banyc.matchesSingleElement(match1["a"]));
ASSERT(banyc.matchesSingleElement(match2["a"]));
ASSERT(banyc.matchesSingleElement(match3["a"]));
}
| 0 |
[] |
mongo
|
b0ef26c639112b50648a02d969298650fbd402a4
| 89,650,466,772,881,590,000,000,000,000,000,000,000 | 30 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
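The commit above rejects invalid UTF-8 in $regex patterns before they reach the matcher. As a minimal, standalone sketch of the kind of check involved (this is not MongoDB's actual validator), a byte-wise UTF-8 validity test in C that rejects overlong encodings, UTF-16 surrogates, and code points above U+10FFFF:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Minimal UTF-8 validity check: rejects overlong forms, UTF-16
 * surrogates, and code points above U+10FFFF. Illustrative only;
 * not MongoDB's validator. */
static bool is_valid_utf8(const uint8_t *s, size_t len)
{
    size_t i = 0;
    while (i < len) {
        uint8_t b = s[i];
        size_t n;            /* continuation bytes expected */
        uint32_t cp, min;    /* decoded code point, minimum for width */
        if (b < 0x80) { i++; continue; }
        else if ((b & 0xE0) == 0xC0) { n = 1; cp = b & 0x1F; min = 0x80; }
        else if ((b & 0xF0) == 0xE0) { n = 2; cp = b & 0x0F; min = 0x800; }
        else if ((b & 0xF8) == 0xF0) { n = 3; cp = b & 0x07; min = 0x10000; }
        else return false;   /* stray continuation or invalid lead byte */
        if (i + n >= len)
            return false;    /* truncated sequence */
        for (size_t j = 1; j <= n; j++) {
            if ((s[i + j] & 0xC0) != 0x80)
                return false;
            cp = (cp << 6) | (s[i + j] & 0x3F);
        }
        if (cp < min || cp > 0x10FFFF || (cp >= 0xD800 && cp <= 0xDFFF))
            return false;    /* overlong, out of range, or surrogate */
        i += n + 1;
    }
    return true;
}

Rejecting outright, rather than repairing, keeps the regex engine from ever seeing byte sequences with ambiguous decodings.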
Item_cache_str(THD *thd, const Item *item):
Item_cache(thd, item->type_handler()), value(0),
is_varbinary(item->type() == FIELD_ITEM &&
Item_cache_str::field_type() == MYSQL_TYPE_VARCHAR &&
!((const Item_field *) item)->field->has_charset())
{
collation.set(const_cast<DTCollation&>(item->collation));
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 88,770,589,986,369,980,000,000,000,000,000,000,000 | 8 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
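The fix described above replaces a root-only set_extraction_flag(IMMUTABLE_FL) with an Item::walk() over the whole subtree, so a later cleanup pass sees a consistently marked expression. A generic C sketch of that difference; the struct and helpers are illustrative stand-ins, not MariaDB's Item hierarchy:

#include <stddef.h>

#define IMMUTABLE_FL 0x1u

struct item {
    unsigned flags;
    struct item **args;   /* child expressions */
    size_t nargs;
};

/* Buggy shape: only the root is marked, so a cleanup pass that checks
 * the flag per node still descends into unmarked children. */
void mark_root_only(struct item *it)
{
    it->flags |= IMMUTABLE_FL;
}

/* Fixed shape: walk the subtree so root and children are all marked
 * and the cleanup pass treats the constant expression as one unit. */
void mark_subtree(struct item *it)
{
    for (size_t i = 0; i < it->nargs; i++)
        mark_subtree(it->args[i]);
    it->flags |= IMMUTABLE_FL;
}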
sv_dispprefix (value)
const char *value;
{
int nval = 0;
if (value && *value)
{
nval = atoi (value);
if (nval < 0)
nval = 0;
}
_rl_completion_prefix_display_length = nval;
return 0;
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 10,037,013,507,653,791,000,000,000,000,000,000,000 | 14 |
bash-4.4-rc2 release
|
static int callbinTM (lua_State *L, const TValue *p1, const TValue *p2,
StkId res, TMS event) {
const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */
if (notm(tm))
tm = luaT_gettmbyobj(L, p2, event); /* try second operand */
if (notm(tm)) return 0;
luaT_callTMres(L, tm, p1, p2, res);
return 1;
}
| 0 |
[
"CWE-416",
"CWE-125",
"CWE-787"
] |
lua
|
eb41999461b6f428186c55abd95f4ce1a76217d5
| 111,379,067,351,467,610,000,000,000,000,000,000,000 | 9 |
Fixed bugs in stack reallocation vs. GC
Macro 'checkstackGC' was doing a GC step after resizing the stack;
the GC could shrink the stack and undo the resize. Moreover, macro
'checkstackp' also does a GC step, which could remove the preallocated
CallInfo when calling a function. (Its name has been changed to
'checkstackGCp' to emphasize that it calls the GC.)
|
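The hazard in this commit is purely one of ordering: establish a stack-size guarantee, then run a collector step that can shrink the stack and revoke it. A minimal C sketch of the pattern, with hypothetical stand-ins for the interpreter's grow and GC-step routines:

/* Hypothetical stand-ins for the interpreter internals. */
static void stack_grow(int needed) { (void)needed; /* realloc the stack */ }
static void gc_step(void) { /* may shrink the stack as a side effect */ }

/* Hazardous order (what the old checkstackGC macro effectively did):
 * the GC step can undo the resize, so the caller's guarantee of
 * 'needed' free slots no longer holds when it proceeds. */
void checkstack_buggy(int needed)
{
    stack_grow(needed);
    gc_step();
}

/* Safe order: let the collector run first, and establish the size
 * guarantee last so nothing can invalidate it. */
void checkstack_fixed(int needed)
{
    gc_step();
    stack_grow(needed);
}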
void free_pgd_range(struct mmu_gather **tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
unsigned long next;
unsigned long start;
/*
* The next few lines have given us lots of grief...
*
* Why are we testing PMD* at this top level? Because often
* there will be no work to do at all, and we'd prefer not to
* go all the way down to the bottom just to discover that.
*
* Why all these "- 1"s? Because 0 represents both the bottom
* of the address space and the top of it (using -1 for the
* top wouldn't help much: the masks would do the wrong thing).
* The rule is that addr 0 and floor 0 refer to the bottom of
* the address space, but end 0 and ceiling 0 refer to the top
* Comparisons need to use "end - 1" and "ceiling - 1" (though
* that end 0 case should be mythical).
*
* Wherever addr is brought up or ceiling brought down, we must
* be careful to reject "the opposite 0" before it confuses the
* subsequent tests. But what about where end is brought down
* by PMD_SIZE below? no, end can't go down to 0 there.
*
* Whereas we round start (addr) and ceiling down, by different
* masks at different levels, in order to test whether a table
* now has no other vmas using it, so can be freed, we don't
* bother to round floor or end up - the tests don't need that.
*/
addr &= PMD_MASK;
if (addr < floor) {
addr += PMD_SIZE;
if (!addr)
return;
}
if (ceiling) {
ceiling &= PMD_MASK;
if (!ceiling)
return;
}
if (end - 1 > ceiling - 1)
end -= PMD_SIZE;
if (addr > end - 1)
return;
start = addr;
pgd = pgd_offset((*tlb)->mm, addr);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
| 258,029,872,293,883,000,000,000,000,000,000,000,000 | 59 |
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
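The follow_page() change leans on the kernel's error-pointer idiom, where a small negative errno is encoded into pointer values no valid object can occupy, letting one return slot distinguish "page", "not mapped" (NULL), and "mapped but unusable" (error). A self-contained sketch of the idiom, modeled on <linux/err.h> but simplified for user space:

#include <stdio.h>

#define MAX_ERRNO 4095
#define EFAULT 14

/* Error-pointer idiom, modeled on <linux/err.h>: -errno values live in
 * the top page of the address space, which no valid pointer occupies. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct page;   /* opaque here */

/* Hypothetical lookup mirroring the three-way follow_page() contract:
 * NULL means "not mapped" (caller may fault it in); ERR_PTR(-EFAULT)
 * means "mapped but unusable" (e.g. no struct page behind it). */
static struct page *lookup(int which)
{
    if (which == 0) return NULL;
    if (which == 1) return ERR_PTR(-EFAULT);
    return (struct page *)0x1000;   /* stand-in for a real page */
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct page *p = lookup(i);
        if (IS_ERR(p))
            printf("case %d: error %ld\n", i, PTR_ERR(p));
        else if (p == NULL)
            printf("case %d: not mapped\n", i);
        else
            printf("case %d: got a page\n", i);
    }
    return 0;
}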
get_vpn_connections (NMApplet *applet)
{
GSList *all_connections;
GSList *iter;
GSList *list = NULL;
all_connections = applet_get_all_connections (applet);
for (iter = all_connections; iter; iter = iter->next) {
NMConnection *connection = NM_CONNECTION (iter->data);
NMSettingConnection *s_con;
s_con = NM_SETTING_CONNECTION (nm_connection_get_setting (connection, NM_TYPE_SETTING_CONNECTION));
if (strcmp (nm_setting_connection_get_connection_type (s_con), NM_SETTING_VPN_SETTING_NAME))
/* Not a VPN connection */
continue;
if (!nm_connection_get_setting (connection, NM_TYPE_SETTING_VPN)) {
g_warning ("%s: VPN connection '%s' didn't have requires vpn setting.", __func__,
nm_setting_get_name (NM_SETTING (s_con)));
continue;
}
list = g_slist_prepend (list, connection);
}
g_slist_free (all_connections);
return g_slist_sort (list, sort_vpn_connections);
}
| 0 |
[
"CWE-200"
] |
network-manager-applet
|
8627880e07c8345f69ed639325280c7f62a8f894
| 320,324,192,525,873,220,000,000,000,000,000,000,000 | 30 |
editor: prevent any registration of objects on the system bus
D-Bus access-control is name-based; so requests for a specific name
are allowed/denied based on the rules in /etc/dbus-1/system.d. But
apparently apps still get a non-named service on the bus, and if we
register *any* object even though we don't have a named service,
dbus and dbus-glib will happily proxy signals. Since the connection
editor shouldn't ever expose anything having to do with connections
on any bus, make sure that's the case.
|
static int sqfs_read_inode_table(unsigned char **inode_table)
{
struct squashfs_super_block *sblk = ctxt.sblk;
u64 start, n_blks, table_offset, table_size;
int j, ret = 0, metablks_count;
unsigned char *src_table, *itb;
u32 src_len, dest_offset = 0;
unsigned long dest_len = 0;
bool compressed;
table_size = get_unaligned_le64(&sblk->directory_table_start) -
get_unaligned_le64(&sblk->inode_table_start);
start = get_unaligned_le64(&sblk->inode_table_start) /
ctxt.cur_dev->blksz;
n_blks = sqfs_calc_n_blks(sblk->inode_table_start,
sblk->directory_table_start, &table_offset);
/* Allocate a proper sized buffer (itb) to store the inode table */
itb = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);
if (!itb)
return -ENOMEM;
if (sqfs_disk_read(start, n_blks, itb) < 0) {
ret = -EINVAL;
goto free_itb;
}
/* Parse inode table (metadata block) header */
ret = sqfs_read_metablock(itb, table_offset, &compressed, &src_len);
if (ret) {
ret = -EINVAL;
goto free_itb;
}
/* Calculate size to store the whole decompressed table */
metablks_count = sqfs_count_metablks(itb, table_offset, table_size);
if (metablks_count < 1) {
ret = -EINVAL;
goto free_itb;
}
*inode_table = malloc(metablks_count * SQFS_METADATA_BLOCK_SIZE);
if (!*inode_table) {
ret = -ENOMEM;
printf("Error: failed to allocate squashfs inode_table of size %i, increasing CONFIG_SYS_MALLOC_LEN could help\n",
metablks_count * SQFS_METADATA_BLOCK_SIZE);
goto free_itb;
}
src_table = itb + table_offset + SQFS_HEADER_SIZE;
/* Extract compressed Inode table */
for (j = 0; j < metablks_count; j++) {
sqfs_read_metablock(itb, table_offset, &compressed, &src_len);
if (compressed) {
dest_len = SQFS_METADATA_BLOCK_SIZE;
ret = sqfs_decompress(&ctxt, *inode_table +
dest_offset, &dest_len,
src_table, src_len);
if (ret) {
free(*inode_table);
*inode_table = NULL;
goto free_itb;
}
dest_offset += dest_len;
} else {
memcpy(*inode_table + (j * SQFS_METADATA_BLOCK_SIZE),
src_table, src_len);
}
/*
* Offsets to the decompression destination, to the metadata
* buffer 'itb' and to the decompression source, respectively.
*/
table_offset += src_len + SQFS_HEADER_SIZE;
src_table += src_len + SQFS_HEADER_SIZE;
}
free_itb:
free(itb);
return ret;
}
| 0 |
[
"CWE-787"
] |
u-boot
|
2ac0baab4aff1a0b45067d0b62f00c15f4e86856
| 229,368,428,454,643,460,000,000,000,000,000,000,000 | 85 |
fs/squashfs: sqfs_read: Prevent arbitrary code execution
Following Jincheng's report, an out-of-band write leading to arbitrary
code execution is possible because on one side the squashfs logic
accepts directory names up to 65535 bytes (u16), while U-Boot fs logic
accepts directory names up to 255 bytes long.
Prevent such an exploit from happening by capping directory name sizes
to 255. Use a define for this purpose so that developers can link the
limitation to its source and eventually kill it some day by dynamically
allocating this array (if ever desired).
Link: https://lore.kernel.org/all/CALO=DHFB+yBoXxVr5KcsK0iFdg+e7ywko4-e+72kjbcS8JBfPw@mail.gmail.com
Reported-by: Jincheng Wang <[email protected]>
Signed-off-by: Miquel Raynal <[email protected]>
Tested-by: Jincheng Wang <[email protected]>
|
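The core of the fix is a clamp between two representations: squashfs allows u16 name lengths (up to 65535), while the consuming fs layer has a 255-byte buffer. A hedged sketch of an overflow-proof copy guarded by a named constant (the names and layout here are illustrative, not U-Boot's exact code):

#include <string.h>

#define FS_MAX_NAME_LEN 255   /* illustrative; mirrors the capping define */

struct dirent_buf {
    char name[FS_MAX_NAME_LEN + 1];
};

/* Reject (rather than silently truncate) names the fs layer cannot
 * represent, so an attacker-controlled u16 length from the filesystem
 * image can never overflow 'out->name'. */
static int copy_dirent_name(struct dirent_buf *out,
                            const char *src, unsigned int src_len)
{
    if (src_len > FS_MAX_NAME_LEN)
        return -1;   /* treat the directory entry as corrupt */
    memcpy(out->name, src, src_len);
    out->name[src_len] = '\0';
    return 0;
}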
proto_all_finfos(proto_tree *tree)
{
ffdata_t ffdata;
/* Pre allocate enough space to hold all fields in most cases */
ffdata.array = g_ptr_array_sized_new(512);
ffdata.id = 0;
proto_tree_traverse_pre_order(tree, every_finfo, &ffdata);
return ffdata.array;
}
| 0 |
[
"CWE-401"
] |
wireshark
|
a9fc769d7bb4b491efb61c699d57c9f35269d871
| 103,353,862,417,262,170,000,000,000,000,000,000,000 | 12 |
epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032.
|
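Scope-based (arena) allocation is what makes the wmem fix leak-free: every allocation tied to the packet scope is released when the scope ends, no matter how the dissector returns. A generic C arena sketch of that idea (illustrative only; wmem's real implementation differs):

#include <stdlib.h>

/* Generic arena: every allocation is chained to the scope and freed in
 * one sweep, which is how scoped allocators avoid leaks on early
 * returns and error paths. */
struct arena_node { struct arena_node *next; };
struct arena { struct arena_node *head; };

static void *arena_alloc(struct arena *a, size_t size)
{
    struct arena_node *n = malloc(sizeof(*n) + size);
    if (n == NULL)
        return NULL;
    n->next = a->head;
    a->head = n;
    return (void *)(n + 1);   /* payload follows the link header */
}

static void arena_free_all(struct arena *a)
{
    while (a->head != NULL) {
        struct arena_node *n = a->head;
        a->head = n->next;
        free(n);
    }
}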
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
struct RBasic *p;
static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
mrb_gc *gc = &mrb->gc;
if (cls) {
enum mrb_vtype tt;
switch (cls->tt) {
case MRB_TT_CLASS:
case MRB_TT_SCLASS:
case MRB_TT_MODULE:
case MRB_TT_ENV:
break;
default:
mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
}
tt = MRB_INSTANCE_TT(cls);
if (tt != MRB_TT_FALSE &&
ttype != MRB_TT_SCLASS &&
ttype != MRB_TT_ICLASS &&
ttype != MRB_TT_ENV &&
ttype != tt) {
mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls));
}
}
#ifdef MRB_GC_STRESS
mrb_full_gc(mrb);
#endif
if (gc->threshold < gc->live) {
mrb_incremental_gc(mrb);
}
if (gc->free_heaps == NULL) {
add_heap(mrb, gc);
}
p = gc->free_heaps->freelist;
gc->free_heaps->freelist = ((struct free_obj*)p)->next;
if (gc->free_heaps->freelist == NULL) {
unlink_free_heap_page(gc, gc->free_heaps);
}
gc->live++;
gc_protect(mrb, gc, p);
*(RVALUE *)p = RVALUE_zero;
p->tt = ttype;
p->c = cls;
paint_partial_white(gc, p);
return p;
}
| 0 |
[
"CWE-416"
] |
mruby
|
5c114c91d4ff31859fcd84cf8bf349b737b90d99
| 330,675,889,607,906,550,000,000,000,000,000,000,000 | 52 |
Clear unused stack region that may refer to freed objects; fix #3596
|
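Clearing the unused region matters because the collector may still scan slots above the live stack top and chase stale pointers into freed objects. A hedged C sketch of the wipe (the types and names are illustrative stand-ins for mruby's internals):

#include <string.h>

typedef struct { void *p; int tt; } value;   /* stand-in for mrb_value */

struct vm_stack {
    value *base;
    value *top;   /* first unused slot */
    value *end;   /* one past the allocation */
};

/* After popping frames, wipe everything above 'top' so the collector
 * cannot mistake leftover bit patterns for live object references. */
static void clear_dead_slots(struct vm_stack *s)
{
    memset(s->top, 0, (size_t)(s->end - s->top) * sizeof(value));
}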
int imap_mailbox_status(struct Mailbox *m, bool queue)
{
struct ImapAccountData *adata = imap_adata_get(m);
struct ImapMboxData *mdata = imap_mdata_get(m);
if (!adata || !mdata)
return -1;
return imap_status(adata, mdata, queue);
}
| 0 |
[
"CWE-94",
"CWE-74"
] |
neomutt
|
fb013ec666759cb8a9e294347c7b4c1f597639cc
| 280,273,820,675,254,520,000,000,000,000,000,000,000 | 8 |
tls: clear data after a starttls acknowledgement
After a starttls acknowledgement message, clear the buffers of any
incoming data / commands. This will ensure that all future data is
handled securely.
Co-authored-by: Pietro Cerutti <[email protected]>
|
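This is the classic STARTTLS buffering flaw: any bytes that arrive after the positive STARTTLS reply but before the handshake are unauthenticated and must not be carried into the TLS session. A protocol-agnostic C sketch of the mitigation (the connection type and tls_handshake() are hypothetical):

struct conn {
    char inbuf[4096];
    unsigned int inbuf_len;   /* bytes read but not yet consumed */
    int tls_active;
};

/* Hypothetical TLS setup; returns 0 on success. */
static int tls_handshake(struct conn *c) { (void)c; return 0; }

/* After a positive STARTTLS reply, drop any already-buffered plaintext
 * before handing the socket to TLS: those bytes were never
 * authenticated and could be attacker-injected commands or replies. */
static int starttls_upgrade(struct conn *c)
{
    c->inbuf_len = 0;          /* discard pre-handshake leftovers */
    if (tls_handshake(c) != 0)
        return -1;
    c->tls_active = 1;
    return 0;
}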
void CClient::OnDemoPlayerMessage(void *pData, int Size)
{
CUnpacker Unpacker;
Unpacker.Reset(pData, Size);
// unpack msgid and system flag
int Msg = Unpacker.GetInt();
int Sys = Msg&1;
Msg >>= 1;
if(Unpacker.Error())
return;
if(!Sys)
GameClient()->OnMessage(Msg, &Unpacker);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
teeworlds
|
ff254722a2683867fcb3e67569ffd36226c4bc62
| 241,437,844,156,292,960,000,000,000,000,000,000,000 | 16 |
added some checks to snap handling
|
static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
{
int rc;
BNX2X_DEV_INFO("Uncommon unload Flow\n");
/* Test if previous unload process was already finished for this path */
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
BNX2X_DEV_INFO("Path is unmarked\n");
/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
if (bnx2x_prev_is_after_undi(bp))
goto out;
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
if (!rc) {
/* fw version is good */
BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
rc = bnx2x_do_flr(bp);
}
if (!rc) {
/* FLR was performed */
BNX2X_DEV_INFO("FLR successful\n");
return 0;
}
BNX2X_DEV_INFO("Could not FLR\n");
out:
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
if (!rc)
rc = BNX2X_PREV_WAIT_NEEDED;
return rc;
}
| 0 |
[
"CWE-20"
] |
linux
|
8914a595110a6eca69a5e275b323f5d09e18f4f9
| 148,653,065,233,887,640,000,000,000,000,000,000,000 | 44 |
bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
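The fix this message describes lands in the driver's per-packet feature check: when a GSO packet's segment size exceeds the ~9700-byte firmware limit, the TSO feature bits are masked off so the core stack segments in software instead. A simplified, self-contained sketch (the struct and feature flags are stand-ins for the kernel's sk_buff and NETIF_F_* bits, not the literal patch):

#include <stdbool.h>

#define FW_MAX_GSO_SIZE 9700u   /* firmware limit from the commit message */

#define F_TSO  (1u << 0)        /* stand-ins for NETIF_F_* feature bits */
#define F_TSO6 (1u << 1)

struct pkt {
    bool is_gso;
    unsigned int gso_size;      /* per-segment payload size */
};

/* Per-packet feature check: mask off the offload bits for oversized
 * GSO packets so the core stack falls back to software segmentation
 * instead of handing the firmware something it cannot survive. */
static unsigned int features_check(const struct pkt *p,
                                   unsigned int features)
{
    if (p->is_gso && p->gso_size > FW_MAX_GSO_SIZE)
        features &= ~(F_TSO | F_TSO6);
    return features;
}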
int ssl3_enc(SSL *s, int send)
{
SSL3_RECORD *rec;
EVP_CIPHER_CTX *ds;
unsigned long l;
int bs,i,mac_size=0;
const EVP_CIPHER *enc;
if (send)
{
ds=s->enc_write_ctx;
rec= &(s->s3->wrec);
if (s->enc_write_ctx == NULL)
enc=NULL;
else
enc=EVP_CIPHER_CTX_cipher(s->enc_write_ctx);
}
else
{
ds=s->enc_read_ctx;
rec= &(s->s3->rrec);
if (s->enc_read_ctx == NULL)
enc=NULL;
else
enc=EVP_CIPHER_CTX_cipher(s->enc_read_ctx);
}
if ((s->session == NULL) || (ds == NULL) ||
(enc == NULL))
{
memmove(rec->data,rec->input,rec->length);
rec->input=rec->data;
}
else
{
l=rec->length;
bs=EVP_CIPHER_block_size(ds->cipher);
/* COMPRESS */
if ((bs != 1) && send)
{
i=bs-((int)l%bs);
/* we need to add 'i-1' padding bytes */
l+=i;
/* the last of these zero bytes will be overwritten
* with the padding length. */
memset(&rec->input[rec->length], 0, i);
rec->length+=i;
rec->input[l-1]=(i-1);
}
if (!send)
{
if (l == 0 || l%bs != 0)
{
SSLerr(SSL_F_SSL3_ENC,SSL_R_BLOCK_CIPHER_PAD_IS_WRONG);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_DECRYPTION_FAILED);
return 0;
}
/* otherwise, rec->length >= bs */
}
EVP_Cipher(ds,rec->data,rec->input,l);
if (EVP_MD_CTX_md(s->read_hash) != NULL)
mac_size = EVP_MD_CTX_size(s->read_hash);
if ((bs != 1) && !send)
return ssl3_cbc_remove_padding(s, rec, bs, mac_size);
}
return(1);
}
| 1 |
[
"CWE-310"
] |
openssl
|
b23da2919b332fd83fa6de87caacb0651f64a3f5
| 25,721,716,236,025,656,000,000,000,000,000,000,000 | 73 |
Update DTLS code to match CBC decoding in TLS.
This change updates the DTLS code to match the constant-time CBC
behaviour in TLS.
(cherry picked from commit 9f27de170d1b7bef3d46d41382dc4dafde8b3900)
|
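"Constant-time CBC behaviour" here means checking the padding of a decrypted record without data-dependent branches or memory accesses, closing the Lucky-13-style timing channel. A standalone C sketch of such a check (illustrative; not OpenSSL's ssl3_cbc_remove_padding):

#include <stddef.h>

/* Branch-free comparisons; operands are assumed < 2^31 so the
 * subtraction's sign bit serves as a borrow flag. All-ones = true. */
static unsigned int ct_ge(unsigned int a, unsigned int b)
{
    return ~(0u - ((a - b) >> 31));
}

static unsigned int ct_eq(unsigned int a, unsigned int b)
{
    unsigned int z = a ^ b;
    return ~(0u - ((z | (0u - z)) >> 31));
}

/* TLS-style CBC pad check: the last byte claims pad length 'pad'; the
 * final pad+1 bytes must all equal pad. Always scans a fixed window of
 * up to 256 bytes so timing is independent of the secret pad value.
 * Returns all-ones for good padding, 0 for bad. Illustrative only. */
static unsigned int ct_check_padding(const unsigned char *rec, size_t len)
{
    unsigned int pad = rec[len - 1];
    unsigned int good = ct_ge((unsigned int)len, pad + 1);
    unsigned int window = len < 256 ? (unsigned int)len : 256;
    for (unsigned int i = 1; i <= window; i++) {
        unsigned int in_pad = ct_ge(pad + 1, i);   /* i <= pad+1 ? */
        good &= ~in_pad | ct_eq(rec[len - i], pad);
    }
    return good;
}

The fixed 256-byte window is the key design choice: the loop's trip count depends only on the public record length, never on the secret pad byte.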
int enc_untrusted_sys_futex_wait(int32_t *futex, int32_t expected,
int64_t timeout_microsec) {
if (!TrustedPrimitives::IsOutsideEnclave(futex, sizeof(int32_t))) {
TrustedPrimitives::BestEffortAbort(
"enc_untrusted_sys_futex_wait: futex word should be in untrusted "
"local memory.");
}
MessageWriter input;
MessageReader output;
input.Push<uint64_t>(reinterpret_cast<uint64_t>(futex));
input.Push<int32_t>(expected);
input.Push<int64_t>(timeout_microsec);
const auto status = NonSystemCallDispatcher(
::asylo::host_call::kSysFutexWaitHandler, &input, &output);
CheckStatusAndParamCount(status, output, "enc_untrusted_sys_futex_wait", 2);
int result = output.next<int>();
int klinux_errno = output.next<int>();
// If FUTEX_WAIT successfully causes the thread to be suspended in the kernel,
// it returns a zero when the caller is woken up. Otherwise, it returns the
// appropriate errno.
if (result != 0) {
errno = FromkLinuxErrorNumber(klinux_errno);
}
return result;
}
| 0 |
[
"CWE-787"
] |
asylo
|
a37fb6a0e7daf30134dbbf357c9a518a1026aa02
| 219,272,135,699,476,300,000,000,000,000,000,000,000 | 27 |
Check that the untrusted queue is outside the enclave
PiperOrigin-RevId: 333370935
Change-Id: Ic3f15d5db1302d95c7cb199b44172474fecb81ca
|
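An IsOutsideEnclave-style check reduces to an overflow-safe interval test: the entire [addr, addr+size) range must miss [enclave_base, enclave_base+enclave_size). A hedged sketch (the enclave bounds are illustrative globals; a real runtime derives them from the enclave's load layout):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative enclave bounds, not real values. */
static const uintptr_t enclave_base = 0x7f0000000000u;
static const uintptr_t enclave_size = 0x40000000u;

/* True iff [addr, addr+size) lies entirely outside the enclave range.
 * Wrap-around of addr+size is rejected as unsafe. */
static bool is_outside_enclave(const void *p, size_t size)
{
    uintptr_t addr = (uintptr_t)p;
    if (size == 0)
        return true;
    if (addr + size < addr)
        return false;   /* overflow: refuse */
    return (addr + size <= enclave_base) ||
           (addr >= enclave_base + enclave_size);
}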
relay_websocket_encode_frame (int opcode,
const char *buffer,
unsigned long long length,
unsigned long long *length_frame)
{
unsigned char *frame;
unsigned long long index;
*length_frame = 0;
frame = malloc (length + 10);
if (!frame)
return NULL;
frame[0] = 0x80;
frame[0] |= opcode;
if (length <= 125)
{
/* length on one byte */
frame[1] = length;
index = 2;
}
else if (length <= 65535)
{
/* length on 2 bytes */
frame[1] = 126;
frame[2] = (length >> 8) & 0xFF;
frame[3] = length & 0xFF;
index = 4;
}
else
{
/* length on 8 bytes */
frame[1] = 127;
frame[2] = (length >> 56) & 0xFF;
frame[3] = (length >> 48) & 0xFF;
frame[4] = (length >> 40) & 0xFF;
frame[5] = (length >> 32) & 0xFF;
frame[6] = (length >> 24) & 0xFF;
frame[7] = (length >> 16) & 0xFF;
frame[8] = (length >> 8) & 0xFF;
frame[9] = length & 0xFF;
index = 10;
}
/* copy buffer after length */
memcpy (frame + index, buffer, length);
*length_frame = index + length;
return (char *)frame;
}
| 0 |
[
"CWE-125"
] |
weechat
|
8b1331f98de1714bae15a9ca2e2b393ba49d735b
| 268,488,439,287,964,100,000,000,000,000,000,000,000 | 53 |
relay: fix crash when decoding a malformed websocket frame
|
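The encoder above emits 1-, 2-, or 8-byte extended lengths; the crash fixed by this commit was on the decoding side, where a malformed frame can claim more payload than was actually received. A hedged sketch of a bounds-checked parse of the same wire format, for unmasked server-to-client frames (not WeeChat's actual decoder):

#include <stddef.h>

/* Parse the payload length of an unmasked frame header. Returns the
 * header size on success and stores the payload length, or -1 if the
 * buffer is too short for what the header claims. */
static int ws_parse_length(const unsigned char *buf, size_t buflen,
                           unsigned long long *payload_len)
{
    if (buflen < 2)
        return -1;
    unsigned int len7 = buf[1] & 0x7F;
    size_t header = 2;
    unsigned long long len = len7;
    if (len7 == 126) {
        if (buflen < 4)
            return -1;
        len = ((unsigned long long)buf[2] << 8) | buf[3];
        header = 4;
    } else if (len7 == 127) {
        if (buflen < 10)
            return -1;
        len = 0;
        for (int i = 0; i < 8; i++)
            len = (len << 8) | buf[2 + i];
        header = 10;
    }
    if (len > buflen - header)   /* claimed payload exceeds the data */
        return -1;
    *payload_len = len;
    return (int)header;
}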
static MagickBooleanType WriteOnePNGImage(MngInfo *mng_info,
const ImageInfo *image_info,Image *image)
{
char
s[2];
char
im_vers[32],
libpng_runv[32],
libpng_vers[32],
zlib_runv[32],
zlib_vers[32];
const char
*name,
*property,
*value;
const StringInfo
*profile;
int
num_passes,
pass,
ping_wrote_caNv;
png_byte
ping_trans_alpha[256];
png_color
palette[257];
png_color_16
ping_background,
ping_trans_color;
png_info
*ping_info;
png_struct
*ping;
png_uint_32
ping_height,
ping_width;
ssize_t
y;
MagickBooleanType
image_matte,
logging,
matte,
ping_have_blob,
ping_have_cheap_transparency,
ping_have_color,
ping_have_non_bw,
ping_have_PLTE,
ping_have_bKGD,
ping_have_eXIf,
ping_have_iCCP,
ping_have_pHYs,
ping_have_sRGB,
ping_have_tRNS,
ping_exclude_bKGD,
ping_exclude_cHRM,
ping_exclude_date,
/* ping_exclude_EXIF, */
ping_exclude_eXIf,
ping_exclude_gAMA,
ping_exclude_iCCP,
/* ping_exclude_iTXt, */
ping_exclude_oFFs,
ping_exclude_pHYs,
ping_exclude_sRGB,
ping_exclude_tEXt,
ping_exclude_tIME,
/* ping_exclude_tRNS, */
ping_exclude_caNv,
ping_exclude_zCCP, /* hex-encoded iCCP */
ping_exclude_zTXt,
ping_preserve_colormap,
ping_preserve_iCCP,
ping_need_colortype_warning,
status,
tried_332,
tried_333,
tried_444;
MemoryInfo
*volatile pixel_info;
QuantumInfo
*quantum_info;
register ssize_t
i,
x;
unsigned char
*ping_pixels;
volatile int
image_colors,
ping_bit_depth,
ping_color_type,
ping_interlace_method,
ping_compression_method,
ping_filter_method,
ping_num_trans;
volatile size_t
image_depth,
old_bit_depth;
size_t
quality,
rowbytes,
save_image_depth;
int
j,
number_colors,
number_opaque,
number_semitransparent,
number_transparent,
ping_pHYs_unit_type;
png_uint_32
ping_pHYs_x_resolution,
ping_pHYs_y_resolution;
logging=LogMagickEvent(CoderEvent,GetMagickModule(),
" Enter WriteOnePNGImage()");
/* Define these outside of the following "if logging()" block so they will
* show in debuggers.
*/
*im_vers='\0';
(void) ConcatenateMagickString(im_vers,
MagickLibVersionText,MaxTextExtent);
(void) ConcatenateMagickString(im_vers,
MagickLibAddendum,MaxTextExtent);
*libpng_vers='\0';
(void) ConcatenateMagickString(libpng_vers,
PNG_LIBPNG_VER_STRING,32);
*libpng_runv='\0';
(void) ConcatenateMagickString(libpng_runv,
png_get_libpng_ver(NULL),32);
*zlib_vers='\0';
(void) ConcatenateMagickString(zlib_vers,
ZLIB_VERSION,32);
*zlib_runv='\0';
(void) ConcatenateMagickString(zlib_runv,
zlib_version,32);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" IM version = %s", im_vers);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Libpng version = %s", libpng_vers);
if (LocaleCompare(libpng_vers,libpng_runv) != 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" running with %s", libpng_runv);
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Zlib version = %s", zlib_vers);
if (LocaleCompare(zlib_vers,zlib_runv) != 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" running with %s", zlib_runv);
}
}
/* Initialize some stuff */
ping_bit_depth=0,
ping_color_type=0,
ping_interlace_method=0,
ping_compression_method=0,
ping_filter_method=0,
ping_num_trans = 0;
ping_background.red = 0;
ping_background.green = 0;
ping_background.blue = 0;
ping_background.gray = 0;
ping_background.index = 0;
ping_trans_color.red=0;
ping_trans_color.green=0;
ping_trans_color.blue=0;
ping_trans_color.gray=0;
ping_pHYs_unit_type = 0;
ping_pHYs_x_resolution = 0;
ping_pHYs_y_resolution = 0;
ping_have_blob=MagickFalse;
ping_have_cheap_transparency=MagickFalse;
ping_have_color=MagickTrue;
ping_have_non_bw=MagickTrue;
ping_have_PLTE=MagickFalse;
ping_have_bKGD=MagickFalse;
ping_have_eXIf=MagickTrue;
ping_have_iCCP=MagickFalse;
ping_have_pHYs=MagickFalse;
ping_have_sRGB=MagickFalse;
ping_have_tRNS=MagickFalse;
ping_exclude_bKGD=mng_info->ping_exclude_bKGD;
ping_exclude_caNv=mng_info->ping_exclude_caNv;
ping_exclude_cHRM=mng_info->ping_exclude_cHRM;
ping_exclude_date=mng_info->ping_exclude_date;
/* ping_exclude_EXIF=mng_info->ping_exclude_EXIF; */
ping_exclude_eXIf=mng_info->ping_exclude_eXIf;
ping_exclude_gAMA=mng_info->ping_exclude_gAMA;
ping_exclude_iCCP=mng_info->ping_exclude_iCCP;
/* ping_exclude_iTXt=mng_info->ping_exclude_iTXt; */
ping_exclude_oFFs=mng_info->ping_exclude_oFFs;
ping_exclude_pHYs=mng_info->ping_exclude_pHYs;
ping_exclude_sRGB=mng_info->ping_exclude_sRGB;
ping_exclude_tEXt=mng_info->ping_exclude_tEXt;
ping_exclude_tIME=mng_info->ping_exclude_tIME;
/* ping_exclude_tRNS=mng_info->ping_exclude_tRNS; */
ping_exclude_zCCP=mng_info->ping_exclude_zCCP; /* hex-encoded iCCP in zTXt */
ping_exclude_zTXt=mng_info->ping_exclude_zTXt;
ping_preserve_colormap = mng_info->ping_preserve_colormap;
ping_preserve_iCCP = mng_info->ping_preserve_iCCP;
ping_need_colortype_warning = MagickFalse;
property=(const char *) NULL;
/* Recognize the ICC sRGB profile and convert it to the sRGB chunk,
* i.e., eliminate the ICC profile and set image->rendering_intent.
* Note that this will not involve any changes to the actual pixels
* but merely passes information to applications that read the resulting
* PNG image.
*
* To do: recognize other variants of the sRGB profile, using the CRC to
* verify all recognized variants including the 7 already known.
*
* Work around libpng16+ rejecting some "known invalid sRGB profiles".
*
* Use something other than image->rendering_intent to record the fact
* that the sRGB profile was found.
*
* Record the ICC version (currently v2 or v4) of the incoming sRGB ICC
* profile. Record the Blackpoint Compensation, if any.
*/
if (ping_exclude_sRGB == MagickFalse && ping_preserve_iCCP == MagickFalse)
{
char
*name;
const StringInfo
*profile;
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
if ((LocaleCompare(name,"ICC") == 0) ||
(LocaleCompare(name,"ICM") == 0))
{
int
icheck,
got_crc=0;
png_uint_32
length,
profile_crc=0;
unsigned char
*data;
length=(png_uint_32) GetStringInfoLength(profile);
for (icheck=0; sRGB_info[icheck].len > 0; icheck++)
{
if (length == sRGB_info[icheck].len)
{
if (got_crc == 0)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Got a %lu-byte ICC profile (potentially sRGB)",
(unsigned long) length);
data=GetStringInfoDatum(profile);
profile_crc=crc32(0,data,length);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" with crc=%8x",(unsigned int) profile_crc);
got_crc++;
}
if (profile_crc == sRGB_info[icheck].crc)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" It is sRGB with rendering intent = %s",
Magick_RenderingIntentString_from_PNG_RenderingIntent(
sRGB_info[icheck].intent));
if (image->rendering_intent==UndefinedIntent)
{
image->rendering_intent=
Magick_RenderingIntent_from_PNG_RenderingIntent(
sRGB_info[icheck].intent);
}
ping_exclude_iCCP = MagickTrue;
ping_exclude_zCCP = MagickTrue;
ping_have_sRGB = MagickTrue;
break;
}
}
}
if (sRGB_info[icheck].len == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Got a %lu-byte ICC profile not recognized as sRGB",
(unsigned long) length);
}
}
name=GetNextImageProfile(image);
}
}
number_opaque = 0;
number_semitransparent = 0;
number_transparent = 0;
if (logging != MagickFalse)
{
if (image->storage_class == UndefinedClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->storage_class=UndefinedClass");
if (image->storage_class == DirectClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->storage_class=DirectClass");
if (image->storage_class == PseudoClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->storage_class=PseudoClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image_info->magick= %s",image_info->magick);
(void) LogMagickEvent(CoderEvent,GetMagickModule(), image->taint ?
" image->taint=MagickTrue":
" image->taint=MagickFalse");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->gamma=%g", image->gamma);
}
if (image->storage_class == PseudoClass &&
(mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32 ||
mng_info->write_png48 || mng_info->write_png64 ||
(mng_info->write_png_colortype != 1 &&
mng_info->write_png_colortype != 5)))
{
(void) SyncImage(image);
image->storage_class = DirectClass;
}
if (ping_preserve_colormap == MagickFalse)
{
if (image->storage_class != PseudoClass && image->colormap != NULL)
{
/* Free the bogus colormap; it can cause trouble later */
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Freeing bogus colormap");
(void) RelinquishMagickMemory(image->colormap);
image->colormap=NULL;
}
}
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace);
/*
Sometimes we get PseudoClass images whose RGB values don't match
the colors in the colormap. This code syncs the RGB values.
*/
if (image->depth <= 8 && image->taint && image->storage_class == PseudoClass)
(void) SyncImage(image);
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
if (image->depth > 8)
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reducing PNG bit depth to 8 since this is a Q8 build.");
image->depth=8;
}
#endif
/* Respect the -depth option */
if (image->depth < 4)
{
register PixelPacket
*r;
ExceptionInfo
*exception;
exception=(&image->exception);
if (image->depth > 2)
{
/* Scale to 4-bit */
LBR04PacketRGBO(image->background_color);
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
LBR04PixelRGBO(r);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->storage_class == PseudoClass && image->colormap != NULL)
{
for (i=0; i < (ssize_t) image->colors; i++)
{
LBR04PacketRGBO(image->colormap[i]);
}
}
}
else if (image->depth > 1)
{
/* Scale to 2-bit */
LBR02PacketRGBO(image->background_color);
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
LBR02PixelRGBO(r);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->storage_class == PseudoClass && image->colormap != NULL)
{
for (i=0; i < (ssize_t) image->colors; i++)
{
LBR02PacketRGBO(image->colormap[i]);
}
}
}
else
{
/* Scale to 1-bit */
LBR01PacketRGBO(image->background_color);
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
LBR01PixelRGBO(r);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (image->storage_class == PseudoClass && image->colormap != NULL)
{
for (i=0; i < (ssize_t) image->colors; i++)
{
LBR01PacketRGBO(image->colormap[i]);
}
}
}
}
/* To do: set to next higher multiple of 8 */
if (image->depth < 8)
image->depth=8;
#if (MAGICKCORE_QUANTUM_DEPTH > 16)
/* PNG does not handle depths greater than 16 so reduce it even
* if lossy
*/
if (image->depth > 8)
image->depth=16;
#endif
#if (MAGICKCORE_QUANTUM_DEPTH > 8)
if (image->depth > 8)
{
/* To do: fill low byte properly */
image->depth=16;
}
if (image->depth == 16 && mng_info->write_png_depth != 16)
if (mng_info->write_png8 || LosslessReduceDepthOK(image) != MagickFalse)
image->depth = 8;
#endif
image_colors = (int) image->colors;
if (mng_info->write_png_colortype &&
(mng_info->write_png_colortype > 4 || (mng_info->write_png_depth >= 8 &&
mng_info->write_png_colortype < 4 && image->matte == MagickFalse)))
{
/* Avoid the expensive BUILD_PALETTE operation if we're sure that we
* are not going to need the result.
*/
number_opaque = (int) image->colors;
if (mng_info->write_png_colortype == 1 ||
mng_info->write_png_colortype == 5)
ping_have_color=MagickFalse;
else
ping_have_color=MagickTrue;
ping_have_non_bw=MagickFalse;
if (image->matte != MagickFalse)
{
number_transparent = 2;
number_semitransparent = 1;
}
else
{
number_transparent = 0;
number_semitransparent = 0;
}
}
if (mng_info->write_png_colortype < 7)
{
/* BUILD_PALETTE
*
* Normally we run this just once, but in the case of writing PNG8
* we reduce the transparency to binary and run again, then if there
* are still too many colors we reduce to a simple 4-4-4-1, then 3-3-3-1
* RGBA palette and run again, and then to a simple 3-3-2-1 RGBA
* palette. Then (To do) we take care of a final reduction that is only
* needed if there are still 256 colors present and one of them has both
* transparent and opaque instances.
*/
tried_332 = MagickFalse;
tried_333 = MagickFalse;
tried_444 = MagickFalse;
for (j=0; j<6; j++)
{
/*
* Sometimes we get DirectClass images that have 256 colors or fewer.
* This code will build a colormap.
*
* Also, sometimes we get PseudoClass images with an out-of-date
* colormap. This code will replace the colormap with a new one.
* Sometimes we get PseudoClass images that have more than 256 colors.
* This code will delete the colormap and change the image to
* DirectClass.
*
* If image->matte is MagickFalse, we ignore the opacity channel
* even though it sometimes contains left-over non-opaque values.
*
* Also we gather some information (number of opaque, transparent,
* and semitransparent pixels, and whether the image has any non-gray
* pixels or only black-and-white pixels) that we might need later.
*
* Even if the user wants to force GrayAlpha or RGBA (colortype 4 or 6)
* we need to check for bogus non-opaque values, at least.
*/
ExceptionInfo
*exception;
int
n;
PixelPacket
opaque[260],
semitransparent[260],
transparent[260];
register IndexPacket
*indexes;
register const PixelPacket
*s,
*q;
register PixelPacket
*r;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Enter BUILD_PALETTE:");
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->columns=%.20g",(double) image->columns);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->rows=%.20g",(double) image->rows);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->matte=%.20g",(double) image->matte);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->depth=%.20g",(double) image->depth);
if (image->storage_class == PseudoClass && image->colormap != NULL)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Original colormap:");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" i (red,green,blue,opacity)");
for (i=0; i < 256; i++)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %d (%d,%d,%d,%d)",
(int) i,
(int) image->colormap[i].red,
(int) image->colormap[i].green,
(int) image->colormap[i].blue,
(int) image->colormap[i].opacity);
}
for (i=image->colors - 10; i < (ssize_t) image->colors; i++)
{
if (i > 255)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %d (%d,%d,%d,%d)",
(int) i,
(int) image->colormap[i].red,
(int) image->colormap[i].green,
(int) image->colormap[i].blue,
(int) image->colormap[i].opacity);
}
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->colors=%d",(int) image->colors);
if (image->colors == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" (zero means unknown)");
if (ping_preserve_colormap == MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Regenerate the colormap");
}
exception=(&image->exception);
image_colors=0;
number_opaque = 0;
number_semitransparent = 0;
number_transparent = 0;
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->matte == MagickFalse ||
GetPixelOpacity(q) == OpaqueOpacity)
{
if (number_opaque < 259)
{
if (number_opaque == 0)
{
GetPixelRGB(q, opaque);
opaque[0].opacity=OpaqueOpacity;
number_opaque=1;
}
for (i=0; i< (ssize_t) number_opaque; i++)
{
if (IsColorEqual(q, opaque+i))
break;
}
if (i == (ssize_t) number_opaque &&
number_opaque < 259)
{
number_opaque++;
GetPixelRGB(q, opaque+i);
opaque[i].opacity=OpaqueOpacity;
}
}
}
else if (q->opacity == TransparentOpacity)
{
if (number_transparent < 259)
{
if (number_transparent == 0)
{
GetPixelRGBO(q, transparent);
ping_trans_color.red=
(unsigned short) GetPixelRed(q);
ping_trans_color.green=
(unsigned short) GetPixelGreen(q);
ping_trans_color.blue=
(unsigned short) GetPixelBlue(q);
ping_trans_color.gray=
(unsigned short) GetPixelRed(q);
number_transparent = 1;
}
for (i=0; i< (ssize_t) number_transparent; i++)
{
if (IsColorEqual(q, transparent+i))
break;
}
if (i == (ssize_t) number_transparent &&
number_transparent < 259)
{
number_transparent++;
GetPixelRGBO(q, transparent+i);
}
}
}
else
{
if (number_semitransparent < 259)
{
if (number_semitransparent == 0)
{
GetPixelRGBO(q, semitransparent);
number_semitransparent = 1;
}
for (i=0; i< (ssize_t) number_semitransparent; i++)
{
if (IsColorEqual(q, semitransparent+i)
&& GetPixelOpacity(q) ==
semitransparent[i].opacity)
break;
}
if (i == (ssize_t) number_semitransparent &&
number_semitransparent < 259)
{
number_semitransparent++;
GetPixelRGBO(q, semitransparent+i);
}
}
}
q++;
}
}
if (mng_info->write_png8 == MagickFalse &&
ping_exclude_bKGD == MagickFalse)
{
/* Add the background color to the palette, if it
* isn't already there.
*/
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Check colormap for background (%d,%d,%d)",
(int) image->background_color.red,
(int) image->background_color.green,
(int) image->background_color.blue);
}
for (i=0; i<number_opaque; i++)
{
if (opaque[i].red == image->background_color.red &&
opaque[i].green == image->background_color.green &&
opaque[i].blue == image->background_color.blue)
break;
}
if (number_opaque < 259 && i == number_opaque)
{
opaque[i] = image->background_color;
ping_background.index = i;
number_opaque++;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" background_color index is %d",(int) i);
}
}
else if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" No room in the colormap to add background color");
}
image_colors=number_opaque+number_transparent+number_semitransparent;
if (logging != MagickFalse)
{
if (image_colors > 256)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has more than 256 colors");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has %d colors",image_colors);
}
if (ping_preserve_colormap != MagickFalse)
break;
if (mng_info->write_png_colortype != 7) /* We won't need this info */
{
ping_have_color=MagickFalse;
ping_have_non_bw=MagickFalse;
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"incompatible colorspace");
ping_have_color=MagickTrue;
ping_have_non_bw=MagickTrue;
}
if(image_colors > 256)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
s=q;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelRed(s) != GetPixelGreen(s) ||
GetPixelRed(s) != GetPixelBlue(s))
{
ping_have_color=MagickTrue;
ping_have_non_bw=MagickTrue;
break;
}
s++;
}
if (ping_have_color != MagickFalse)
break;
/* Worst case is black-and-white; we are looking at every
* pixel twice.
*/
if (ping_have_non_bw == MagickFalse)
{
s=q;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelRed(s) != 0 &&
GetPixelRed(s) != QuantumRange)
{
ping_have_non_bw=MagickTrue;
break;
}
s++;
}
}
}
}
}
if (image_colors < 257)
{
PixelPacket
colormap[260];
/*
* Initialize image colormap.
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Sort the new colormap");
/* Sort palette, transparent first */;
n = 0;
for (i=0; i<number_transparent; i++)
colormap[n++] = transparent[i];
for (i=0; i<number_semitransparent; i++)
colormap[n++] = semitransparent[i];
for (i=0; i<number_opaque; i++)
colormap[n++] = opaque[i];
ping_background.index +=
(number_transparent + number_semitransparent);
/* image_colors < 257; search the colormap instead of the pixels
* to get ping_have_color and ping_have_non_bw
*/
for (i=0; i<n; i++)
{
if (ping_have_color == MagickFalse)
{
if (colormap[i].red != colormap[i].green ||
colormap[i].red != colormap[i].blue)
{
ping_have_color=MagickTrue;
ping_have_non_bw=MagickTrue;
break;
}
}
if (ping_have_non_bw == MagickFalse)
{
if (colormap[i].red != 0 && colormap[i].red != QuantumRange)
ping_have_non_bw=MagickTrue;
}
}
if ((mng_info->ping_exclude_tRNS == MagickFalse ||
(number_transparent == 0 && number_semitransparent == 0)) &&
(((mng_info->write_png_colortype-1) ==
PNG_COLOR_TYPE_PALETTE) ||
(mng_info->write_png_colortype == 0)))
{
if (logging != MagickFalse)
{
if (n != (ssize_t) image_colors)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image_colors (%d) and n (%d) don't match",
image_colors, n);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" AcquireImageColormap");
}
image->colors = image_colors;
if (AcquireImageColormap(image,image_colors) == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
for (i=0; i< (ssize_t) image_colors; i++)
image->colormap[i] = colormap[i];
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->colors=%d (%d)",
(int) image->colors, image_colors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Update the pixel indexes");
}
/* Sync the pixel indices with the new colormap */
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i< (ssize_t) image_colors; i++)
{
if ((image->matte == MagickFalse ||
image->colormap[i].opacity ==
GetPixelOpacity(q)) &&
image->colormap[i].red ==
GetPixelRed(q) &&
image->colormap[i].green ==
GetPixelGreen(q) &&
image->colormap[i].blue ==
GetPixelBlue(q))
{
SetPixelIndex(indexes+x,i);
break;
}
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->colors=%d", (int) image->colors);
if (image->colormap != NULL)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" i (red,green,blue,opacity)");
for (i=0; i < (ssize_t) image->colors; i++)
{
if (i < 300 || i >= (ssize_t) image->colors - 10)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" %d (%d,%d,%d,%d)",
(int) i,
(int) image->colormap[i].red,
(int) image->colormap[i].green,
(int) image->colormap[i].blue,
(int) image->colormap[i].opacity);
}
}
}
if (number_transparent < 257)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" number_transparent = %d",
number_transparent);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" number_transparent > 256");
if (number_opaque < 257)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" number_opaque = %d",
number_opaque);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" number_opaque > 256");
if (number_semitransparent < 257)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" number_semitransparent = %d",
number_semitransparent);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" number_semitransparent > 256");
if (ping_have_non_bw == MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" All pixels and the background are black or white");
else if (ping_have_color == MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" All pixels and the background are gray");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" At least one pixel or the background is non-gray");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Exit BUILD_PALETTE:");
}
if (mng_info->write_png8 == MagickFalse)
break;
/* Make any reductions necessary for the PNG8 format */
if (image_colors <= 256 &&
image_colors != 0 && image->colormap != NULL &&
number_semitransparent == 0 &&
number_transparent <= 1)
break;
/* PNG8 can't have semitransparent colors so we threshold the
* opacity to 0 or OpaqueOpacity, and PNG8 can only have one
* transparent color so if more than one is transparent we merge
* them into image->background_color.
*/
if (number_semitransparent != 0 || number_transparent > 1)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Thresholding the alpha channel to binary");
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(r) > TransparentOpacity/2)
{
SetPixelOpacity(r,TransparentOpacity);
SetPixelRgb(r,&image->background_color);
}
else
SetPixelOpacity(r,OpaqueOpacity);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image_colors != 0 && image_colors <= 256 &&
image->colormap != NULL)
for (i=0; i<image_colors; i++)
image->colormap[i].opacity =
(image->colormap[i].opacity > TransparentOpacity/2 ?
TransparentOpacity : OpaqueOpacity);
}
continue;
}
/* PNG8 can't have more than 256 colors so we quantize the pixels and
* background color to the 4-4-4-1, 3-3-3-1 or 3-3-2-1 palette. If the
* image is mostly gray, the 4-4-4-1 palette is likely to end up with 256
* colors or less.
*/
if (tried_444 == MagickFalse && (image_colors == 0 || image_colors > 256))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the background color to 4-4-4");
tried_444 = MagickTrue;
LBR04PacketRGB(image->background_color);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the pixel colors to 4-4-4");
if (image->colormap == NULL)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(r) == OpaqueOpacity)
LBR04PixelRGB(r);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
else /* Should not reach this; colormap already exists and
must be <= 256 */
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the colormap to 4-4-4");
for (i=0; i<image_colors; i++)
{
LBR04PacketRGB(image->colormap[i]);
}
}
continue;
}
if (tried_333 == MagickFalse && (image_colors == 0 || image_colors > 256))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the background color to 3-3-3");
tried_333 = MagickTrue;
LBR03PacketRGB(image->background_color);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the pixel colors to 3-3-3-1");
if (image->colormap == NULL)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(r) == OpaqueOpacity)
LBR03PixelRGB(r);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
else /* Should not reach this; colormap already exists and
must be <= 256 */
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the colormap to 3-3-3-1");
for (i=0; i<image_colors; i++)
{
LBR03PacketRGB(image->colormap[i]);
}
}
continue;
}
if (tried_332 == MagickFalse && (image_colors == 0 || image_colors > 256))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the background color to 3-3-2");
tried_332 = MagickTrue;
/* Red and green were already done so we only quantize the blue
* channel
*/
LBR02PacketBlue(image->background_color);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the pixel colors to 3-3-2-1");
if (image->colormap == NULL)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(r) == OpaqueOpacity)
LBR02PixelBlue(r);
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
else /* Should not reach this; colormap already exists and
must be <= 256 */
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Quantizing the colormap to 3-3-2-1");
for (i=0; i<image_colors; i++)
{
LBR02PacketBlue(image->colormap[i]);
}
}
continue;
}
if (image_colors == 0 || image_colors > 256)
{
/* Take care of special case with 256 opaque colors + 1 transparent
* color. We don't need to quantize to 2-3-2-1; we only need to
* eliminate one color, so we'll merge the two darkest red
* colors (0x49, 0, 0) -> (0x24, 0, 0).
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Merging two dark red background colors to 3-3-2-1");
if (ScaleQuantumToChar(image->background_color.red) == 0x49 &&
ScaleQuantumToChar(image->background_color.green) == 0x00 &&
ScaleQuantumToChar(image->background_color.blue) == 0x00)
{
image->background_color.red=ScaleCharToQuantum(0x24);
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Merging two dark red pixel colors to 3-3-2-1");
if (image->colormap == NULL)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
r=GetAuthenticPixels(image,0,y,image->columns,1,
exception);
if (r == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (ScaleQuantumToChar(GetPixelRed(r)) == 0x49 &&
ScaleQuantumToChar(GetPixelGreen(r)) == 0x00 &&
ScaleQuantumToChar(GetPixelBlue(r)) == 0x00 &&
GetPixelOpacity(r) == OpaqueOpacity)
{
SetPixelRed(r,ScaleCharToQuantum(0x24));
}
r++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
else
{
for (i=0; i<image_colors; i++)
{
if (ScaleQuantumToChar(image->colormap[i].red) == 0x49 &&
ScaleQuantumToChar(image->colormap[i].green) == 0x00 &&
ScaleQuantumToChar(image->colormap[i].blue) == 0x00)
{
image->colormap[i].red=ScaleCharToQuantum(0x24);
}
}
}
}
}
}
/* END OF BUILD_PALETTE */
/* If we are excluding the tRNS chunk and there is transparency,
* then we must write a Gray-Alpha (color-type 4) or RGBA (color-type 6)
* PNG.
*/
if (mng_info->ping_exclude_tRNS != MagickFalse &&
(number_transparent != 0 || number_semitransparent != 0))
{
unsigned int colortype=mng_info->write_png_colortype;
if (ping_have_color == MagickFalse)
mng_info->write_png_colortype = 5;
else
mng_info->write_png_colortype = 7;
if (colortype != 0 &&
mng_info->write_png_colortype != colortype)
ping_need_colortype_warning=MagickTrue;
}
/* See if cheap transparency is possible. It is only possible
* when there is a single transparent color, no semitransparent
* color, and no opaque color that has the same RGB components
* as the transparent color. We only need this information if
* we are writing a PNG with colortype 0 or 2, and we have not
* excluded the tRNS chunk.
*/
if (number_transparent == 1 &&
mng_info->write_png_colortype < 4)
{
ping_have_cheap_transparency = MagickTrue;
if (number_semitransparent != 0)
ping_have_cheap_transparency = MagickFalse;
else if (image_colors == 0 || image_colors > 256 ||
image->colormap == NULL)
{
ExceptionInfo
*exception;
register const PixelPacket
*q;
exception=(&image->exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetVirtualPixels(image,0,y,image->columns,1, exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (q->opacity != TransparentOpacity &&
(unsigned short) GetPixelRed(q) ==
ping_trans_color.red &&
(unsigned short) GetPixelGreen(q) ==
ping_trans_color.green &&
(unsigned short) GetPixelBlue(q) ==
ping_trans_color.blue)
{
ping_have_cheap_transparency = MagickFalse;
break;
}
q++;
}
if (ping_have_cheap_transparency == MagickFalse)
break;
}
}
else
{
/* Assuming that image->colormap[0] is the one transparent color
* and that all others are opaque.
*/
if (image_colors > 1)
for (i=1; i<image_colors; i++)
if (image->colormap[i].red == image->colormap[0].red &&
image->colormap[i].green == image->colormap[0].green &&
image->colormap[i].blue == image->colormap[0].blue)
{
ping_have_cheap_transparency = MagickFalse;
break;
}
}
if (logging != MagickFalse)
{
if (ping_have_cheap_transparency == MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Cheap transparency is not possible.");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Cheap transparency is possible.");
}
}
else
ping_have_cheap_transparency = MagickFalse;
image_depth=image->depth;
quantum_info = (QuantumInfo *) NULL;
number_colors=0;
image_colors=(int) image->colors;
image_matte=image->matte;
if (mng_info->write_png_colortype < 5)
mng_info->IsPalette=image->storage_class == PseudoClass &&
image_colors <= 256 && image->colormap != NULL;
else
mng_info->IsPalette = MagickFalse;
if ((mng_info->write_png_colortype == 4 || mng_info->write_png8) &&
(image->colors == 0 || image->colormap == NULL))
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),CoderError,
"Cannot write PNG8 or color-type 3; colormap is NULL",
"`%s'",image->filename);
return(MagickFalse);
}
/*
Allocate the PNG structures
*/
#ifdef PNG_USER_MEM_SUPPORTED
ping=png_create_write_struct_2(PNG_LIBPNG_VER_STRING,image,
MagickPNGErrorHandler,MagickPNGWarningHandler,(void *) NULL,
(png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free);
#else
ping=png_create_write_struct(PNG_LIBPNG_VER_STRING,image,
MagickPNGErrorHandler,MagickPNGWarningHandler);
#endif
if (ping == (png_struct *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
ping_info=png_create_info_struct(ping);
if (ping_info == (png_info *) NULL)
{
png_destroy_write_struct(&ping,(png_info **) NULL);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
png_set_write_fn(ping,image,png_put_data,png_flush_data);
pixel_info=(MemoryInfo *) NULL;
if (setjmp(png_jmpbuf(ping)))
{
/*
PNG write failed.
*/
#ifdef PNG_DEBUG
if (image_info->verbose)
(void) printf("PNG write has failed.\n");
#endif
png_destroy_write_struct(&ping,&ping_info);
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
if (pixel_info != (MemoryInfo *) NULL)
pixel_info=RelinquishVirtualMemory(pixel_info);
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
return(MagickFalse);
}
/* { For navigation to end of SETJMP-protected block. Within this
* block, use png_error() instead of Throwing an Exception, to ensure
* that libpng is able to clean up, and that the semaphore is unlocked.
*/
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
LockSemaphoreInfo(ping_semaphore);
#endif
#ifdef PNG_BENIGN_ERRORS_SUPPORTED
/* Allow benign errors */
png_set_benign_errors(ping, 1);
#endif
#ifdef PNG_SET_USER_LIMITS_SUPPORTED
/* Reject images with too many rows or columns */
png_set_user_limits(ping,
(png_uint_32) MagickMin(0x7fffffffL,
GetMagickResourceLimit(WidthResource)),
(png_uint_32) MagickMin(0x7fffffffL,
GetMagickResourceLimit(HeightResource)));
#endif /* PNG_SET_USER_LIMITS_SUPPORTED */
/*
Prepare PNG for writing.
*/
#if defined(PNG_MNG_FEATURES_SUPPORTED)
if (mng_info->write_mng)
{
(void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES);
# ifdef PNG_WRITE_CHECK_FOR_INVALID_INDEX_SUPPORTED
/* Disable new libpng-1.5.10 feature when writing a MNG because
* zero-length PLTE is OK
*/
png_set_check_for_invalid_index (ping, 0);
# endif
}
#else
# ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED
if (mng_info->write_mng)
png_permit_empty_plte(ping,MagickTrue);
# endif
#endif
x=0;
ping_width=(png_uint_32) image->columns;
ping_height=(png_uint_32) image->rows;
if (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32)
image_depth=8;
if (mng_info->write_png48 || mng_info->write_png64)
image_depth=16;
if (mng_info->write_png_depth != 0)
image_depth=mng_info->write_png_depth;
/* Adjust requested depth to next higher valid depth if necessary */
if (image_depth > 8)
image_depth=16;
if ((image_depth > 4) && (image_depth < 8))
image_depth=8;
if (image_depth == 3)
image_depth=4;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" width=%.20g",(double) ping_width);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" height=%.20g",(double) ping_height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image_matte=%.20g",(double) image->matte);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->depth=%.20g",(double) image->depth);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Tentative ping_bit_depth=%.20g",(double) image_depth);
}
save_image_depth=image_depth;
ping_bit_depth=(png_byte) save_image_depth;
#if defined(PNG_pHYs_SUPPORTED)
if (ping_exclude_pHYs == MagickFalse)
{
if ((image->x_resolution != 0) && (image->y_resolution != 0) &&
(!mng_info->write_mng || !mng_info->equal_physs))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up pHYs chunk");
if (image->units == PixelsPerInchResolution)
{
ping_pHYs_unit_type=PNG_RESOLUTION_METER;
ping_pHYs_x_resolution=
(png_uint_32) ((100.0*image->x_resolution+0.5)/2.54);
ping_pHYs_y_resolution=
(png_uint_32) ((100.0*image->y_resolution+0.5)/2.54);
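/* Illustrative: 72 DPI gives (100*72+0.5)/2.54, about 2834 pixels per metre */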
}
else if (image->units == PixelsPerCentimeterResolution)
{
ping_pHYs_unit_type=PNG_RESOLUTION_METER;
ping_pHYs_x_resolution=(png_uint_32) (100.0*image->x_resolution+0.5);
ping_pHYs_y_resolution=(png_uint_32) (100.0*image->y_resolution+0.5);
}
else
{
ping_pHYs_unit_type=PNG_RESOLUTION_UNKNOWN;
ping_pHYs_x_resolution=(png_uint_32) image->x_resolution;
ping_pHYs_y_resolution=(png_uint_32) image->y_resolution;
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Set up PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.",
(double) ping_pHYs_x_resolution,(double) ping_pHYs_y_resolution,
(int) ping_pHYs_unit_type);
ping_have_pHYs = MagickTrue;
}
}
#endif
if (ping_exclude_bKGD == MagickFalse)
{
if ((!mng_info->adjoin || !mng_info->equal_backgrounds))
{
unsigned int
mask;
mask=0xffff;
if (ping_bit_depth == 8)
mask=0x00ff;
if (ping_bit_depth == 4)
mask=0x000f;
if (ping_bit_depth == 2)
mask=0x0003;
if (ping_bit_depth == 1)
mask=0x0001;
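/* the mask keeps only the bits representable at the target bit depth */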
ping_background.red=(png_uint_16)
(ScaleQuantumToShort(image->background_color.red) & mask);
ping_background.green=(png_uint_16)
(ScaleQuantumToShort(image->background_color.green) & mask);
ping_background.blue=(png_uint_16)
(ScaleQuantumToShort(image->background_color.blue) & mask);
ping_background.gray=(png_uint_16) ping_background.green;
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up bKGD chunk (1)");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" background_color index is %d",
(int) ping_background.index);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ping_bit_depth=%d",ping_bit_depth);
}
ping_have_bKGD = MagickTrue;
}
/*
Select the color type.
*/
matte=image_matte;
old_bit_depth=0;
if (mng_info->IsPalette && mng_info->write_png8)
{
/* To do: make this a function cause it's used twice, except
for reducing the sample depth from 8. */
number_colors=image_colors;
ping_have_tRNS=MagickFalse;
/*
Set image palette.
*/
ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up PLTE chunk with %d colors (%d)",
(int) number_colors, image_colors);
for (i=0; i < (ssize_t) number_colors; i++)
{
palette[i].red=ScaleQuantumToChar(image->colormap[i].red);
palette[i].green=ScaleQuantumToChar(image->colormap[i].green);
palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
#if MAGICKCORE_QUANTUM_DEPTH == 8
" %3ld (%3d,%3d,%3d)",
#else
" %5ld (%5d,%5d,%5d)",
#endif
(long) i,palette[i].red,palette[i].green,palette[i].blue);
}
ping_have_PLTE=MagickTrue;
image_depth=ping_bit_depth;
ping_num_trans=0;
if (matte != MagickFalse)
{
/*
Identify which colormap entry is transparent.
*/
assert(number_colors <= 256);
assert(image->colormap != NULL);
for (i=0; i < (ssize_t) number_transparent; i++)
ping_trans_alpha[i]=0;
ping_num_trans=(unsigned short) (number_transparent +
number_semitransparent);
if (ping_num_trans == 0)
ping_have_tRNS=MagickFalse;
else
ping_have_tRNS=MagickTrue;
}
if (ping_exclude_bKGD == MagickFalse)
{
/*
* Identify which colormap entry is the background color.
*/
for (i=0; i < (ssize_t) MagickMax(1L*number_colors-1L,1L); i++)
if (IsPNGColorEqual(ping_background,image->colormap[i]))
break;
ping_background.index=(png_byte) i;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" background_color index is %d",
(int) ping_background.index);
}
}
} /* end of write_png8 */
else if (mng_info->write_png_colortype == 1)
{
image_matte=MagickFalse;
ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY;
}
else if (mng_info->write_png24 || mng_info->write_png48 ||
mng_info->write_png_colortype == 3)
{
image_matte=MagickFalse;
ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB;
}
else if (mng_info->write_png32 || mng_info->write_png64 ||
mng_info->write_png_colortype == 7)
{
image_matte=MagickTrue;
ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA;
}
else /* mng_info->write_pngNN not specified */
{
image_depth=ping_bit_depth;
if (mng_info->write_png_colortype != 0)
{
ping_color_type=(png_byte) mng_info->write_png_colortype-1;
if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ||
ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA)
image_matte=MagickTrue;
else
image_matte=MagickFalse;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PNG colortype %d was specified:",(int) ping_color_type);
}
else /* write_png_colortype not specified */
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Selecting PNG colortype:");
if (image_info->type == TrueColorType)
{
ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB;
image_matte=MagickFalse;
}
else if (image_info->type == TrueColorMatteType)
{
ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA;
image_matte=MagickTrue;
}
else if (image_info->type == PaletteType ||
image_info->type == PaletteMatteType)
ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE;
else
{
if (ping_have_color == MagickFalse)
{
if (image_matte == MagickFalse)
{
ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY;
image_matte=MagickFalse;
}
else
{
ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY_ALPHA;
image_matte=MagickTrue;
}
}
else
{
if (image_matte == MagickFalse)
{
ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB;
image_matte=MagickFalse;
}
else
{
ping_color_type=(png_byte) PNG_COLOR_TYPE_RGBA;
image_matte=MagickTrue;
}
}
}
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Selected PNG colortype=%d",ping_color_type);
if (ping_bit_depth < 8)
{
if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ||
ping_color_type == PNG_COLOR_TYPE_RGB ||
ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA)
ping_bit_depth=8;
}
old_bit_depth=ping_bit_depth;
if (ping_color_type == PNG_COLOR_TYPE_GRAY)
{
if (image->matte == MagickFalse && ping_have_non_bw == MagickFalse)
ping_bit_depth=1;
}
if (ping_color_type == PNG_COLOR_TYPE_PALETTE)
{
size_t one = 1;
ping_bit_depth=1;
if (image->colors == 0)
{
/* DO SOMETHING */
png_error(ping,"image has 0 colors");
}
while ((int) (one << ping_bit_depth) < (ssize_t) image_colors)
ping_bit_depth <<= 1;
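/* e.g. image_colors==5: depth doubles 1->2->4, since (1<<4)=16 >= 5 */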
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number of colors: %.20g",(double) image_colors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Tentative PNG bit depth: %d",ping_bit_depth);
}
if (ping_bit_depth < (int) mng_info->write_png_depth)
ping_bit_depth = mng_info->write_png_depth;
}
image_depth=ping_bit_depth;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Tentative PNG color type: %s (%.20g)",
PngColorTypeToString(ping_color_type),
(double) ping_color_type);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image_info->type: %.20g",(double) image_info->type);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image_depth: %.20g",(double) image_depth);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image->depth: %.20g",(double) image->depth);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ping_bit_depth: %.20g",(double) ping_bit_depth);
}
if (matte != MagickFalse)
{
if (mng_info->IsPalette)
{
if (mng_info->write_png_colortype == 0)
{
ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA;
if (ping_have_color != MagickFalse)
ping_color_type=PNG_COLOR_TYPE_RGBA;
}
/*
* Determine if there is any transparent color.
*/
if (number_transparent + number_semitransparent == 0)
{
/*
No transparent pixels are present. Change 4 or 6 to 0 or 2.
*/
image_matte=MagickFalse;
if (mng_info->write_png_colortype == 0)
ping_color_type&=0x03;
}
else
{
unsigned int
mask;
mask=0xffff;
if (ping_bit_depth == 8)
mask=0x00ff;
if (ping_bit_depth == 4)
mask=0x000f;
if (ping_bit_depth == 2)
mask=0x0003;
if (ping_bit_depth == 1)
mask=0x0001;
ping_trans_color.red=(png_uint_16)
(ScaleQuantumToShort(image->colormap[0].red) & mask);
ping_trans_color.green=(png_uint_16)
(ScaleQuantumToShort(image->colormap[0].green) & mask);
ping_trans_color.blue=(png_uint_16)
(ScaleQuantumToShort(image->colormap[0].blue) & mask);
ping_trans_color.gray=(png_uint_16)
(ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image,
image->colormap))) & mask);
ping_trans_color.index=(png_byte) 0;
ping_have_tRNS=MagickTrue;
}
if (ping_have_tRNS != MagickFalse)
{
/*
* Determine if there is one and only one transparent color
* and if so if it is fully transparent.
*/
if (ping_have_cheap_transparency == MagickFalse)
ping_have_tRNS=MagickFalse;
}
if (ping_have_tRNS != MagickFalse)
{
if (mng_info->write_png_colortype == 0)
ping_color_type &= 0x03; /* changes 4 or 6 to 0 or 2 */
if (image_depth == 8)
{
ping_trans_color.red&=0xff;
ping_trans_color.green&=0xff;
ping_trans_color.blue&=0xff;
ping_trans_color.gray&=0xff;
}
}
}
else
{
if (image_depth == 8)
{
ping_trans_color.red&=0xff;
ping_trans_color.green&=0xff;
ping_trans_color.blue&=0xff;
ping_trans_color.gray&=0xff;
}
}
}
matte=image_matte;
if (ping_have_tRNS != MagickFalse)
image_matte=MagickFalse;
if ((mng_info->IsPalette) &&
mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE &&
ping_have_color == MagickFalse &&
(image_matte == MagickFalse || image_depth >= 8))
{
size_t one=1;
if (image_matte != MagickFalse)
ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA;
else if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_GRAY_ALPHA)
{
ping_color_type=PNG_COLOR_TYPE_GRAY;
if (save_image_depth == 16 && image_depth == 8)
{
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Scaling ping_trans_color (0)");
}
ping_trans_color.gray*=0x0101;
}
}
if (image_depth > MAGICKCORE_QUANTUM_DEPTH)
image_depth=MAGICKCORE_QUANTUM_DEPTH;
if ((image_colors == 0) ||
((ssize_t) (image_colors-1) > (ssize_t) MaxColormapSize))
image_colors=(int) (one << image_depth);
if (image_depth > 8)
ping_bit_depth=16;
else
{
ping_bit_depth=8;
if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
{
if(!mng_info->write_png_depth)
{
ping_bit_depth=1;
while ((int) (one << ping_bit_depth)
< (ssize_t) image_colors)
ping_bit_depth <<= 1;
}
}
else if (ping_color_type ==
PNG_COLOR_TYPE_GRAY && image_colors < 17 &&
mng_info->IsPalette)
{
/* Check if grayscale is reducible */
int
depth_4_ok=MagickTrue,
depth_2_ok=MagickTrue,
depth_1_ok=MagickTrue;
for (i=0; i < (ssize_t) image_colors; i++)
{
unsigned char
intensity;
intensity=ScaleQuantumToChar(image->colormap[i].red);
if ((intensity & 0x0f) != ((intensity & 0xf0) >> 4))
depth_4_ok=depth_2_ok=depth_1_ok=MagickFalse;
else if ((intensity & 0x03) != ((intensity & 0x0c) >> 2))
depth_2_ok=depth_1_ok=MagickFalse;
else if ((intensity & 0x01) != ((intensity & 0x02) >> 1))
depth_1_ok=MagickFalse;
}
if (depth_1_ok && mng_info->write_png_depth <= 1)
ping_bit_depth=1;
else if (depth_2_ok && mng_info->write_png_depth <= 2)
ping_bit_depth=2;
else if (depth_4_ok && mng_info->write_png_depth <= 4)
ping_bit_depth=4;
}
}
image_depth=ping_bit_depth;
}
else
if (mng_info->IsPalette)
{
number_colors=image_colors;
if (image_depth <= 8)
{
/*
Set image palette.
*/
ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE;
if (!(mng_info->have_write_global_plte && matte == MagickFalse))
{
for (i=0; i < (ssize_t) number_colors; i++)
{
palette[i].red=ScaleQuantumToChar(image->colormap[i].red);
palette[i].green=ScaleQuantumToChar(image->colormap[i].green);
palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue);
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up PLTE chunk with %d colors",
(int) number_colors);
ping_have_PLTE=MagickTrue;
}
/* color_type is PNG_COLOR_TYPE_PALETTE */
if (mng_info->write_png_depth == 0)
{
size_t
one;
ping_bit_depth=1;
one=1;
while ((one << ping_bit_depth) < (size_t) number_colors)
ping_bit_depth <<= 1;
}
ping_num_trans=0;
if (matte != MagickFalse)
{
/*
* Set up trans_colors array.
*/
assert(number_colors <= 256);
ping_num_trans=(unsigned short) (number_transparent +
number_semitransparent);
if (ping_num_trans == 0)
ping_have_tRNS=MagickFalse;
else
{
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Scaling ping_trans_color (1)");
}
ping_have_tRNS=MagickTrue;
for (i=0; i < ping_num_trans; i++)
{
ping_trans_alpha[i]= (png_byte) (255-
ScaleQuantumToChar(image->colormap[i].opacity));
}
}
}
}
}
else
{
if (image_depth < 8)
image_depth=8;
if ((save_image_depth == 16) && (image_depth == 8))
{
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Scaling ping_trans_color from (%d,%d,%d)",
(int) ping_trans_color.red,
(int) ping_trans_color.green,
(int) ping_trans_color.blue);
}
ping_trans_color.red*=0x0101;
ping_trans_color.green*=0x0101;
ping_trans_color.blue*=0x0101;
ping_trans_color.gray*=0x0101;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" to (%d,%d,%d)",
(int) ping_trans_color.red,
(int) ping_trans_color.green,
(int) ping_trans_color.blue);
}
}
}
if (ping_bit_depth < (ssize_t) mng_info->write_png_depth)
ping_bit_depth = (ssize_t) mng_info->write_png_depth;
/*
Adjust background and transparency samples in sub-8-bit grayscale files.
*/
if (ping_bit_depth < 8 && ping_color_type ==
PNG_COLOR_TYPE_GRAY)
{
png_uint_16
maxval;
size_t
one=1;
maxval=(png_uint_16) ((one << ping_bit_depth)-1);
if (ping_exclude_bKGD == MagickFalse)
{
ping_background.gray=(png_uint_16)
((maxval/65535.)*(ScaleQuantumToShort((Quantum)
GetPixelLuma(image,&image->background_color)))+.5);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up bKGD chunk (2)");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ping_background.index is %d",
(int) ping_background.index);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ping_background.gray is %d",
(int) ping_background.gray);
}
ping_have_bKGD = MagickTrue;
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Scaling ping_trans_color.gray from %d",
(int)ping_trans_color.gray);
ping_trans_color.gray=(png_uint_16) ((maxval/255.)*(
ping_trans_color.gray)+.5);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" to %d", (int)ping_trans_color.gray);
}
if (ping_exclude_bKGD == MagickFalse)
{
if (mng_info->IsPalette && (int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
{
/*
Identify which colormap entry is the background color.
*/
number_colors=image_colors;
for (i=0; i < (ssize_t) MagickMax(1L*number_colors,1L); i++)
if (IsPNGColorEqual(image->background_color,image->colormap[i]))
break;
ping_background.index=(png_byte) i;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up bKGD chunk with index=%d",(int) i);
}
if (i < (ssize_t) number_colors)
{
ping_have_bKGD = MagickTrue;
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" background =(%d,%d,%d)",
(int) ping_background.red,
(int) ping_background.green,
(int) ping_background.blue);
}
}
else /* Can't happen */
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" No room in PLTE to add bKGD color");
ping_have_bKGD = MagickFalse;
}
}
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PNG color type: %s (%d)", PngColorTypeToString(ping_color_type),
ping_color_type);
/*
Initialize compression level and filtering.
*/
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up deflate compression");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression buffer size: 32768");
}
png_set_compression_buffer_size(ping,32768L);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression mem level: 9");
png_set_compression_mem_level(ping, 9);
/* Untangle the "-quality" setting:
Undefined is 0; the default is used.
Default is 75
10's digit:
0 or omitted: Use Z_HUFFMAN_ONLY strategy with the
zlib default compression level
1-9: the zlib compression level
1's digit:
0-4: the PNG filter method
5: libpng adaptive filtering if compression level > 5
libpng filter type "none" if compression level <= 5
or if image is grayscale or palette
6: libpng adaptive filtering
7: "LOCO" filtering (intrapixel differing) if writing
a MNG, otherwise "none". Did not work in IM-6.7.0-9
and earlier because of a missing "else".
8: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), adaptive
filtering. Unused prior to IM-6.7.0-10, was same as 6
9: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), no PNG filters
Unused prior to IM-6.7.0-10, was same as 6
Note that using the -quality option, not all combinations of
PNG filter type, zlib compression level, and zlib compression
strategy are possible. This is addressed by using
"-define png:compression-strategy", etc., which takes precedence
over -quality.
*/
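/* Illustrative example: -quality 75 yields zlib compression level
75/10 = 7 and filter digit 75%10 = 5, i.e. adaptive PNG filtering for
color images (grayscale and palette images fall back to filter NONE). */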
quality=image_info->quality == UndefinedCompressionQuality ? 75UL :
image_info->quality;
if (quality <= 9)
{
if (mng_info->write_png_compression_strategy == 0)
mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1;
}
else if (mng_info->write_png_compression_level == 0)
{
int
level;
level=(int) MagickMin((ssize_t) quality/10,9);
mng_info->write_png_compression_level = level+1;
}
if (mng_info->write_png_compression_strategy == 0)
{
if ((quality %10) == 8 || (quality %10) == 9)
#ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */
mng_info->write_png_compression_strategy=Z_RLE+1;
#else
mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1;
#endif
}
if (mng_info->write_png_compression_filter == 0)
mng_info->write_png_compression_filter=((int) quality % 10) + 1;
if (logging != MagickFalse)
{
if (mng_info->write_png_compression_level)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression level: %d",
(int) mng_info->write_png_compression_level-1);
if (mng_info->write_png_compression_strategy)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression strategy: %d",
(int) mng_info->write_png_compression_strategy-1);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up filtering");
if (mng_info->write_png_compression_filter == 6)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Base filter method: ADAPTIVE");
else if (mng_info->write_png_compression_filter == 0 ||
mng_info->write_png_compression_filter == 1)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Base filter method: NONE");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Base filter method: %d",
(int) mng_info->write_png_compression_filter-1);
}
if (mng_info->write_png_compression_level != 0)
png_set_compression_level(ping,mng_info->write_png_compression_level-1);
if (mng_info->write_png_compression_filter == 6)
{
if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) ||
((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) ||
(quality < 50))
png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS);
else
png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS);
}
else if (mng_info->write_png_compression_filter == 7 ||
mng_info->write_png_compression_filter == 10)
png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS);
else if (mng_info->write_png_compression_filter == 8)
{
#if defined(PNG_MNG_FEATURES_SUPPORTED) && defined(PNG_INTRAPIXEL_DIFFERENCING)
if (mng_info->write_mng)
{
if (((int) ping_color_type == PNG_COLOR_TYPE_RGB) ||
((int) ping_color_type == PNG_COLOR_TYPE_RGBA))
ping_filter_method=PNG_INTRAPIXEL_DIFFERENCING;
}
#endif
png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS);
}
else if (mng_info->write_png_compression_filter == 9)
png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS);
else if (mng_info->write_png_compression_filter != 0)
png_set_filter(ping,PNG_FILTER_TYPE_BASE,
mng_info->write_png_compression_filter-1);
if (mng_info->write_png_compression_strategy != 0)
png_set_compression_strategy(ping,
mng_info->write_png_compression_strategy-1);
ping_interlace_method=image_info->interlace != NoInterlace;
if (mng_info->write_mng)
png_set_sig_bytes(ping,8);
/* Bail out if cannot meet defined png:bit-depth or png:color-type */
if (mng_info->write_png_colortype != 0)
{
if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY)
if (ping_have_color != MagickFalse)
{
ping_color_type = PNG_COLOR_TYPE_RGB;
if (ping_bit_depth < 8)
ping_bit_depth=8;
}
if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY_ALPHA)
if (ping_have_color != MagickFalse)
ping_color_type = PNG_COLOR_TYPE_RGB_ALPHA;
}
if (ping_need_colortype_warning != MagickFalse ||
((mng_info->write_png_depth &&
(int) mng_info->write_png_depth != ping_bit_depth) ||
(mng_info->write_png_colortype &&
((int) mng_info->write_png_colortype-1 != ping_color_type &&
mng_info->write_png_colortype != 7 &&
!(mng_info->write_png_colortype == 5 && ping_color_type == 0)))))
{
if (logging != MagickFalse)
{
if (ping_need_colortype_warning != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image has transparency but tRNS chunk was excluded");
}
if (mng_info->write_png_depth)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Defined png:bit-depth=%u, Computed depth=%u",
mng_info->write_png_depth,
ping_bit_depth);
}
if (mng_info->write_png_colortype)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Defined png:color-type=%u, Computed color type=%u",
mng_info->write_png_colortype-1,
ping_color_type);
}
}
png_warning(ping,
"Cannot write image with defined png:bit-depth or png:color-type.");
}
if (image_matte != MagickFalse && image->matte == MagickFalse)
{
/* Add an opaque matte channel */
image->matte = MagickTrue;
(void) SetImageOpacity(image,0);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Added an opaque matte channel");
}
if (number_transparent != 0 || number_semitransparent != 0)
{
if (ping_color_type < 4)
{
ping_have_tRNS=MagickTrue;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting ping_have_tRNS=MagickTrue.");
}
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing PNG header chunks");
png_set_IHDR(ping,ping_info,ping_width,ping_height,
ping_bit_depth,ping_color_type,
ping_interlace_method,ping_compression_method,
ping_filter_method);
if (ping_color_type == 3 && ping_have_PLTE != MagickFalse)
{
if (mng_info->have_write_global_plte && matte == MagickFalse)
{
png_set_PLTE(ping,ping_info,NULL,0);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up empty PLTE chunk");
}
else
png_set_PLTE(ping,ping_info,palette,number_colors);
if (logging != MagickFalse)
{
for (i=0; i< (ssize_t) number_colors; i++)
{
if (i < ping_num_trans)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PLTE[%d] = (%d,%d,%d), tRNS[%d] = (%d)",
(int) i,
(int) palette[i].red,
(int) palette[i].green,
(int) palette[i].blue,
(int) i,
(int) ping_trans_alpha[i]);
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PLTE[%d] = (%d,%d,%d)",
(int) i,
(int) palette[i].red,
(int) palette[i].green,
(int) palette[i].blue);
}
}
}
/* Only write the iCCP chunk if we are not writing the sRGB chunk. */
if (ping_exclude_sRGB != MagickFalse ||
(!png_get_valid(ping,ping_info,PNG_INFO_sRGB)))
{
if ((ping_exclude_tEXt == MagickFalse ||
ping_exclude_zTXt == MagickFalse) &&
(ping_exclude_iCCP == MagickFalse || ping_exclude_zCCP == MagickFalse))
{
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
#ifdef PNG_WRITE_iCCP_SUPPORTED
if ((LocaleCompare(name,"ICC") == 0) ||
(LocaleCompare(name,"ICM") == 0))
{
if (ping_exclude_iCCP == MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up iCCP chunk");
png_set_iCCP(ping,ping_info,(const png_charp) name,0,
#if (PNG_LIBPNG_VER < 10500)
(png_charp) GetStringInfoDatum(profile),
#else
(const png_byte *) GetStringInfoDatum(profile),
#endif
(png_uint_32) GetStringInfoLength(profile));
ping_have_iCCP = MagickTrue;
}
}
else
#endif
{
if (LocaleCompare(name,"exif") == 0)
{
/* Do not write the exif profile as a raw text chunk; we will
write it later as an eXIf chunk */
name=GetNextImageProfile(image);
continue;
}
if (ping_exclude_zCCP == MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up zTXT chunk with uuencoded ICC");
Magick_png_write_raw_profile(image_info,ping,ping_info,
(unsigned char *) name,(unsigned char *) name,
GetStringInfoDatum(profile),
(png_uint_32) GetStringInfoLength(profile));
ping_have_iCCP = MagickTrue;
}
}
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up text chunk with %s profile",name);
name=GetNextImageProfile(image);
}
}
}
#if defined(PNG_WRITE_sRGB_SUPPORTED)
if ((mng_info->have_write_global_srgb == 0) &&
ping_have_iCCP != MagickTrue &&
(ping_have_sRGB != MagickFalse ||
png_get_valid(ping,ping_info,PNG_INFO_sRGB)))
{
if (ping_exclude_sRGB == MagickFalse)
{
/*
Note image rendering intent.
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up sRGB chunk");
(void) png_set_sRGB(ping,ping_info,(
Magick_RenderingIntent_to_PNG_RenderingIntent(
image->rendering_intent)));
ping_have_sRGB = MagickTrue;
}
}
if ((!mng_info->write_mng) || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB)))
#endif
{
if (ping_exclude_gAMA == MagickFalse &&
ping_have_iCCP == MagickFalse &&
ping_have_sRGB == MagickFalse &&
(ping_exclude_sRGB == MagickFalse ||
(image->gamma < .45 || image->gamma > .46)))
{
if ((mng_info->have_write_global_gama == 0) && (image->gamma != 0.0))
{
/*
Note image gamma.
To do: check for cHRM+gAMA == sRGB, and write sRGB instead.
*/
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up gAMA chunk");
png_set_gAMA(ping,ping_info,image->gamma);
}
}
if (ping_exclude_cHRM == MagickFalse && ping_have_sRGB == MagickFalse)
{
if ((mng_info->have_write_global_chrm == 0) &&
(image->chromaticity.red_primary.x != 0.0))
{
/*
Note image chromaticity.
Note: if cHRM+gAMA == sRGB write sRGB instead.
*/
PrimaryInfo
bp,
gp,
rp,
wp;
wp=image->chromaticity.white_point;
rp=image->chromaticity.red_primary;
gp=image->chromaticity.green_primary;
bp=image->chromaticity.blue_primary;
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up cHRM chunk");
png_set_cHRM(ping,ping_info,wp.x,wp.y,rp.x,rp.y,gp.x,gp.y,
bp.x,bp.y);
}
}
}
if (ping_exclude_bKGD == MagickFalse)
{
if (ping_have_bKGD != MagickFalse)
{
png_set_bKGD(ping,ping_info,&ping_background);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up bKGD chunk");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" background color = (%d,%d,%d)",
(int) ping_background.red,
(int) ping_background.green,
(int) ping_background.blue);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" index = %d, gray=%d",
(int) ping_background.index,
(int) ping_background.gray);
}
}
}
if (ping_exclude_pHYs == MagickFalse)
{
if (ping_have_pHYs != MagickFalse)
{
png_set_pHYs(ping,ping_info,
ping_pHYs_x_resolution,
ping_pHYs_y_resolution,
ping_pHYs_unit_type);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up pHYs chunk");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" x_resolution=%lu",
(unsigned long) ping_pHYs_x_resolution);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" y_resolution=%lu",
(unsigned long) ping_pHYs_y_resolution);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" unit_type=%lu",
(unsigned long) ping_pHYs_unit_type);
}
}
}
#if defined(PNG_tIME_SUPPORTED)
if (ping_exclude_tIME == MagickFalse)
{
const char
*timestamp;
if (image->taint == MagickFalse)
{
timestamp=GetImageOption(image_info,"png:tIME");
if (timestamp == (const char *) NULL)
timestamp=GetImageProperty(image,"png:tIME");
}
else
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reset tIME in tainted image");
timestamp=GetImageProperty(image,"date:modify");
}
if (timestamp != (const char *) NULL)
write_tIME_chunk(image,ping,ping_info,timestamp);
}
#endif
if (mng_info->need_blob != MagickFalse)
{
if (OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception) ==
MagickFalse)
png_error(ping,"WriteBlob Failed");
ping_have_blob=MagickTrue;
(void) ping_have_blob;
}
png_write_info_before_PLTE(ping, ping_info);
if (ping_have_tRNS != MagickFalse && ping_color_type < 4)
{
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Calling png_set_tRNS with num_trans=%d",ping_num_trans);
}
if (ping_color_type == 3)
(void) png_set_tRNS(ping, ping_info,
ping_trans_alpha,
ping_num_trans,
NULL);
else
{
(void) png_set_tRNS(ping, ping_info,
NULL,
0,
&ping_trans_color);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" tRNS color =(%d,%d,%d)",
(int) ping_trans_color.red,
(int) ping_trans_color.green,
(int) ping_trans_color.blue);
}
}
}
png_write_info(ping,ping_info);
ping_wrote_caNv = MagickFalse;
/* write caNv chunk */
if (ping_exclude_caNv == MagickFalse)
{
if ((image->page.width != 0 && image->page.width != image->columns) ||
(image->page.height != 0 && image->page.height != image->rows) ||
image->page.x != 0 || image->page.y != 0)
{
unsigned char
chunk[20];
(void) WriteBlobMSBULong(image,16L); /* data length=16 */
PNGType(chunk,mng_caNv);
LogPNGChunk(logging,mng_caNv,16L);
PNGLong(chunk+4,(png_uint_32) image->page.width);
PNGLong(chunk+8,(png_uint_32) image->page.height);
PNGsLong(chunk+12,(png_int_32) image->page.x);
PNGsLong(chunk+16,(png_int_32) image->page.y);
(void) WriteBlob(image,20,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,20));
ping_wrote_caNv = MagickTrue;
}
}
#if defined(PNG_oFFs_SUPPORTED)
if (ping_exclude_oFFs == MagickFalse && ping_wrote_caNv == MagickFalse)
{
if (image->page.x || image->page.y)
{
png_set_oFFs(ping,ping_info,(png_int_32) image->page.x,
(png_int_32) image->page.y, 0);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up oFFs chunk with x=%d, y=%d, units=0",
(int) image->page.x, (int) image->page.y);
}
}
#endif
#if (PNG_LIBPNG_VER == 10206)
/* avoid libpng-1.2.6 bug by setting PNG_HAVE_IDAT flag */
#define PNG_HAVE_IDAT 0x04
ping->mode |= PNG_HAVE_IDAT;
#undef PNG_HAVE_IDAT
#endif
png_set_packing(ping);
/*
Allocate memory.
*/
rowbytes=image->columns;
if (image_depth > 8)
rowbytes*=2;
switch (ping_color_type)
{
case PNG_COLOR_TYPE_RGB:
rowbytes*=3;
break;
case PNG_COLOR_TYPE_GRAY_ALPHA:
rowbytes*=2;
break;
case PNG_COLOR_TYPE_RGBA:
rowbytes*=4;
break;
default:
break;
}
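/* Illustrative: a 640-pixel-wide 16-bit RGBA row needs 640*2*4 = 5120 bytes */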
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing PNG image data");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Allocating %.20g bytes of memory for pixels",(double) rowbytes);
}
pixel_info=AcquireVirtualMemory(rowbytes,sizeof(*ping_pixels));
if (pixel_info == (MemoryInfo *) NULL)
png_error(ping,"Allocation of memory for pixels failed");
ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Initialize image scanlines.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
png_error(ping,"Memory allocation for quantum_info failed");
quantum_info->format=UndefinedQuantumFormat;
SetQuantumDepth(image,quantum_info,image_depth);
(void) SetQuantumEndian(image,quantum_info,MSBEndian);
num_passes=png_set_interlace_handling(ping);
if ((!mng_info->write_png8 && !mng_info->write_png24 &&
!mng_info->write_png48 && !mng_info->write_png64 &&
!mng_info->write_png32) &&
(mng_info->IsPalette ||
(image_info->type == BilevelType)) &&
image_matte == MagickFalse &&
ping_have_non_bw == MagickFalse)
{
/* Palette, Bilevel, or Opaque Monochrome */
register const PixelPacket
*p;
SetQuantumDepth(image,quantum_info,8);
for (pass=0; pass < num_passes; pass++)
{
/*
Convert PseudoClass image to a PNG monochrome image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing row of pixels (0)");
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
if (mng_info->IsPalette)
{
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,ping_pixels,&image->exception);
if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_PALETTE &&
mng_info->write_png_depth &&
mng_info->write_png_depth != old_bit_depth)
{
/* Undo pixel scaling */
for (i=0; i < (ssize_t) image->columns; i++)
*(ping_pixels+i)=(unsigned char) (*(ping_pixels+i)
>> (8-old_bit_depth));
}
}
else
{
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,RedQuantum,ping_pixels,&image->exception);
}
if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE)
for (i=0; i < (ssize_t) image->columns; i++)
*(ping_pixels+i)=(unsigned char) ((*(ping_pixels+i) > 127) ?
255 : 0);
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing row of pixels (1)");
png_write_row(ping,ping_pixels);
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) (pass * image->rows + y),
num_passes * image->rows);
if (status == MagickFalse)
break;
}
}
}
else /* Not Palette, Bilevel, or Opaque Monochrome */
{
if ((!mng_info->write_png8 && !mng_info->write_png24 &&
!mng_info->write_png48 && !mng_info->write_png64 &&
!mng_info->write_png32) && (image_matte != MagickFalse ||
(ping_bit_depth >= MAGICKCORE_QUANTUM_DEPTH)) &&
(mng_info->IsPalette) && ping_have_color == MagickFalse)
{
register const PixelPacket
*p;
for (pass=0; pass < num_passes; pass++)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
if (p == (const PixelPacket *) NULL)
break;
if (ping_color_type == PNG_COLOR_TYPE_GRAY)
{
if (mng_info->IsPalette)
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,ping_pixels,&image->exception);
else
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,RedQuantum,ping_pixels,&image->exception);
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GRAY PNG pixels (2)");
}
else /* PNG_COLOR_TYPE_GRAY_ALPHA */
{
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GRAY_ALPHA PNG pixels (2)");
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayAlphaQuantum,ping_pixels,&image->exception);
}
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing row of pixels (2)");
png_write_row(ping,ping_pixels);
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) (pass * image->rows + y),
num_passes * image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
register const PixelPacket
*p;
for (pass=0; pass < num_passes; pass++)
{
if ((image_depth > 8) ||
mng_info->write_png24 ||
mng_info->write_png32 ||
mng_info->write_png48 ||
mng_info->write_png64 ||
(!mng_info->write_png8 && !mng_info->IsPalette))
{
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
if (ping_color_type == PNG_COLOR_TYPE_GRAY)
{
if (image->storage_class == DirectClass)
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,RedQuantum,ping_pixels,&image->exception);
else
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,ping_pixels,&image->exception);
}
else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
{
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayAlphaQuantum,ping_pixels,
&image->exception);
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GRAY_ALPHA PNG pixels (3)");
}
else if (image_matte != MagickFalse)
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,RGBAQuantum,ping_pixels,&image->exception);
else
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,RGBQuantum,ping_pixels,&image->exception);
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing row of pixels (3)");
png_write_row(ping,ping_pixels);
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) (pass * image->rows + y),
num_passes * image->rows);
if (status == MagickFalse)
break;
}
}
else
/* not ((image_depth > 8) ||
mng_info->write_png24 || mng_info->write_png32 ||
mng_info->write_png48 || mng_info->write_png64 ||
(!mng_info->write_png8 && !mng_info->IsPalette))
*/
{
if ((ping_color_type != PNG_COLOR_TYPE_GRAY) &&
(ping_color_type != PNG_COLOR_TYPE_GRAY_ALPHA))
{
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" pass %d, Image Is not GRAY or GRAY_ALPHA",pass);
SetQuantumDepth(image,quantum_info,8);
image_depth=8;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" pass %d, Image Is RGB, 16-bit GRAY, or GRAY_ALPHA",pass);
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
if (ping_color_type == PNG_COLOR_TYPE_GRAY)
{
SetQuantumDepth(image,quantum_info,image->depth);
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayQuantum,ping_pixels,&image->exception);
}
else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
{
if (logging != MagickFalse && y == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing GRAY_ALPHA PNG pixels (4)");
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,GrayAlphaQuantum,ping_pixels,
&image->exception);
}
else
{
(void) ExportQuantumPixels(image,(const CacheView *) NULL,
quantum_info,IndexQuantum,ping_pixels,&image->exception);
if (logging != MagickFalse && y <= 2)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing row of non-gray pixels (4)");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" ping_pixels[0]=%d,ping_pixels[1]=%d",
(int)ping_pixels[0],(int)ping_pixels[1]);
}
}
png_write_row(ping,ping_pixels);
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) (pass * image->rows + y),
num_passes * image->rows);
if (status == MagickFalse)
break;
}
}
}
}
}
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Wrote PNG image data");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Width: %.20g",(double) ping_width);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Height: %.20g",(double) ping_height);
if (mng_info->write_png_depth)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Defined png:bit-depth: %d",mng_info->write_png_depth);
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PNG bit-depth written: %d",ping_bit_depth);
if (mng_info->write_png_colortype)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Defined png:color-type: %d",mng_info->write_png_colortype-1);
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PNG color-type written: %d",ping_color_type);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" PNG Interlace method: %d",ping_interlace_method);
}
/*
Generate text chunks after IDAT.
*/
if (ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse)
{
ResetImagePropertyIterator(image);
property=GetNextImageProperty(image);
while (property != (const char *) NULL)
{
png_textp
text;
value=GetImageProperty(image,property);
/* Don't write any "png:" or "jpeg:" properties; those are just for
* "identify" or for passing through to another JPEG
*/
if ((LocaleNCompare(property,"png:",4) != 0 &&
LocaleNCompare(property,"jpeg:",5) != 0) &&
/* Suppress density and units if we wrote a pHYs chunk */
(ping_exclude_pHYs != MagickFalse ||
LocaleCompare(property,"density") != 0 ||
LocaleCompare(property,"units") != 0) &&
/* Suppress the IM-generated Date:create and Date:modify */
(ping_exclude_date == MagickFalse ||
LocaleNCompare(property, "Date:",5) != 0))
{
if (value != (const char *) NULL)
{
#if PNG_LIBPNG_VER >= 10400
text=(png_textp) png_malloc(ping,
(png_alloc_size_t) sizeof(png_text));
#else
text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text));
#endif
text[0].key=(char *) property;
text[0].text=(char *) value;
text[0].text_length=strlen(value);
if (ping_exclude_tEXt != MagickFalse)
text[0].compression=PNG_TEXT_COMPRESSION_zTXt;
else if (ping_exclude_zTXt != MagickFalse)
text[0].compression=PNG_TEXT_COMPRESSION_NONE;
else
{
text[0].compression=image_info->compression == NoCompression ||
(image_info->compression == UndefinedCompression &&
text[0].text_length < 128) ? PNG_TEXT_COMPRESSION_NONE :
PNG_TEXT_COMPRESSION_zTXt ;
}
if (logging != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Setting up text chunk");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" keyword: '%s'",text[0].key);
}
png_set_text(ping,ping_info,text,1);
png_free(ping,text);
}
}
property=GetNextImageProperty(image);
}
}
/* write eXIf profile */
if (ping_have_eXIf != MagickFalse && ping_exclude_eXIf == MagickFalse)
{
char
*name;
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
if (LocaleCompare(name,"exif") == 0)
{
const StringInfo
*profile;
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
png_uint_32
length;
unsigned char
chunk[4],
*data;
StringInfo
*ping_profile;
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Have eXIf profile");
ping_profile=CloneStringInfo(profile);
data=GetStringInfoDatum(ping_profile),
length=(png_uint_32) GetStringInfoLength(ping_profile);
PNGType(chunk,mng_eXIf);
if (length < 7)
{
ping_profile=DestroyStringInfo(ping_profile);
break; /* otherwise crashes */
}
if (*data == 'E' && *(data+1) == 'x' && *(data+2) == 'i' &&
*(data+3) == 'f' && *(data+4) == '\0' && *(data+5) == '\0')
{
/* skip the "Exif\0\0" JFIF Exif Header ID */
length -= 6;
data += 6;
}
LogPNGChunk(logging,chunk,length);
(void) WriteBlobMSBULong(image,length);
(void) WriteBlob(image,4,chunk);
(void) WriteBlob(image,length,data);
(void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4), data,
(uInt) length));
ping_profile=DestroyStringInfo(ping_profile);
break;
}
}
name=GetNextImageProfile(image);
}
}
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing PNG end info");
png_write_end(ping,ping_info);
if (mng_info->need_fram && (int) image->dispose == BackgroundDispose)
{
if (mng_info->page.x || mng_info->page.y ||
(ping_width != mng_info->page.width) ||
(ping_height != mng_info->page.height))
{
unsigned char
chunk[32];
/*
Write FRAM 4 with clipping boundaries followed by FRAM 1.
*/
(void) WriteBlobMSBULong(image,27L); /* data length=27 */
PNGType(chunk,mng_FRAM);
LogPNGChunk(logging,mng_FRAM,27L);
chunk[4]=4;
chunk[5]=0; /* frame name separator (no name) */
chunk[6]=1; /* flag for changing delay, for next frame only */
chunk[7]=0; /* flag for changing frame timeout */
chunk[8]=1; /* flag for changing frame clipping for next frame */
chunk[9]=0; /* flag for changing frame sync_id */
PNGLong(chunk+10,(png_uint_32) (0L)); /* temporary 0 delay */
chunk[14]=0; /* clipping boundaries delta type */
PNGLong(chunk+15,(png_uint_32) (mng_info->page.x)); /* left cb */
PNGLong(chunk+19,
(png_uint_32) (mng_info->page.x + ping_width));
PNGLong(chunk+23,(png_uint_32) (mng_info->page.y)); /* top cb */
PNGLong(chunk+27,
(png_uint_32) (mng_info->page.y + ping_height));
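/* chunk+19 and chunk+27 hold the right and bottom clipping boundaries */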
(void) WriteBlob(image,31,chunk);
(void) WriteBlobMSBULong(image,crc32(0,chunk,31));
mng_info->old_framing_mode=4;
mng_info->framing_mode=1;
}
else
mng_info->framing_mode=3;
}
if (mng_info->write_mng && !mng_info->need_fram &&
((int) image->dispose == 3))
png_error(ping, "Cannot convert GIF with disposal method 3 to MNG-LC");
/*
Free PNG resources.
*/
png_destroy_write_struct(&ping,&ping_info);
pixel_info=RelinquishVirtualMemory(pixel_info);
/* Store bit depth actually written */
s[0]=(char) ping_bit_depth;
s[1]='\0';
(void) SetImageProperty(image,"png:bit-depth-written",s);
if (logging != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" exit WriteOnePNGImage()");
#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
UnlockSemaphoreInfo(ping_semaphore);
#endif
/* } for navigation to beginning of SETJMP-protected block. Revert to
* Throwing an Exception when an error occurs.
*/
return(MagickTrue);
/* End write one PNG image */
}
| 0 |
[
"CWE-772"
] |
ImageMagick6
|
3449a06f0122d4d9e68b4739417a3eaad0b24265
| 213,562,608,869,729,260,000,000,000,000,000,000,000 | 3,578 |
https://github.com/ImageMagick/ImageMagick/issues/1201
|
void frma_box_del(GF_Box *s)
{
GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s;
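/* guard against a NULL box so deleting a never-allocated frma box cannot crash */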
if (ptr == NULL) return;
gf_free(ptr);
}
| 0 |
[
"CWE-703"
] |
gpac
|
f19668964bf422cf5a63e4dbe1d3c6c75edadcbb
| 303,581,142,343,240,350,000,000,000,000,000,000,000 | 6 |
fixed #1879
|
nautilus_file_get_icon (NautilusFile *file,
int size,
NautilusFileIconFlags flags)
{
NautilusIconInfo *icon;
GIcon *gicon;
GdkPixbuf *raw_pixbuf, *scaled_pixbuf;
int modified_size;
if (file == NULL) {
return NULL;
}
gicon = get_custom_icon (file);
if (gicon) {
icon = nautilus_icon_info_lookup (gicon, size);
g_object_unref (gicon);
return icon;
}
if (flags & NAUTILUS_FILE_ICON_FLAGS_FORCE_THUMBNAIL_SIZE) {
modified_size = size;
} else {
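/* scale the requested size by the user's thumbnail-size preference */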
modified_size = size * cached_thumbnail_size / NAUTILUS_ICON_SIZE_STANDARD;
}
if (flags & NAUTILUS_FILE_ICON_FLAGS_USE_THUMBNAILS &&
nautilus_file_should_show_thumbnail (file)) {
if (file->details->thumbnail) {
int w, h, s;
double scale;
raw_pixbuf = g_object_ref (file->details->thumbnail);
w = gdk_pixbuf_get_width (raw_pixbuf);
h = gdk_pixbuf_get_height (raw_pixbuf);
s = MAX (w, h);
scale = (double)modified_size / s;
if (scale > 0.99) {
/* never scale any thumbnails up */
scaled_pixbuf = gdk_pixbuf_new (GDK_COLORSPACE_RGB,
gdk_pixbuf_get_has_alpha (raw_pixbuf),
gdk_pixbuf_get_bits_per_sample (raw_pixbuf),
w * scale, h * scale);
gdk_pixbuf_fill (scaled_pixbuf, 0xffffff00);
gdk_pixbuf_copy_area (raw_pixbuf,
0, 0, w, h,
scaled_pixbuf,
(gdk_pixbuf_get_width (scaled_pixbuf) - w) / 2,
(gdk_pixbuf_get_height (scaled_pixbuf) - h) / 2);
} else {
scaled_pixbuf = gdk_pixbuf_scale_simple (raw_pixbuf,
w * scale, h * scale,
GDK_INTERP_BILINEAR);
}
nautilus_thumbnail_frame_image (&scaled_pixbuf);
g_object_unref (raw_pixbuf);
if (modified_size > 128 &&
!file->details->thumbnail_wants_original) {
/* Invalidate if we resize upward */
file->details->thumbnail_wants_original = TRUE;
nautilus_file_invalidate_attributes (file, NAUTILUS_FILE_ATTRIBUTE_THUMBNAIL);
}
icon = nautilus_icon_info_new_for_pixbuf (scaled_pixbuf);
g_object_unref (scaled_pixbuf);
return icon;
} else if (file->details->thumbnail_path == NULL &&
file->details->can_read &&
!file->details->is_thumbnailing &&
(!file->details->thumbnailing_failed ||
!nautilus_has_valid_failed_thumbnail (file))) {
if (nautilus_can_thumbnail (file)) {
nautilus_create_thumbnail (file);
}
}
}
if (file->details->is_thumbnailing &&
flags & NAUTILUS_FILE_ICON_FLAGS_USE_THUMBNAILS)
gicon = g_themed_icon_new (ICON_NAME_THUMBNAIL_LOADING);
else
gicon = nautilus_file_get_gicon (file, flags);
if (gicon) {
icon = nautilus_icon_info_lookup (gicon, size);
if (nautilus_icon_info_is_fallback (icon)) {
g_object_unref (icon);
icon = nautilus_icon_info_lookup (get_default_file_icon (flags), size);
}
g_object_unref (gicon);
return icon;
} else {
return nautilus_icon_info_lookup (get_default_file_icon (flags), size);
}
}
| 0 |
[] |
nautilus
|
7632a3e13874a2c5e8988428ca913620a25df983
| 45,580,439,508,203,710,000,000,000,000,000,000,000 | 100 |
Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003
|
ref_param_read_signal_error(gs_param_list * plist, gs_param_name pkey, int code)
{
iparam_list *const iplist = (iparam_list *) plist;
iparam_loc loc = {0};
ref_param_read(iplist, pkey, &loc, -1);
if (loc.presult)
*loc.presult = code;
switch (ref_param_read_get_policy(plist, pkey)) {
case gs_param_policy_ignore:
return 0;
case gs_param_policy_consult_user:
return_error(gs_error_configurationerror);
default:
return code;
}
}
| 0 |
[] |
ghostpdl
|
c3476dde7743761a4e1d39a631716199b696b880
| 329,341,864,197,710,230,000,000,000,000,000,000,000 | 17 |
Bug 699656: Handle LockDistillerParams not being a boolean
This caused a function call commented as "Can't fail" to fail, and resulted
in memory corruption and a segfault.
|
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
.p = p,
.src_cpu = task_cpu(p),
.src_nid = task_node(p),
.imbalance_pct = 112,
.best_task = NULL,
.best_imp = 0,
.best_cpu = -1,
};
unsigned long taskweight, groupweight;
struct sched_domain *sd;
long taskimp, groupimp;
struct numa_group *ng;
struct rq *best_rq;
int nid, ret, dist;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
* imbalance and would be the first to start moving tasks about.
*
* And we want to avoid any moving of tasks about, as that would create
* random movement of tasks -- counter the numa conditions we're trying
* to satisfy here.
*/
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
if (sd)
env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
rcu_read_unlock();
/*
* Cpusets can break the scheduler domain tree into smaller
* balance domains, some of which do not cross NUMA boundaries.
* Tasks that are "trapped" in such domains cannot be migrated
* elsewhere, so there is no point in (re)trying.
*/
if (unlikely(!sd)) {
sched_setnuma(p, task_node(p));
return -EINVAL;
}
env.dst_nid = p->numa_preferred_nid;
dist = env.dist = node_distance(env.src_nid, env.dst_nid);
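/* Baseline: how attractive the source node currently is for the task and its group */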
taskweight = task_weight(p, env.src_nid, dist);
groupweight = group_weight(p, env.src_nid, dist);
update_numa_stats(&env.src_stats, env.src_nid);
taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
update_numa_stats(&env.dst_stats, env.dst_nid);
/* Try to find a spot on the preferred nid. */
task_numa_find_cpu(&env, taskimp, groupimp);
/*
* Look at other nodes in these cases:
* - there is no space available on the preferred_nid
* - the task is part of a numa_group that is interleaved across
* multiple NUMA nodes; in order to better consolidate the group,
* we need to check other locations.
*/
ng = deref_curr_numa_group(p);
if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
for_each_online_node(nid) {
if (nid == env.src_nid || nid == p->numa_preferred_nid)
continue;
dist = node_distance(env.src_nid, env.dst_nid);
if (sched_numa_topology_type == NUMA_BACKPLANE &&
dist != env.dist) {
taskweight = task_weight(p, env.src_nid, dist);
groupweight = group_weight(p, env.src_nid, dist);
}
/* Only consider nodes where both task and groups benefit */
taskimp = task_weight(p, nid, dist) - taskweight;
groupimp = group_weight(p, nid, dist) - groupweight;
if (taskimp < 0 && groupimp < 0)
continue;
env.dist = dist;
env.dst_nid = nid;
update_numa_stats(&env.dst_stats, env.dst_nid);
task_numa_find_cpu(&env, taskimp, groupimp);
}
}
/*
* If the task is part of a workload that spans multiple NUMA nodes,
* and is migrating into one of the workload's active nodes, remember
* this node as the task's preferred numa node, so the workload can
* settle down.
* A task that migrated to a second choice node will be better off
* trying for a better one later. Do not set the preferred node here.
*/
if (ng) {
if (env.best_cpu == -1)
nid = env.src_nid;
else
nid = cpu_to_node(env.best_cpu);
if (nid != p->numa_preferred_nid)
sched_setnuma(p, nid);
}
/* No better CPU than the current one was found. */
if (env.best_cpu == -1)
return -EAGAIN;
best_rq = cpu_rq(env.best_cpu);
if (env.best_task == NULL) {
ret = migrate_task_to(p, env.best_cpu);
WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
return ret;
}
ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
put_task_struct(env.best_task);
return ret;
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
linux
|
de53fd7aedb100f03e5d2231cfce0e4993282425
| 106,661,456,565,647,740,000,000,000,000,000,000,000 | 130 |
sched/fair: Fix low cpu usage with high throttling by removing expiration of cpu-local slices
It has been observed that highly-threaded, non-cpu-bound applications
running under cpu.cfs_quota_us constraints can hit a high percentage of
periods throttled while simultaneously not consuming the allocated
amount of quota. This use case is typical of user-interactive, non-cpu-bound
applications, such as those running in kubernetes or mesos when
run on multiple cpu cores.
This has been root-caused to cpu-local run queues being allocated per-cpu
bandwidth slices, and then not fully using those slices within the period.
At which point the slice and quota expires. This expiration of unused
slice results in applications not being able to utilize the quota for
which they are allocated.
The non-expiration of per-cpu slices was recently fixed by
'commit 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift
condition")'. Prior to that it appears that this had been broken since
at least 'commit 51f2176d74ac ("sched/fair: Fix unlocked reads of some
cfs_b->quota/period")' which was introduced in v3.16-rc1 in 2014. That
added the following conditional which resulted in slices never being
expired.
if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
Because this was broken for nearly 5 years, and has recently been fixed
and is now being noticed by many users running kubernetes
(https://github.com/kubernetes/kubernetes/issues/67577) it is my opinion
that the mechanisms around expiring runtime should be removed
altogether.
This allows quota already allocated to per-cpu run-queues to live longer
than the period boundary. This allows threads on runqueues that do not
use much CPU to continue to use their remaining slice over a longer
period of time than cpu.cfs_period_us. However, this helps prevent the
above condition of hitting throttling while also not fully utilizing
your cpu quota.
This theoretically allows a machine to use slightly more than its
allotted quota in some periods. This overflow would be bounded by the
remaining quota left on each per-cpu runqueue. This is typically no
more than min_cfs_rq_runtime=1ms per cpu. For CPU bound tasks this will
change nothing, as they should theoretically fully utilize all of their
quota in each period. For user-interactive tasks as described above this
provides a much better user/application experience as their cpu
utilization will more closely match the amount they requested when they
hit throttling. This means that cpu limits no longer strictly apply per
period for non-cpu bound applications, but that they are still accurate
over longer timeframes.
This greatly improves performance of high-thread-count, non-cpu bound
applications with low cfs_quota_us allocation on high-core-count
machines. In the case of an artificial testcase (10ms/100ms of quota on
80 CPU machine), this commit resulted in almost 30x performance
improvement, while still maintaining correct cpu quota restrictions.
That testcase is available at https://github.com/indeedeng/fibtest.
Fixes: 512ac999d275 ("sched/fair: Fix bandwidth timer clock drift condition")
Signed-off-by: Dave Chiluk <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Phil Auld <[email protected]>
Reviewed-by: Ben Segall <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: John Hammond <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Kyle Anderson <[email protected]>
Cc: Gabriel Munos <[email protected]>
Cc: Peter Oskolkov <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: Brendan Gregg <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
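For context, a hedged sketch of the per-cfs_rq expiration logic this commit
removes; field names follow kernel/sched/fair.c of that era, but the body is a
simplification, not the verbatim kernel source:

/* Sketch (simplified): how a cpu-local slice used to be invalidated.
 * After this commit the local runtime simply persists until consumed. */
static void expire_cfs_rq_runtime_sketch(struct cfs_rq *cfs_rq,
                                         struct cfs_bandwidth *cfs_b)
{
	if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
		/* extend local deadline, drift is bounded above by 2 ticks */
		cfs_rq->runtime_expires += TICK_NSEC;
	} else {
		/* global period really ended: discard the unused local slice */
		cfs_rq->runtime_remaining = 0;
	}
}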
|
static int init_subctxts(struct qib_devdata *dd,
struct qib_ctxtdata *rcd,
const struct qib_user_info *uinfo)
{
int ret = 0;
unsigned num_subctxts;
size_t size;
/*
* If the user is requesting zero subctxts,
* skip the subctxt allocation.
*/
if (uinfo->spu_subctxt_cnt <= 0)
goto bail;
num_subctxts = uinfo->spu_subctxt_cnt;
/* Check for subctxt compatibility */
if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
uinfo->spu_userversion & 0xffff)) {
qib_devinfo(dd->pcidev,
"Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
(int) (uinfo->spu_userversion >> 16),
(int) (uinfo->spu_userversion & 0xffff),
QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
goto bail;
}
if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
ret = -EINVAL;
goto bail;
}
rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
if (!rcd->subctxt_uregbase) {
ret = -ENOMEM;
goto bail;
}
/* Note: rcd->rcvhdrq_size isn't initialized yet. */
size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
sizeof(u32), PAGE_SIZE) * num_subctxts;
rcd->subctxt_rcvhdr_base = vmalloc_user(size);
if (!rcd->subctxt_rcvhdr_base) {
ret = -ENOMEM;
goto bail_ureg;
}
rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
rcd->rcvegrbuf_size *
num_subctxts);
if (!rcd->subctxt_rcvegrbuf) {
ret = -ENOMEM;
goto bail_rhdr;
}
rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
rcd->subctxt_id = uinfo->spu_subctxt_id;
rcd->active_slaves = 1;
rcd->redirect_seq_cnt = 1;
set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
goto bail;
bail_rhdr:
vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
vfree(rcd->subctxt_uregbase);
rcd->subctxt_uregbase = NULL;
bail:
return ret;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3
| 33,605,214,458,241,240,000,000,000,000,000,000,000 | 68 |
IB/security: Restrict use of the write() interface
The drivers/infiniband stack uses write() as a replacement for
bi-directional ioctl(). This is not safe. There are ways to
trigger write calls that result in the return structure that
is normally written to user space being shunted off to user
specified kernel memory instead.
For the immediate repair, detect and deny suspicious accesses to
the write API.
For long term, update the user space libraries and the kernel API
to something that doesn't present the same security vulnerabilities
(likely a structured ioctl() interface).
The impacted uAPI interfaces are generally only available if
hardware from drivers/infiniband is installed in the system.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
[ Expanded check to all known write() entry points ]
Cc: [email protected]
Signed-off-by: Doug Ledford <[email protected]>
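A hedged sketch of the kind of guard this change adds at write() entry points;
the helper name matches the upstream fix, but the body here is simplified and
uses the address-limit API of that era:

/* Deny write() calls whose credentials or address limit suggest the
 * "response" could be redirected into kernel memory (sketch). */
static bool ib_safe_file_access(struct file *filp)
{
	return filp->f_cred == current_cred() &&
	       segment_eq(get_fs(), USER_DS);
}

/* at each write() entry point: */
if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
	return -EACCES;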
|
int uwsgi_long2str2n(unsigned long long num, char *ptr, int size) {
int ret = snprintf(ptr, size, "%llu", num);
if (ret <= 0 || ret > size)
return 0;
return ret;
}
| 0 |
[
"CWE-119",
"CWE-703",
"CWE-787"
] |
uwsgi
|
cb4636f7c0af2e97a4eef7a3cdcbd85a71247bfe
| 307,174,088,568,968,730,000,000,000,000,000,000,000 | 6 |
improve uwsgi_expand_path() to sanitize input, avoiding stack corruption and potential security issue
|
unsigned long dtls1_output_cert_chain(SSL *s, X509 *x)
{
unsigned char *p;
int i;
unsigned long l = 3 + DTLS1_HM_HEADER_LENGTH;
BUF_MEM *buf;
/* TLSv1 sends a chain with nothing in it, instead of an alert */
buf = s->init_buf;
if (!BUF_MEM_grow_clean(buf, 10)) {
SSLerr(SSL_F_DTLS1_OUTPUT_CERT_CHAIN, ERR_R_BUF_LIB);
return (0);
}
if (x != NULL) {
X509_STORE_CTX xs_ctx;
if (!X509_STORE_CTX_init(&xs_ctx, s->ctx->cert_store, x, NULL)) {
SSLerr(SSL_F_DTLS1_OUTPUT_CERT_CHAIN, ERR_R_X509_LIB);
return (0);
}
X509_verify_cert(&xs_ctx);
/* Don't leave errors in the queue */
ERR_clear_error();
for (i = 0; i < sk_X509_num(xs_ctx.chain); i++) {
x = sk_X509_value(xs_ctx.chain, i);
if (!dtls1_add_cert_to_buf(buf, &l, x)) {
X509_STORE_CTX_cleanup(&xs_ctx);
return 0;
}
}
X509_STORE_CTX_cleanup(&xs_ctx);
}
/* Thawte special :-) */
for (i = 0; i < sk_X509_num(s->ctx->extra_certs); i++) {
x = sk_X509_value(s->ctx->extra_certs, i);
if (!dtls1_add_cert_to_buf(buf, &l, x))
return 0;
}
l -= (3 + DTLS1_HM_HEADER_LENGTH);
p = (unsigned char *)&(buf->data[DTLS1_HM_HEADER_LENGTH]);
l2n3(l, p);
l += 3;
p = (unsigned char *)&(buf->data[0]);
p = dtls1_set_message_header(s, p, SSL3_MT_CERTIFICATE, l, 0, l);
l += DTLS1_HM_HEADER_LENGTH;
return (l);
}
| 0 |
[
"CWE-399"
] |
openssl
|
00a4c1421407b6ac796688871b0a49a179c694d9
| 288,536,467,658,217,500,000,000,000,000,000,000,000 | 52 |
Fix DTLS buffered message DoS attack
DTLS can handle out-of-order record delivery. Additionally, since
handshake messages can be bigger than will fit into a single packet, the
messages can be fragmented across multiple records (as with normal TLS).
That means that the messages can arrive mixed up, and we have to
reassemble them. We keep a queue of buffered messages that are "from the
future", i.e. messages we're not ready to deal with yet but have arrived
early. The messages held there may not be full yet - they could be one
or more fragments that are still in the process of being reassembled.
The code assumes that we will eventually complete the reassembly and
when that occurs the complete message is removed from the queue at the
point that we need to use it.
However, DTLS is also tolerant of packet loss. To get around that, DTLS
messages can be retransmitted. If we receive a full (non-fragmented)
message from the peer after previously having received a fragment of
that message, then we ignore the message in the queue and just use the
non-fragmented version. At that point the queued message will never get
removed.
Additionally the peer could send "future" messages that we never get to
in order to complete the handshake. Each message has a sequence number
(starting from 0). We will accept a message fragment for the current
message sequence number, or for any sequence up to 10 into the future.
However if the Finished message has a sequence number of 2, anything
greater than that in the queue is just left there.
So, in those two ways we can end up with "orphaned" data in the queue
that will never get removed - except when the connection is closed. At
that point all the queues are flushed.
An attacker could seek to exploit this by filling up the queues with
lots of large messages that are never going to be used in order to
attempt a DoS by memory exhaustion.
I will assume that we are only concerned with servers here. It does not
seem reasonable to be concerned about a memory exhaustion attack on a
client. They are unlikely to process enough connections for this to be
an issue.
A "long" handshake with many messages might be 5 messages long (in the
incoming direction), e.g. ClientHello, Certificate, ClientKeyExchange,
CertificateVerify, Finished. So this would be message sequence numbers 0
to 4. Additionally we can buffer up to 10 messages in the future.
Therefore the maximum number of messages that an attacker could send
that could get orphaned would typically be 15.
The maximum size that a DTLS message is allowed to be is defined by
max_cert_list, which by default is 100k. Therefore the maximum amount of
"orphaned" memory per connection is 1500k.
Message sequence numbers get reset after the Finished message, so
renegotiation will not extend the maximum number of messages that can be
orphaned per connection.
As noted above, the queues do get cleared when the connection is closed.
Therefore in order to mount an effective attack, an attacker would have
to open many simultaneous connections.
Issue reported by Quan Luo.
CVE-2016-2179
Reviewed-by: Richard Levitte <[email protected]>
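Putting the figures above together (a back-of-the-envelope bound, not a
measured value):

  5 in-flight sequence numbers + 10 buffered "future" numbers = 15 messages
  15 messages x 100k (default max_cert_list) = ~1500k orphaned per connection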
|
void DcmSCP::notifyRECEIVEProgress(const unsigned long byteCount)
{
DCMNET_TRACE("Bytes received: " << byteCount);
}
| 0 |
[
"CWE-264"
] |
dcmtk
|
beaf5a5c24101daeeafa48c375120b16197c9e95
| 59,533,661,229,096,690,000,000,000,000,000,000,000 | 4 |
Make sure to handle setuid() return code properly.
In some tools the return value of setuid() is not checked. In the worst
case this could lead to privilege escalation since the process does not
give up its root privileges and continues running as root.
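A minimal C sketch of the pattern the fix enforces (illustrative only; the
helper name is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Drop root privileges and abort if the kernel refuses: continuing
 * as root after a failed setuid() is the privilege-escalation case. */
static void drop_privileges(uid_t uid)
{
	if (setuid(uid) != 0) {
		perror("setuid");
		exit(EXIT_FAILURE);
	}
}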
|
static void airo_process_scan_results (struct airo_info *ai) {
union iwreq_data wrqu;
BSSListRid bss;
int rc;
BSSListElement * loop_net;
BSSListElement * tmp_net;
/* Blow away current list of scan results */
list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
list_move_tail (&loop_net->list, &ai->network_free_list);
/* Don't blow away ->list, just BSS data */
memset (loop_net, 0, sizeof (loop_net->bss));
}
/* Try to read the first entry of the scan result */
rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
if((rc) || (bss.index == cpu_to_le16(0xffff))) {
/* No scan results */
goto out;
}
/* Read and parse all entries */
tmp_net = NULL;
while((!rc) && (bss.index != cpu_to_le16(0xffff))) {
/* Grab a network off the free list */
if (!list_empty(&ai->network_free_list)) {
tmp_net = list_entry(ai->network_free_list.next,
BSSListElement, list);
list_del(ai->network_free_list.next);
}
if (tmp_net != NULL) {
memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
list_add_tail(&tmp_net->list, &ai->network_list);
tmp_net = NULL;
}
/* Read next entry */
rc = PC4500_readrid(ai, ai->bssListNext,
&bss, ai->bssListRidLen, 0);
}
out:
ai->scan_timeout = 0;
clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
up(&ai->sem);
/* Send an empty event to user space.
* We don't send the received data on
* the event because it would require
* us to do complex transcoding, and
* we want to minimise the work done in
* the irq handler. Use a request to
* extract the data - Jean II */
wrqu.data.length = 0;
wrqu.data.flags = 0;
wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
}
| 0 |
[
"CWE-703",
"CWE-264"
] |
linux
|
550fd08c2cebad61c548def135f67aba284c6162
| 75,952,076,789,841,470,000,000,000,000,000,000,000 | 58 |
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
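The per-driver change itself is small; a hedged sketch (dev stands for the
driver's net_device, set up elsewhere):

/* ether_setup() sets IFF_TX_SKB_SHARING by default; drivers that keep
 * state in their skbs must opt back out. */
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;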
|
static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar)
{
struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
int i;
for (i = 0; i < ATH10K_USB_PIPE_MAX; i++)
ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]);
}
| 0 |
[
"CWE-476"
] |
linux
|
bfd6e6e6c5d2ee43a3d9902b36e01fc7527ebb27
| 267,677,089,777,521,900,000,000,000,000,000,000,000 | 8 |
ath10k: Fix a NULL-ptr-deref bug in ath10k_usb_alloc_urb_from_pipe
The `ar_usb` field of each `ath10k_usb_pipe` object
is initialized to point to the containing `ath10k_usb` object
according to endpoint descriptors read from the device side, as shown
below in `ath10k_usb_setup_pipe_resources`:
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
// get the address from endpoint descriptor
pipe_num = ath10k_usb_get_logical_pipe_num(ar_usb,
endpoint->bEndpointAddress,
&urbcount);
......
// select the pipe object
pipe = &ar_usb->pipes[pipe_num];
// initialize the ar_usb field
pipe->ar_usb = ar_usb;
}
The driver assumes the addresses reported in endpoint
descriptors from the device side to be complete. If a device is
malicious and does not report complete addresses, it may trigger
a NULL-ptr-deref in `ath10k_usb_alloc_urb_from_pipe` and
`ath10k_usb_free_urb_to_pipe`.
This patch fixes the bug by preventing potential NULL-ptr-deref.
Signed-off-by: Hui Peng <[email protected]>
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
[groeck: Add driver tag to subject, fix build warning]
Signed-off-by: Guenter Roeck <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
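A hedged sketch of the guard described above, as it would sit at the top of
ath10k_usb_alloc_urb_from_pipe (simplified from the fix):

/* Endpoint was never reported by the device, so this pipe was never
 * wired up; bail out instead of dereferencing a NULL ar_usb. */
if (!pipe->ar_usb)
	return NULL;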
|
hivex_open (const char *filename, int flags)
{
hive_h *h = NULL;
assert (sizeof (struct ntreg_header) == 0x1000);
assert (offsetof (struct ntreg_header, csum) == 0x1fc);
h = calloc (1, sizeof *h);
if (h == NULL)
goto error;
h->msglvl = flags & HIVEX_OPEN_MSGLVL_MASK;
const char *debug = getenv ("HIVEX_DEBUG");
if (debug && STREQ (debug, "1"))
h->msglvl = 2;
DEBUG (2, "created handle %p", h);
h->writable = !!(flags & HIVEX_OPEN_WRITE);
h->filename = strdup (filename);
if (h->filename == NULL)
goto error;
#ifdef O_CLOEXEC
h->fd = open (filename, O_RDONLY | O_CLOEXEC | O_BINARY);
#else
h->fd = open (filename, O_RDONLY | O_BINARY);
#endif
if (h->fd == -1)
goto error;
#ifndef O_CLOEXEC
fcntl (h->fd, F_SETFD, FD_CLOEXEC);
#endif
struct stat statbuf;
if (fstat (h->fd, &statbuf) == -1)
goto error;
h->size = statbuf.st_size;
if (h->size < 0x2000) {
SET_ERRNO (EINVAL,
"%s: file is too small to be a Windows NT Registry hive file",
filename);
goto error;
}
if (!h->writable) {
h->addr = mmap (NULL, h->size, PROT_READ, MAP_SHARED, h->fd, 0);
if (h->addr == MAP_FAILED)
goto error;
DEBUG (2, "mapped file at %p", h->addr);
} else {
h->addr = malloc (h->size);
if (h->addr == NULL)
goto error;
if (full_read (h->fd, h->addr, h->size) < h->size)
goto error;
/* We don't need the file descriptor along this path, since we
* have read all the data.
*/
if (close (h->fd) == -1)
goto error;
h->fd = -1;
}
/* Check header. */
if (h->hdr->magic[0] != 'r' ||
h->hdr->magic[1] != 'e' ||
h->hdr->magic[2] != 'g' ||
h->hdr->magic[3] != 'f') {
SET_ERRNO (ENOTSUP,
"%s: not a Windows NT Registry hive file", filename);
goto error;
}
/* Check major version. */
uint32_t major_ver = le32toh (h->hdr->major_ver);
if (major_ver != 1) {
SET_ERRNO (ENOTSUP,
"%s: hive file major version %" PRIu32 " (expected 1)",
filename, major_ver);
goto error;
}
h->bitmap = calloc (1 + h->size / 32, 1);
if (h->bitmap == NULL)
goto error;
/* Header checksum. */
uint32_t sum = header_checksum (h);
if (sum != le32toh (h->hdr->csum)) {
SET_ERRNO (EINVAL, "%s: bad checksum in hive header", filename);
goto error;
}
/* Last modified time. */
h->last_modified = le64toh ((int64_t) h->hdr->last_modified);
if (h->msglvl >= 2) {
char *name = _hivex_windows_utf16_to_utf8 (h->hdr->name, 64);
fprintf (stderr,
"hivex_open: header fields:\n"
" file version %" PRIu32 ".%" PRIu32 "\n"
" sequence nos %" PRIu32 " %" PRIu32 "\n"
" (sequences nos should match if hive was synched at shutdown)\n"
" last modified %" PRIu64 "\n"
" (Windows filetime, x 100 ns since 1601-01-01)\n"
" original file name %s\n"
" (only 32 chars are stored, name is probably truncated)\n"
" root offset 0x%x + 0x1000\n"
" end of last page 0x%x + 0x1000 (total file size 0x%zx)\n"
" checksum 0x%x (calculated 0x%x)\n",
major_ver, le32toh (h->hdr->minor_ver),
le32toh (h->hdr->sequence1), le32toh (h->hdr->sequence2),
h->last_modified,
name ? name : "(conversion failed)",
le32toh (h->hdr->offset),
le32toh (h->hdr->blocks), h->size,
le32toh (h->hdr->csum), sum);
free (name);
}
h->rootoffs = le32toh (h->hdr->offset) + 0x1000;
h->endpages = le32toh (h->hdr->blocks) + 0x1000;
DEBUG (2, "root offset = 0x%zx", h->rootoffs);
/* We'll set this flag when we see a block with the root offset (ie.
* the root block).
*/
int seen_root_block = 0, bad_root_block = 0;
/* Collect some stats. */
size_t pages = 0; /* Number of hbin pages read. */
size_t smallest_page = SIZE_MAX, largest_page = 0;
size_t blocks = 0; /* Total number of blocks found. */
size_t smallest_block = SIZE_MAX, largest_block = 0, blocks_bytes = 0;
size_t used_blocks = 0; /* Total number of used blocks found. */
size_t used_size = 0; /* Total size (bytes) of used blocks. */
/* Read the pages and blocks. The aim here is to be robust against
* corrupt or malicious registries. So we make sure the loops
* always make forward progress. We add the address of each block
* we read to a hash table so pointers will only reference the start
* of valid blocks.
*/
size_t off;
struct ntreg_hbin_page *page;
for (off = 0x1000; off < h->size; off += le32toh (page->page_size)) {
if (off >= h->endpages)
break;
page = (struct ntreg_hbin_page *) ((char *) h->addr + off);
if (page->magic[0] != 'h' ||
page->magic[1] != 'b' ||
page->magic[2] != 'i' ||
page->magic[3] != 'n') {
SET_ERRNO (ENOTSUP,
"%s: trailing garbage at end of file "
"(at 0x%zx, after %zu pages)",
filename, off, pages);
goto error;
}
size_t page_size = le32toh (page->page_size);
DEBUG (2, "page at 0x%zx, size %zu", off, page_size);
pages++;
if (page_size < smallest_page) smallest_page = page_size;
if (page_size > largest_page) largest_page = page_size;
if (page_size <= sizeof (struct ntreg_hbin_page) ||
(page_size & 0x0fff) != 0) {
SET_ERRNO (ENOTSUP,
"%s: page size %zu at 0x%zx, bad registry",
filename, page_size, off);
goto error;
}
/* Read the blocks in this page. */
size_t blkoff;
struct ntreg_hbin_block *block;
size_t seg_len;
for (blkoff = off + 0x20;
blkoff < off + page_size;
blkoff += seg_len) {
blocks++;
int is_root = blkoff == h->rootoffs;
if (is_root)
seen_root_block = 1;
block = (struct ntreg_hbin_block *) ((char *) h->addr + blkoff);
int used;
seg_len = block_len (h, blkoff, &used);
if (seg_len <= 4 || (seg_len & 3) != 0) {
SET_ERRNO (ENOTSUP,
"%s: block size %" PRIu32 " at 0x%zx, bad registry",
filename, le32toh (block->seg_len), blkoff);
goto error;
}
if (h->msglvl >= 2) {
unsigned char *id = (unsigned char *) block->id;
int id0 = id[0], id1 = id[1];
fprintf (stderr, "%s: %s: "
"%s block id %d,%d (%c%c) at 0x%zx size %zu%s\n",
"hivex", __func__,
used ? "used" : "free",
id0, id1,
c_isprint (id0) ? id0 : '.',
c_isprint (id1) ? id1 : '.',
blkoff,
seg_len, is_root ? " (root)" : "");
}
blocks_bytes += seg_len;
if (seg_len < smallest_block) smallest_block = seg_len;
if (seg_len > largest_block) largest_block = seg_len;
if (is_root && !used)
bad_root_block = 1;
if (used) {
used_blocks++;
used_size += seg_len;
/* Root block must be an nk-block. */
if (is_root && (block->id[0] != 'n' || block->id[1] != 'k'))
bad_root_block = 1;
/* Note this blkoff is a valid address. */
BITMAP_SET (h->bitmap, blkoff);
}
}
}
if (!seen_root_block) {
SET_ERRNO (ENOTSUP, "%s: no root block found", filename);
goto error;
}
if (bad_root_block) {
SET_ERRNO (ENOTSUP, "%s: bad root block (free or not nk)", filename);
goto error;
}
DEBUG (1, "successfully read Windows Registry hive file:\n"
" pages: %zu [sml: %zu, lge: %zu]\n"
" blocks: %zu [sml: %zu, avg: %zu, lge: %zu]\n"
" blocks used: %zu\n"
" bytes used: %zu",
pages, smallest_page, largest_page,
blocks, smallest_block, blocks_bytes / blocks, largest_block,
used_blocks, used_size);
return h;
error:;
int err = errno;
if (h) {
free (h->bitmap);
if (h->addr && h->size && h->addr != MAP_FAILED) {
if (!h->writable)
munmap (h->addr, h->size);
else
free (h->addr);
}
if (h->fd >= 0)
close (h->fd);
free (h->filename);
free (h);
}
errno = err;
return NULL;
}
| 1 |
[
"CWE-119"
] |
hivex
|
4bbdf555f88baeae0fa804a369a81a83908bd705
| 333,121,745,700,717,530,000,000,000,000,000,000,000 | 282 |
handle: Check that pages do not extend beyond the end of the file.
Thanks: Mahmoud Al-Qudsi
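A hedged sketch of the missing validation named in the subject, written in the
style of the surrounding checks (exact placement inside hivex_open may differ):

if (page_size > h->size - off) {
	SET_ERRNO (ENOTSUP,
	           "%s: page size %zu at 0x%zx extends beyond end of file",
	           filename, page_size, off);
	goto error;
}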
|
XkbSizeKeyTypes(XkbDescPtr xkb, xkbGetMapReply * rep)
{
XkbKeyTypeRec *type;
unsigned i, len;
len = 0;
if (((rep->present & XkbKeyTypesMask) == 0) || (rep->nTypes < 1) ||
(!xkb) || (!xkb->map) || (!xkb->map->types)) {
rep->present &= ~XkbKeyTypesMask;
rep->firstType = rep->nTypes = 0;
return 0;
}
type = &xkb->map->types[rep->firstType];
for (i = 0; i < rep->nTypes; i++, type++) {
len += SIZEOF(xkbKeyTypeWireDesc);
if (type->map_count > 0) {
len += (type->map_count * SIZEOF(xkbKTMapEntryWireDesc));
if (type->preserve)
len += (type->map_count * SIZEOF(xkbModsWireDesc));
}
}
return len;
}
| 0 |
[
"CWE-119"
] |
xserver
|
f7cd1276bbd4fe3a9700096dec33b52b8440788d
| 46,575,533,828,339,410,000,000,000,000,000,000,000 | 23 |
Correct bounds checking in XkbSetNames()
CVE-2020-14345 / ZDI 11428
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]>
|
uint64_t LUKS2_keyslots_size(struct luks2_hdr *hdr)
{
return LUKS2_keyslots_size_jobj(hdr->jobj);
}
| 0 |
[
"CWE-787"
] |
cryptsetup
|
52f5cb8cedf22fb3e14c744814ec8af7614146c7
| 221,717,088,703,356,050,000,000,000,000,000,000,000 | 4 |
Check segment gaps regardless of heap space.
Segments are validated in hdr_validate_segments. Gaps in segment keys
are detected when collecting offsets. But if an invalid segment is very
large, larger than count, it could happen that cryptsetup is unable to
allocate enough memory, not giving a clue about what actually is the
problem.
Therefore check for gaps even if not enough memory is available. This
gives much more information with debug output enabled.
Obviously cryptsetup still fails if segments are perfectly fine but not
enough RAM available. But at that stage, the user knows that it's the
fault of the system, not of an invalid segment.
|
TEST(IndexBoundsBuilderTest, TranslateGteMaxKey) {
auto testIndex = buildSimpleIndexEntry();
BSONObj obj = BSON("a" << BSON("$gte" << MAXKEY));
auto expr = parseMatchExpression(obj);
BSONElement elt = obj.firstElement();
OrderedIntervalList oil;
IndexBoundsBuilder::BoundsTightness tightness;
IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
ASSERT_EQUALS(oil.name, "a");
ASSERT_EQUALS(oil.intervals.size(), 1U);
ASSERT_EQUALS(oil.intervals[0].toString(), "[MaxKey, MaxKey]");
ASSERT_TRUE(oil.intervals[0].startInclusive);
ASSERT_TRUE(oil.intervals[0].endInclusive);
ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
}
| 0 |
[
"CWE-754"
] |
mongo
|
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
| 32,896,292,034,751,820,000,000,000,000,000,000,000 | 15 |
SERVER-44377 generate correct plan for indexed inequalities to null
|
QString Helper::temporaryMountDevice(const QString &device, const QString &name, bool readonly)
{
QString mount_point = mountPoint(device);
if (!mount_point.isEmpty())
return mount_point;
mount_point = "%1/.%2/mount/%3";
const QStringList &tmp_paths = QStandardPaths::standardLocations(QStandardPaths::TempLocation);
mount_point = mount_point.arg(tmp_paths.isEmpty() ? "/tmp" : tmp_paths.first()).arg(qApp->applicationName()).arg(name);
if (!QDir::current().mkpath(mount_point)) {
dCError("mkpath \"%s\" failed", qPrintable(mount_point));
return QString();
}
if (!mountDevice(device, mount_point, readonly)) {
dCError("Mount the device \"%s\" to \"%s\" failed", qPrintable(device), qPrintable(mount_point));
return QString();
}
return mount_point;
}
| 1 |
[
"CWE-59",
"CWE-61"
] |
deepin-clone
|
e079f3e2712b4f8c28e3e63e71ba1a1f90fce1ab
| 201,571,069,556,954,040,000,000,000,000,000,000,000 | 26 |
fix: Do not use the "/tmp" directory
https://github.com/linuxdeepin/deepin-clone/issues/16
https://bugzilla.opensuse.org/show_bug.cgi?id=1130388
|
njs_vmcode_try_start(njs_vm_t *vm, njs_value_t *exception_value,
njs_value_t *offset, u_char *pc)
{
njs_value_t *exit_value;
njs_frame_t *frame;
njs_exception_t *e;
njs_vmcode_try_start_t *try_start;
frame = (njs_frame_t *) vm->top_frame;
if (frame->exception.catch != NULL) {
e = njs_mp_alloc(vm->mem_pool, sizeof(njs_exception_t));
if (njs_slow_path(e == NULL)) {
njs_memory_error(vm);
return NJS_ERROR;
}
*e = frame->exception;
frame->exception.next = e;
}
frame->exception.catch = pc + (njs_jump_off_t) offset;
njs_set_invalid(exception_value);
try_start = (njs_vmcode_try_start_t *) pc;
exit_value = njs_scope_value(vm, try_start->exit_value);
njs_set_invalid(exit_value);
njs_number(exit_value) = 0;
return sizeof(njs_vmcode_try_start_t);
}
| 0 |
[
"CWE-703",
"CWE-754"
] |
njs
|
222d6fdcf0c6485ec8e175f3a7b70d650c234b4e
| 178,167,896,682,624,000,000,000,000,000,000,000,000 | 33 |
Fixed njs_vmcode_interpreter() when "toString" conversion fails.
Previously, while interpreting a user function, njs_vmcode_interpreter()
might return prematurely when an error happens. This is not correct
because the current frame has to be unwound (or exception caught)
first.
The fix is to exit through only 5 appropriate exit points, ensuring
proper unwinding.
This closes #467 issue on Github.
|
get_unique_target_file (GFile *src,
GFile *dest_dir,
gboolean same_fs,
const char *dest_fs_type,
int count)
{
const char *editname, *end;
char *basename, *new_name;
GFileInfo *info;
GFile *dest;
int max_length;
max_length = get_max_name_length (dest_dir);
dest = NULL;
info = g_file_query_info (src,
G_FILE_ATTRIBUTE_STANDARD_EDIT_NAME,
0, NULL, NULL);
if (info != NULL)
{
editname = g_file_info_get_attribute_string (info, G_FILE_ATTRIBUTE_STANDARD_EDIT_NAME);
if (editname != NULL)
{
new_name = get_duplicate_name (editname, count, max_length);
make_file_name_valid_for_dest_fs (new_name, dest_fs_type);
dest = g_file_get_child_for_display_name (dest_dir, new_name, NULL);
g_free (new_name);
}
g_object_unref (info);
}
if (dest == NULL)
{
basename = g_file_get_basename (src);
if (g_utf8_validate (basename, -1, NULL))
{
new_name = get_duplicate_name (basename, count, max_length);
make_file_name_valid_for_dest_fs (new_name, dest_fs_type);
dest = g_file_get_child_for_display_name (dest_dir, new_name, NULL);
g_free (new_name);
}
if (dest == NULL)
{
end = strrchr (basename, '.');
if (end != NULL)
{
count += atoi (end + 1);
}
new_name = g_strdup_printf ("%s.%d", basename, count);
make_file_name_valid_for_dest_fs (new_name, dest_fs_type);
dest = g_file_get_child (dest_dir, new_name);
g_free (new_name);
}
g_free (basename);
}
return dest;
}
| 0 |
[
"CWE-20"
] |
nautilus
|
1630f53481f445ada0a455e9979236d31a8d3bb0
| 104,752,973,077,681,150,000,000,000,000,000,000,000 | 63 |
mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents running random programs via a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be set without access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991
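A hedged sketch of setting that attribute through GIO (the attribute key comes
from the commit; the value string and the elided error handling are
assumptions):

g_file_set_attribute_string (file,
                             "metadata::trusted",
                             "true",               /* assumed value */
                             G_FILE_QUERY_INFO_NONE,
                             NULL,                 /* cancellable */
                             NULL);                /* GError */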
|
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
int result;
int wanted;
if (is_handle_aborted(handle))
return -EROFS;
journal = transaction->t_journal;
result = 1;
read_lock(&journal->j_state_lock);
/* Don't extend a locked-down transaction! */
if (transaction->t_state != T_RUNNING) {
jbd_debug(3, "denied handle %p %d blocks: "
"transaction not running\n", handle, nblocks);
goto error_out;
}
spin_lock(&transaction->t_handle_lock);
wanted = atomic_add_return(nblocks,
&transaction->t_outstanding_credits);
if (wanted > journal->j_max_transaction_buffers) {
jbd_debug(3, "denied handle %p %d blocks: "
"transaction too large\n", handle, nblocks);
atomic_sub(nblocks, &transaction->t_outstanding_credits);
goto unlock;
}
if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
jbd2_log_space_left(journal)) {
jbd_debug(3, "denied handle %p %d blocks: "
"insufficient log space\n", handle, nblocks);
atomic_sub(nblocks, &transaction->t_outstanding_credits);
goto unlock;
}
trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
transaction->t_tid,
handle->h_type, handle->h_line_no,
handle->h_buffer_credits,
nblocks);
handle->h_buffer_credits += nblocks;
handle->h_requested_credits += nblocks;
result = 0;
jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
spin_unlock(&transaction->t_handle_lock);
error_out:
read_unlock(&journal->j_state_lock);
return result;
}
| 0 |
[
"CWE-787"
] |
linux
|
e09463f220ca9a1a1ecfda84fcda658f99a1f12a
| 134,934,786,260,996,020,000,000,000,000,000,000,000 | 58 |
jbd2: don't mark block as modified if the handle is out of credits
Do not set the b_modified flag in a block's journal head until after
we're sure that jbd2_journal_dirty_metadata() will not abort with an
error due to there not being enough space reserved in the jbd2 handle.
Otherwise, future attempts to modify the buffer may lead to a large
number of spurious errors and warnings.
This addresses CVE-2018-10883.
https://bugzilla.kernel.org/show_bug.cgi?id=200071
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
decode_stack_action(const struct nx_action_stack *nasp,
const struct vl_mff_map *vl_mff_map, uint64_t *tlv_bitmap,
struct ofpact_stack *stack_action)
{
enum ofperr error;
stack_action->subfield.ofs = ntohs(nasp->offset);
struct ofpbuf b = ofpbuf_const_initializer(nasp, sizeof *nasp);
ofpbuf_pull(&b, OBJECT_OFFSETOF(nasp, pad));
error = mf_vl_mff_nx_pull_header(&b, vl_mff_map,
&stack_action->subfield.field, NULL,
tlv_bitmap);
if (error) {
return error;
}
stack_action->subfield.n_bits = ntohs(*(const ovs_be16 *) b.data);
ofpbuf_pull(&b, 2);
if (!is_all_zeros(b.data, b.size)) {
return OFPERR_NXBRC_MUST_BE_ZERO;
}
return 0;
}
| 0 |
[
"CWE-125"
] |
ovs
|
9237a63c47bd314b807cda0bd2216264e82edbe8
| 315,464,086,734,975,800,000,000,000,000,000,000,000 | 24 |
ofp-actions: Avoid buffer overread in BUNDLE action decoding.
Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]>
|
static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
size_t count)
{
size_t event_size = sizeof(struct inotify_event);
struct inotify_kernel_event *kevent;
if (list_empty(&dev->events))
return NULL;
kevent = inotify_dev_get_event(dev);
if (kevent->name)
event_size += kevent->event.len;
if (event_size > count)
return ERR_PTR(-EINVAL);
remove_kevent(dev, kevent);
return kevent;
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
3632dee2f8b8a9720329f29eeaa4ec4669a3aff8
| 122,017,204,466,254,050,000,000,000,000,000,000,000 | 19 |
inotify: clean up inotify_read and fix locking problems
If userspace supplies an invalid pointer to a read() of an inotify
instance, the inotify device's event list mutex is unlocked twice.
This causes an unbalance which effectively leaves the data structure
unprotected, and we can trigger oopses by accessing the inotify
instance from different tasks concurrently.
The best fix (contributed largely by Linus) is a total rewrite
of the function in question:
On Thu, Jan 22, 2009 at 7:05 AM, Linus Torvalds wrote:
> The thing to notice is that:
>
> - locking is done in just one place, and there is no question about it
> not having an unlock.
>
> - that whole double-while(1)-loop thing is gone.
>
> - use multiple functions to make nesting and error handling sane
>
> - do error testing after doing the things you always need to do, ie do
> this:
>
> mutex_lock(..)
> ret = function_call();
> mutex_unlock(..)
>
> .. test ret here ..
>
> instead of doing conditional exits with unlocking or freeing.
>
> So if the code is written in this way, it may still be buggy, but at least
> it's not buggy because of subtle "forgot to unlock" or "forgot to free"
> issues.
>
> This _always_ unlocks if it locked, and it always frees if it got a
> non-error kevent.
Cc: John McCutchan <[email protected]>
Cc: Robert Love <[email protected]>
Cc: <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
ssize_t number_layers)
{
register ssize_t
i;
ssize_t
j;
for (i=0; i < number_layers; i++)
{
if (layer_info[i].image == (Image *) NULL)
{
for (j=i; j < number_layers - 1; j++)
layer_info[j] = layer_info[j+1];
number_layers--;
i--;
}
}
if (number_layers == 0)
{
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
return;
}
for (i=0; i < number_layers; i++)
{
if (i > 0)
layer_info[i].image->previous=layer_info[i-1].image;
if (i < (number_layers-1))
layer_info[i].image->next=layer_info[i+1].image;
layer_info[i].image->page=layer_info[i].page;
}
image->next=layer_info[0].image;
layer_info[0].image->previous=image;
layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
| 0 |
[
"CWE-399",
"CWE-401"
] |
ImageMagick
|
8a43abefb38c5e29138e1c9c515b313363541c06
| 237,048,996,803,358,420,000,000,000,000,000,000,000 | 36 |
https://github.com/ImageMagick/ImageMagick/issues/1451
|
COMPAT_SYSCALL_DEFINE6(io_pgetevents,
compat_aio_context_t, ctx_id,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
struct __compat_aio_sigset ksig = { NULL, };
sigset_t ksigmask, sigsaved;
struct timespec64 t;
int ret;
if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
if (ret)
return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
restore_user_sigmask(ksig.sigmask, &sigsaved);
if (signal_pending(current) && !ret)
ret = -ERESTARTNOHAND;
return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
84c4e1f89fefe70554da0ab33be72c9be7994379
| 2,375,318,418,518,261,000,000,000,000,000,000,000 | 30 |
aio: simplify - and fix - fget/fput for io_submit()
Al Viro root-caused a race where the IOCB_CMD_POLL handling of
fget/fput() could cause us to access the file pointer after it had
already been freed:
"In more details - normally IOCB_CMD_POLL handling looks so:
1) io_submit(2) allocates aio_kiocb instance and passes it to
aio_poll()
2) aio_poll() resolves the descriptor to struct file by req->file =
fget(iocb->aio_fildes)
3) aio_poll() sets ->woken to false and raises ->ki_refcnt of that
aio_kiocb to 2 (bumps by 1, that is).
4) aio_poll() calls vfs_poll(). After sanity checks (basically,
"poll_wait() had been called and only once") it locks the queue.
That's what the extra reference to iocb had been for - we know we
can safely access it.
5) With queue locked, we check if ->woken has already been set to
true (by aio_poll_wake()) and, if it had been, we unlock the
queue, drop a reference to aio_kiocb and bugger off - at that
point it's a responsibility to aio_poll_wake() and the stuff
called/scheduled by it. That code will drop the reference to file
in req->file, along with the other reference to our aio_kiocb.
6) otherwise, we see whether we need to wait. If we do, we unlock the
queue, drop one reference to aio_kiocb and go away - eventual
wakeup (or cancel) will deal with the reference to file and with
the other reference to aio_kiocb
7) otherwise we remove ourselves from waitqueue (still under the
queue lock), so that wakeup won't get us. No async activity will
be happening, so we can safely drop req->file and iocb ourselves.
If wakeup happens while we are in vfs_poll(), we are fine - aio_kiocb
won't get freed under us, so we can do all the checks and locking
safely. And we don't touch ->file if we detect that case.
However, vfs_poll() most certainly *does* touch the file it had been
given. So wakeup coming while we are still in ->poll() might end up
doing fput() on that file. That case is not too rare, and usually we
are saved by the still present reference from descriptor table - that
fput() is not the final one.
But if another thread closes that descriptor right after our fget()
and wakeup does happen before ->poll() returns, we are in trouble -
final fput() done while we are in the middle of a method:
Al also wrote a patch to take an extra reference to the file descriptor
to fix this, but I instead suggested we just streamline the whole file
pointer handling by submit_io() so that the generic aio submission code
simply keeps the file pointer around until the aio has completed.
Fixes: bfe4037e722e ("aio: implement IOCB_CMD_POLL")
Acked-by: Al Viro <[email protected]>
Reported-by: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
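A hedged sketch of the lifetime rule the fix adopts: the submit path takes the
file reference exactly once, and only the iocb teardown drops it (simplified;
names follow fs/aio.c of that era):

/* in the submit path */
req->ki_filp = fget(iocb->aio_fildes);
if (unlikely(!req->ki_filp))
	return -EBADF;

/* in the single teardown path, after the aio has completed */
fput(req->ki_filp);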
|
TEST_P(WasmTest, StatsHighLevel) {
Stats::IsolatedStoreImpl stats_store;
Api::ApiPtr api = Api::createApiForTest(stats_store);
Upstream::MockClusterManager cluster_manager;
Event::DispatcherPtr dispatcher(api->allocateDispatcher());
auto scope = Stats::ScopeSharedPtr(stats_store.createScope("wasm."));
NiceMock<LocalInfo::MockLocalInfo> local_info;
auto name = "";
auto root_id = "";
auto vm_id = "";
auto vm_configuration = "";
auto plugin = std::make_shared<Extensions::Common::Wasm::Plugin>(
name, root_id, vm_id, envoy::api::v2::core::TrafficDirection::UNSPECIFIED, local_info,
nullptr);
auto wasm = std::make_unique<Extensions::Common::Wasm::Wasm>(
absl::StrCat("envoy.wasm.runtime.", GetParam()), vm_id, vm_configuration, plugin, scope,
cluster_manager, *dispatcher);
EXPECT_NE(wasm, nullptr);
const auto code = TestEnvironment::readFileToStringForTest(TestEnvironment::substitute(
"{{ test_rundir }}/test/extensions/wasm/test_data/stats_cpp.wasm"));
EXPECT_FALSE(code.empty());
auto context = std::make_unique<TestContext>(wasm.get());
context->setInVmContextCreatedForTesting();
EXPECT_CALL(*context, scriptLog_(spdlog::level::trace, Eq("get counter = 1")));
EXPECT_CALL(*context, scriptLog_(spdlog::level::debug, Eq("get counter = 2")));
// recordMetric on a Counter is the same as increment.
EXPECT_CALL(*context, scriptLog_(spdlog::level::info, Eq("get counter = 5")));
EXPECT_CALL(*context, scriptLog_(spdlog::level::warn, Eq("get gauge = 2")));
// Get is not supported on histograms.
// EXPECT_CALL(*context, scriptLog(spdlog::level::err, Eq(std::string("resolved histogram name
// = int_tag.7_string_tag.test_tag.bool_tag.true.test_histogram"))));
EXPECT_CALL(*context,
scriptLog_(spdlog::level::err,
Eq("h_id = int_tag.7.string_tag.test_tag.bool_tag.true.test_histogram")));
EXPECT_CALL(*context, scriptLog_(spdlog::level::err, Eq("stack_c = 1")));
EXPECT_CALL(*context, scriptLog_(spdlog::level::err, Eq("stack_g = 2")));
// Get is not supported on histograms.
// EXPECT_CALL(*context, scriptLog_(spdlog::level::err, Eq("stack_h = 3")));
EXPECT_TRUE(wasm->initialize(code, false));
wasm->setContext(context.get());
context->onLog();
}
| 0 |
[
"CWE-476"
] |
envoy
|
8788a3cf255b647fd14e6b5e2585abaaedb28153
| 116,125,066,204,795,320,000,000,000,000,000,000,000 | 43 |
1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]>
|
\param step_frame Step applied between each frame.
\param yuv2rgb Apply YUV to RGB transformation during reading.
**/
CImgList<T>& load_yuv(const char *const filename,
const unsigned int size_x, const unsigned int size_y,
const unsigned int chroma_subsampling=444,
const unsigned int first_frame=0, const unsigned int last_frame=~0U,
const unsigned int step_frame=1, const bool yuv2rgb=true) {
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 14,184,587,765,464,056,000,000,000,000,000,000,000 | 8 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
|
void ShowConciseImageInfo(void)
{
printf("\"%s\"",ImageInfo.FileName);
printf(" %dx%d",ImageInfo.Width, ImageInfo.Height);
if (ImageInfo.ExposureTime){
if (ImageInfo.ExposureTime <= 0.5){
printf(" (1/%d)",(int)(0.5 + 1/ImageInfo.ExposureTime));
}else{
printf(" (%1.1f)",ImageInfo.ExposureTime);
}
}
if (ImageInfo.ApertureFNumber){
printf(" f/%3.1f",(double)ImageInfo.ApertureFNumber);
}
if (ImageInfo.FocalLength35mmEquiv){
printf(" f(35)=%dmm",ImageInfo.FocalLength35mmEquiv);
}
if (ImageInfo.FlashUsed >= 0 && ImageInfo.FlashUsed & 1){
printf(" (flash)");
}
if (ImageInfo.IsColor == 0){
printf(" (bw)");
}
printf("\n");
}
| 0 |
[
"CWE-703"
] |
jhead
|
a50953a266583981b51a181c2fce73dad2ac5d7d
| 29,435,661,497,519,904,000,000,000,000,000,000,000 | 32 |
Make pointer range checks more consistent.
Also twiddle the unused floating point print code (not used in real exif files),
but fuzz testing hits it. New code is equivalent but doesn't cause a bus error (don't understand why, but this is all a very bogus thing anyway; just trying to avoid fuzz-testing hits).
|
RCoreSymCacheElement *r_coresym_cache_element_new(RBinFile *bf, RBuffer *buf, ut64 off, int bits, char * file_name) {
RCoreSymCacheElement *result = NULL;
ut8 *b = NULL;
RCoreSymCacheElementHdr *hdr = r_coresym_cache_element_header_new (buf, off, bits);
if (!hdr) {
return NULL;
}
if (hdr->version != 1) {
eprintf ("Unsupported CoreSymbolication cache version (%d)\n", hdr->version);
goto beach;
}
if (hdr->size == 0 || hdr->size > r_buf_size (buf) - off) {
eprintf ("Corrupted CoreSymbolication header: size out of bounds (0x%x)\n", hdr->size);
goto beach;
}
result = R_NEW0 (RCoreSymCacheElement);
if (!result) {
goto beach;
}
result->hdr = hdr;
b = malloc (hdr->size);
if (!b) {
goto beach;
}
if (r_buf_read_at (buf, off, b, hdr->size) != hdr->size) {
goto beach;
}
ut8 *end = b + hdr->size;
if (file_name) {
result->file_name = file_name;
} else if (hdr->file_name_off) {
result->file_name = str_dup_safe (b, b + (size_t)hdr->file_name_off, end);
}
if (hdr->version_off) {
result->binary_version = str_dup_safe (b, b + (size_t)hdr->version_off, end);
}
const size_t word_size = bits / 8;
const ut64 start_of_sections = (ut64)hdr->n_segments * R_CS_EL_SIZE_SEG + R_CS_EL_OFF_SEGS;
const ut64 sect_size = (bits == 32) ? R_CS_EL_SIZE_SECT_32 : R_CS_EL_SIZE_SECT_64;
const ut64 start_of_symbols = start_of_sections + (ut64)hdr->n_sections * sect_size;
const ut64 start_of_lined_symbols = start_of_symbols + (ut64)hdr->n_symbols * R_CS_EL_SIZE_SYM;
const ut64 start_of_line_info = start_of_lined_symbols + (ut64)hdr->n_lined_symbols * R_CS_EL_SIZE_LSYM;
const ut64 start_of_unknown_pairs = start_of_line_info + (ut64)hdr->n_line_info * R_CS_EL_SIZE_LINFO;
const ut64 start_of_strings = start_of_unknown_pairs + (ut64)hdr->n_symbols * 8;
ut64 page_zero_size = 0;
size_t page_zero_idx = 0;
if (UT32_MUL_OVFCHK (hdr->n_segments, sizeof (RCoreSymCacheElementSegment))) {
goto beach;
} else if (UT32_MUL_OVFCHK (hdr->n_sections, sizeof (RCoreSymCacheElementSection))) {
goto beach;
} else if (UT32_MUL_OVFCHK (hdr->n_symbols, sizeof (RCoreSymCacheElementSymbol))) {
goto beach;
} else if (UT32_MUL_OVFCHK (hdr->n_lined_symbols, sizeof (RCoreSymCacheElementLinedSymbol))) {
goto beach;
} else if (UT32_MUL_OVFCHK (hdr->n_line_info, sizeof (RCoreSymCacheElementLineInfo))) {
goto beach;
}
if (hdr->n_segments > 0) {
result->segments = R_NEWS0 (RCoreSymCacheElementSegment, hdr->n_segments);
if (!result->segments) {
goto beach;
}
size_t i;
ut8 *cursor = b + R_CS_EL_OFF_SEGS;
for (i = 0; i < hdr->n_segments && cursor + sizeof (RCoreSymCacheElementSegment) < end; i++) {
RCoreSymCacheElementSegment *seg = &result->segments[i];
seg->paddr = seg->vaddr = r_read_le64 (cursor);
cursor += 8;
if (cursor >= end) {
break;
}
seg->size = seg->vsize = r_read_le64 (cursor);
cursor += 8;
if (cursor >= end) {
break;
}
seg->name = str_dup_safe_fixed (b, cursor, 16, end);
cursor += 16;
if (!seg->name) {
continue;
}
if (!strcmp (seg->name, "__PAGEZERO")) {
page_zero_size = seg->size;
page_zero_idx = i;
seg->paddr = seg->vaddr = 0;
seg->size = 0;
}
}
for (i = 0; i < hdr->n_segments && page_zero_size > 0; i++) {
if (i == page_zero_idx) {
continue;
}
RCoreSymCacheElementSegment *seg = &result->segments[i];
if (seg->vaddr < page_zero_size) {
seg->vaddr += page_zero_size;
}
}
}
bool relative_to_strings = false;
ut8* string_origin;
if (hdr->n_sections > 0) {
result->sections = R_NEWS0 (RCoreSymCacheElementSection, hdr->n_sections);
if (!result->sections) {
goto beach;
}
size_t i;
ut8 *cursor = b + start_of_sections;
for (i = 0; i < hdr->n_sections && cursor < end; i++) {
ut8 *sect_start = cursor;
RCoreSymCacheElementSection *sect = &result->sections[i];
sect->vaddr = sect->paddr = r_read_ble (cursor, false, bits);
if (sect->vaddr < page_zero_size) {
sect->vaddr += page_zero_size;
}
cursor += word_size;
if (cursor >= end) {
break;
}
sect->size = r_read_ble (cursor, false, bits);
cursor += word_size;
if (cursor >= end) {
break;
}
ut64 sect_name_off = r_read_ble (cursor, false, bits);
if (!i && !sect_name_off) {
relative_to_strings = true;
}
cursor += word_size;
if (bits == 32) {
cursor += word_size;
}
string_origin = relative_to_strings? b + start_of_strings : sect_start;
sect->name = str_dup_safe (b, string_origin + (size_t)sect_name_off, end);
}
}
if (hdr->n_symbols) {
result->symbols = R_NEWS0 (RCoreSymCacheElementSymbol, hdr->n_symbols);
if (!result->symbols) {
goto beach;
}
size_t i;
ut8 *cursor = b + start_of_symbols;
for (i = 0; i < hdr->n_symbols && cursor + R_CS_EL_SIZE_SYM <= end; i++) {
RCoreSymCacheElementSymbol *sym = &result->symbols[i];
sym->paddr = r_read_le32 (cursor);
sym->size = r_read_le32 (cursor + 0x4);
sym->unk1 = r_read_le32 (cursor + 0x8);
size_t name_off = r_read_le32 (cursor + 0xc);
size_t mangled_name_off = r_read_le32 (cursor + 0x10);
sym->unk2 = (st32)r_read_le32 (cursor + 0x14);
string_origin = relative_to_strings? b + start_of_strings : cursor;
sym->name = str_dup_safe (b, string_origin + name_off, end);
if (!sym->name) {
cursor += R_CS_EL_SIZE_SYM;
continue;
}
string_origin = relative_to_strings? b + start_of_strings : cursor;
sym->mangled_name = str_dup_safe (b, string_origin + mangled_name_off, end);
if (!sym->mangled_name) {
cursor += R_CS_EL_SIZE_SYM;
continue;
}
cursor += R_CS_EL_SIZE_SYM;
}
}
if (hdr->n_lined_symbols) {
result->lined_symbols = R_NEWS0 (RCoreSymCacheElementLinedSymbol, hdr->n_lined_symbols);
if (!result->lined_symbols) {
goto beach;
}
size_t i;
ut8 *cursor = b + start_of_lined_symbols;
for (i = 0; i < hdr->n_lined_symbols && cursor + R_CS_EL_SIZE_LSYM <= end; i++) {
RCoreSymCacheElementLinedSymbol *lsym = &result->lined_symbols[i];
lsym->sym.paddr = r_read_le32 (cursor);
lsym->sym.size = r_read_le32 (cursor + 0x4);
lsym->sym.unk1 = r_read_le32 (cursor + 0x8);
size_t name_off = r_read_le32 (cursor + 0xc);
size_t mangled_name_off = r_read_le32 (cursor + 0x10);
lsym->sym.unk2 = (st32)r_read_le32 (cursor + 0x14);
size_t file_name_off = r_read_le32 (cursor + 0x18);
lsym->flc.line = r_read_le32 (cursor + 0x1c);
lsym->flc.col = r_read_le32 (cursor + 0x20);
string_origin = relative_to_strings? b + start_of_strings : cursor;
lsym->sym.name = str_dup_safe (b, string_origin + name_off, end);
if (!lsym->sym.name) {
cursor += R_CS_EL_SIZE_LSYM;
continue;
}
string_origin = relative_to_strings? b + start_of_strings : cursor;
lsym->sym.mangled_name = str_dup_safe (b, string_origin + mangled_name_off, end);
if (!lsym->sym.mangled_name) {
cursor += R_CS_EL_SIZE_LSYM;
continue;
}
string_origin = relative_to_strings? b + start_of_strings : cursor;
lsym->flc.file = str_dup_safe (b, string_origin + file_name_off, end);
if (!lsym->flc.file) {
cursor += R_CS_EL_SIZE_LSYM;
continue;
}
cursor += R_CS_EL_SIZE_LSYM;
meta_add_fileline (bf, r_coresym_cache_element_pa2va (result, lsym->sym.paddr), lsym->sym.size, &lsym->flc);
}
}
if (hdr->n_line_info) {
result->line_info = R_NEWS0 (RCoreSymCacheElementLineInfo, hdr->n_line_info);
if (!result->line_info) {
goto beach;
}
size_t i;
ut8 *cursor = b + start_of_line_info;
for (i = 0; i < hdr->n_line_info && cursor + R_CS_EL_SIZE_LINFO <= end; i++) {
RCoreSymCacheElementLineInfo *info = &result->line_info[i];
info->paddr = r_read_le32 (cursor);
info->size = r_read_le32 (cursor + 4);
size_t file_name_off = r_read_le32 (cursor + 8);
info->flc.line = r_read_le32 (cursor + 0xc);
info->flc.col = r_read_le32 (cursor + 0x10);
string_origin = relative_to_strings? b + start_of_strings : cursor;
info->flc.file = str_dup_safe (b, string_origin + file_name_off, end);
if (!info->flc.file) {
break;
}
cursor += R_CS_EL_SIZE_LINFO;
meta_add_fileline (bf, r_coresym_cache_element_pa2va (result, info->paddr), info->size, &info->flc);
}
}
/*
* TODO:
* Figure out the meaning of the 2 arrays of hdr->n_symbols
* 32-bit integers located at the end of line info.
* Those are the last info before the strings at the end.
*/
beach:
free (b);
return result;
}
| 1 |
[
"CWE-787"
] |
radare2
|
1dd65336f0f0c351d6ea853efcf73cf9c0030862
| 257,777,219,606,277,300,000,000,000,000,000,000,000 | 242 |
Fix oobread bug in NE parser ##crash
* Reported by @cnitlrt via huntrdev
* BountyID: 02b4b563-b946-4343-9092-38d1c5cd60c9
* Reproducer: neoobread
|
reverse_icmp_type(uint8_t type)
{
switch (type) {
case ICMP4_ECHO_REQUEST:
return ICMP4_ECHO_REPLY;
case ICMP4_ECHO_REPLY:
return ICMP4_ECHO_REQUEST;
case ICMP4_TIMESTAMP:
return ICMP4_TIMESTAMPREPLY;
case ICMP4_TIMESTAMPREPLY:
return ICMP4_TIMESTAMP;
case ICMP4_INFOREQUEST:
return ICMP4_INFOREPLY;
case ICMP4_INFOREPLY:
return ICMP4_INFOREQUEST;
default:
OVS_NOT_REACHED();
}
}
| 0 |
[
"CWE-400"
] |
ovs
|
79349cbab0b2a755140eedb91833ad2760520a83
| 70,696,556,421,886,460,000,000,000,000,000,000,000 | 21 |
flow: Support extra padding length.
Although not required, padding can optionally be added until
the packet length reaches MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
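A hedged sketch of the adjusted sanity check (variable names are illustrative):
trust the L3 header's declared length and treat any trailing bytes as padding,
while still rejecting truncation:

uint16_t ip_len = ntohs(nh->ip_tot_len);

if (OVS_UNLIKELY(size < ip_len))
    return false;   /* truncated packet: still invalid */
size = ip_len;      /* ignore extra padding beyond the IP datagram */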
|
static void module_unload_free(struct module *mod)
{
struct module_use *use, *tmp;
mutex_lock(&module_mutex);
list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
struct module *i = use->target;
pr_debug("%s unusing %s\n", mod->name, i->name);
module_put(i);
list_del(&use->source_list);
list_del(&use->target_list);
kfree(use);
}
mutex_unlock(&module_mutex);
}
| 0 |
[
"CWE-362",
"CWE-347"
] |
linux
|
0c18f29aae7ce3dadd26d8ee3505d07cc982df75
| 19,165,196,408,320,943,000,000,000,000,000,000,000 | 15 |
module: limit enabling module.sig_enforce
Irrespective of whether CONFIG_MODULE_SIG is configured, specifying
"module.sig_enforce=1" on the boot command line sets "sig_enforce".
Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.
This patch makes the presence of /sys/module/module/parameters/sig_enforce
dependent on CONFIG_MODULE_SIG=y.
Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <[email protected]>
Tested-by: Mimi Zohar <[email protected]>
Tested-by: Jessica Yu <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
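A hedged sketch of the guard (the module_param line follows kernel/module.c;
wrapping it in the ifdef is the essence of the fix):

#ifdef CONFIG_MODULE_SIG
module_param(sig_enforce, bool_enable_only, 0644);
#endif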
|
void DL_Dxf::addText(DL_CreationInterface* creationInterface) {
DL_TextData d(
// insertion point
getRealValue(10, 0.0),
getRealValue(20, 0.0),
getRealValue(30, 0.0),
// alignment point
getRealValue(11, DL_NANDOUBLE),
getRealValue(21, DL_NANDOUBLE),
getRealValue(31, DL_NANDOUBLE),
// height
getRealValue(40, 2.5),
// x scale
getRealValue(41, 1.0),
// generation flags
getIntValue(71, 0),
// h just
getIntValue(72, 0),
// v just
getIntValue(73, 0),
// text
getStringValue(1, ""),
// style
getStringValue(7, ""),
// angle
(getRealValue(50, 0.0)*2*M_PI)/360.0);
creationInterface->addText(d);
}
| 0 |
[
"CWE-191"
] |
qcad
|
1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8
| 335,914,719,580,112,830,000,000,000,000,000,000,000 | 29 |
Check vertexIndex, which might be -1 for broken DXF files
|
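A hedged C sketch of the guard the one-line message implies; the names (`vertex_at`, `count`) are illustrative, not QCAD's:

#include <stddef.h>

/* Return NULL instead of indexing with a negative or out-of-range value;
 * a broken DXF file can hand us -1 here. */
static const double *vertex_at(const double *vertices, size_t count, int index)
{
    if (index < 0 || (size_t) index >= count)
        return NULL;
    return &vertices[index];
}

Checking `index < 0` explicitly documents the failure mode: converted to an unsigned index, -1 wraps to a huge value, which is the CWE-191 underflow this record is tagged with.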
static signed char inf() { return max(); }
static signed char cut(const double val) {
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 177,674,848,908,020,600,000,000,000,000,000,000,000 | 2 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
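The `func` field above is cut off mid-definition; in CImg these trait helpers saturate a double into the pixel type's range. A self-contained sketch of that behavior for signed char, hedged because the truncated body is not shown here:

#include <limits.h>

/* Clamp a double into signed char range before converting (saturating cast). */
static signed char cut_schar(double val)
{
    if (val <= (double) SCHAR_MIN) return SCHAR_MIN;
    if (val >= (double) SCHAR_MAX) return SCHAR_MAX;
    return (signed char) val;
}

Converting an out-of-range double straight to an integer type is undefined behavior in C, which is why the clamp has to come first.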
aiff_read_header (SF_PRIVATE *psf, COMM_CHUNK *comm_fmt)
{ SSND_CHUNK ssnd_fmt ;
AIFF_PRIVATE *paiff ;
BUF_UNION ubuf ;
uint32_t chunk_size = 0, FORMsize, SSNDsize, bytesread, mark_count = 0 ;
int k, found_chunk = 0, done = 0, error = 0 ;
char *cptr ;
int instr_found = 0, mark_found = 0 ;
if (psf->filelength > SF_PLATFORM_S64 (0xffffffff))
psf_log_printf (psf, "Warning : filelength > 0xffffffff. This is bad!!!!\n") ;
if ((paiff = psf->container_data) == NULL)
return SFE_INTERNAL ;
paiff->comm_offset = 0 ;
paiff->ssnd_offset = 0 ;
/* Set position to start of file to begin reading header. */
psf_binheader_readf (psf, "p", 0) ;
memset (comm_fmt, 0, sizeof (COMM_CHUNK)) ;
/* Until recently AIF* files were all BIG endian. */
psf->endian = SF_ENDIAN_BIG ;
/* AIFF files can apparently have their chunks in any order. However, they
** must have a FORM chunk. Approach here is to read all the chunks one by
** one and then check for the mandatory chunks at the end.
*/
while (! done)
{ unsigned marker ;
size_t jump = chunk_size & 1 ;
marker = chunk_size = 0 ;
psf_binheader_readf (psf, "Ejm4", jump, &marker, &chunk_size) ;
if (marker == 0)
{ sf_count_t pos = psf_ftell (psf) ;
psf_log_printf (psf, "Have 0 marker at position %D (0x%x).\n", pos, pos) ;
break ;
} ;
if (psf->file.mode == SFM_RDWR && (found_chunk & HAVE_SSND))
return SFE_AIFF_RW_SSND_NOT_LAST ;
psf_store_read_chunk_u32 (&psf->rchunks, marker, psf_ftell (psf), chunk_size) ;
switch (marker)
{ case FORM_MARKER :
if (found_chunk)
return SFE_AIFF_NO_FORM ;
FORMsize = chunk_size ;
found_chunk |= HAVE_FORM ;
psf_binheader_readf (psf, "m", &marker) ;
switch (marker)
{ case AIFC_MARKER :
case AIFF_MARKER :
found_chunk |= (marker == AIFC_MARKER) ? (HAVE_AIFC | HAVE_AIFF) : HAVE_AIFF ;
break ;
default :
break ;
} ;
if (psf->fileoffset > 0 && psf->filelength > FORMsize + 8)
{ /* Set file length. */
psf->filelength = FORMsize + 8 ;
psf_log_printf (psf, "FORM : %u\n %M\n", FORMsize, marker) ;
}
else if (FORMsize != psf->filelength - 2 * SIGNED_SIZEOF (chunk_size))
{ chunk_size = psf->filelength - 2 * sizeof (chunk_size) ;
psf_log_printf (psf, "FORM : %u (should be %u)\n %M\n", FORMsize, chunk_size, marker) ;
FORMsize = chunk_size ;
}
else
psf_log_printf (psf, "FORM : %u\n %M\n", FORMsize, marker) ;
/* Set this to 0, so we don't jump a byte when parsing the next marker. */
chunk_size = 0 ;
break ;
case COMM_MARKER :
paiff->comm_offset = psf_ftell (psf) - 8 ;
chunk_size += chunk_size & 1 ;
comm_fmt->size = chunk_size ;
if ((error = aiff_read_comm_chunk (psf, comm_fmt)) != 0)
return error ;
found_chunk |= HAVE_COMM ;
break ;
case PEAK_MARKER :
/* Must have COMM chunk before PEAK chunk. */
if ((found_chunk & (HAVE_FORM | HAVE_AIFF | HAVE_COMM)) != (HAVE_FORM | HAVE_AIFF | HAVE_COMM))
return SFE_AIFF_PEAK_B4_COMM ;
psf_log_printf (psf, "%M : %d\n", marker, chunk_size) ;
if (chunk_size != AIFF_PEAK_CHUNK_SIZE (psf->sf.channels))
{ psf_binheader_readf (psf, "j", chunk_size) ;
psf_log_printf (psf, "*** File PEAK chunk too big.\n") ;
return SFE_WAV_BAD_PEAK ;
} ;
if ((psf->peak_info = peak_info_calloc (psf->sf.channels)) == NULL)
return SFE_MALLOC_FAILED ;
/* read in rest of PEAK chunk. */
psf_binheader_readf (psf, "E44", &(psf->peak_info->version), &(psf->peak_info->timestamp)) ;
if (psf->peak_info->version != 1)
psf_log_printf (psf, " version : %d *** (should be version 1)\n", psf->peak_info->version) ;
else
psf_log_printf (psf, " version : %d\n", psf->peak_info->version) ;
psf_log_printf (psf, " time stamp : %d\n", psf->peak_info->timestamp) ;
psf_log_printf (psf, " Ch Position Value\n") ;
cptr = ubuf.cbuf ;
for (k = 0 ; k < psf->sf.channels ; k++)
{ float value ;
uint32_t position ;
psf_binheader_readf (psf, "Ef4", &value, &position) ;
psf->peak_info->peaks [k].value = value ;
psf->peak_info->peaks [k].position = position ;
snprintf (cptr, sizeof (ubuf.scbuf), " %2d %-12" PRId64 " %g\n",
k, psf->peak_info->peaks [k].position, psf->peak_info->peaks [k].value) ;
cptr [sizeof (ubuf.scbuf) - 1] = 0 ;
psf_log_printf (psf, "%s", cptr) ;
} ;
psf->peak_info->peak_loc = ((found_chunk & HAVE_SSND) == 0) ? SF_PEAK_START : SF_PEAK_END ;
break ;
case SSND_MARKER :
if ((found_chunk & HAVE_AIFC) && (found_chunk & HAVE_FVER) == 0)
psf_log_printf (psf, "*** Valid AIFC files should have an FVER chunk.\n") ;
paiff->ssnd_offset = psf_ftell (psf) - 8 ;
SSNDsize = chunk_size ;
psf_binheader_readf (psf, "E44", &(ssnd_fmt.offset), &(ssnd_fmt.blocksize)) ;
psf->datalength = SSNDsize - sizeof (ssnd_fmt) ;
psf->dataoffset = psf_ftell (psf) ;
if (psf->datalength > psf->filelength - psf->dataoffset || psf->datalength < 0)
{ psf_log_printf (psf, " SSND : %u (should be %D)\n", SSNDsize, psf->filelength - psf->dataoffset + sizeof (SSND_CHUNK)) ;
psf->datalength = psf->filelength - psf->dataoffset ;
}
else
psf_log_printf (psf, " SSND : %u\n", SSNDsize) ;
if (ssnd_fmt.offset == 0 || psf->dataoffset + ssnd_fmt.offset == ssnd_fmt.blocksize)
{ psf_log_printf (psf, " Offset : %u\n", ssnd_fmt.offset) ;
psf_log_printf (psf, " Block Size : %u\n", ssnd_fmt.blocksize) ;
psf->dataoffset += ssnd_fmt.offset ;
psf->datalength -= ssnd_fmt.offset ;
}
else
{ psf_log_printf (psf, " Offset : %u\n", ssnd_fmt.offset) ;
psf_log_printf (psf, " Block Size : %u ???\n", ssnd_fmt.blocksize) ;
psf->dataoffset += ssnd_fmt.offset ;
psf->datalength -= ssnd_fmt.offset ;
} ;
/* Only set dataend if there really is data at the end. */
if (psf->datalength + psf->dataoffset < psf->filelength)
psf->dataend = psf->datalength + psf->dataoffset ;
found_chunk |= HAVE_SSND ;
if (! psf->sf.seekable)
break ;
/* Seek to end of SSND chunk. */
psf_fseek (psf, psf->dataoffset + psf->datalength, SEEK_SET) ;
break ;
case c_MARKER :
if (chunk_size == 0)
break ;
if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf))
{ psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ;
return SFE_INTERNAL ;
} ;
cptr = ubuf.cbuf ;
psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ;
cptr [chunk_size] = 0 ;
psf_sanitize_string (cptr, chunk_size) ;
psf_log_printf (psf, " %M : %s\n", marker, cptr) ;
psf_store_string (psf, SF_STR_COPYRIGHT, cptr) ;
chunk_size += chunk_size & 1 ;
break ;
case AUTH_MARKER :
if (chunk_size == 0)
break ;
if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 1)
{ psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ;
return SFE_INTERNAL ;
} ;
cptr = ubuf.cbuf ;
psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ;
cptr [chunk_size] = 0 ;
psf_log_printf (psf, " %M : %s\n", marker, cptr) ;
psf_store_string (psf, SF_STR_ARTIST, cptr) ;
chunk_size += chunk_size & 1 ;
break ;
case COMT_MARKER :
{ uint16_t count, id, len ;
uint32_t timestamp, bytes ;
if (chunk_size == 0)
break ;
bytes = chunk_size ;
bytes -= psf_binheader_readf (psf, "E2", &count) ;
psf_log_printf (psf, " %M : %d\n count : %d\n", marker, chunk_size, count) ;
for (k = 0 ; k < count ; k++)
{ bytes -= psf_binheader_readf (psf, "E422", ×tamp, &id, &len) ;
psf_log_printf (psf, " time : 0x%x\n marker : %x\n length : %d\n", timestamp, id, len) ;
if (len + 1 > SIGNED_SIZEOF (ubuf.scbuf))
{ psf_log_printf (psf, "\nError : string length (%d) too big.\n", len) ;
return SFE_INTERNAL ;
} ;
cptr = ubuf.cbuf ;
bytes -= psf_binheader_readf (psf, "b", cptr, len) ;
cptr [len] = 0 ;
psf_log_printf (psf, " string : %s\n", cptr) ;
} ;
if (bytes > 0)
psf_binheader_readf (psf, "j", bytes) ;
} ;
break ;
case APPL_MARKER :
{ unsigned appl_marker ;
if (chunk_size == 0)
break ;
if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 1)
{ psf_log_printf (psf, " %M : %u (too big, skipping)\n", marker, chunk_size) ;
psf_binheader_readf (psf, "j", chunk_size + (chunk_size & 1)) ;
break ;
} ;
if (chunk_size < 4)
{ psf_log_printf (psf, " %M : %d (too small, skipping)\n", marker, chunk_size) ;
psf_binheader_readf (psf, "j", chunk_size + (chunk_size & 1)) ;
break ;
} ;
cptr = ubuf.cbuf ;
psf_binheader_readf (psf, "mb", &appl_marker, cptr, chunk_size + (chunk_size & 1) - 4) ;
cptr [chunk_size] = 0 ;
for (k = 0 ; k < (int) chunk_size ; k++)
if (! psf_isprint (cptr [k]))
{ cptr [k] = 0 ;
break ;
} ;
psf_log_printf (psf, " %M : %d\n AppSig : %M\n Name : %s\n", marker, chunk_size, appl_marker, cptr) ;
psf_store_string (psf, SF_STR_SOFTWARE, cptr) ;
chunk_size += chunk_size & 1 ;
} ;
break ;
case NAME_MARKER :
if (chunk_size == 0)
break ;
if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 2)
{ psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ;
return SFE_INTERNAL ;
} ;
cptr = ubuf.cbuf ;
psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ;
cptr [chunk_size] = 0 ;
psf_log_printf (psf, " %M : %s\n", marker, cptr) ;
psf_store_string (psf, SF_STR_TITLE, cptr) ;
chunk_size += chunk_size & 1 ;
break ;
case ANNO_MARKER :
if (chunk_size == 0)
break ;
if (chunk_size >= SIGNED_SIZEOF (ubuf.scbuf) - 2)
{ psf_log_printf (psf, " %M : %d (too big)\n", marker, chunk_size) ;
return SFE_INTERNAL ;
} ;
cptr = ubuf.cbuf ;
psf_binheader_readf (psf, "b", cptr, chunk_size + (chunk_size & 1)) ;
cptr [chunk_size] = 0 ;
psf_log_printf (psf, " %M : %s\n", marker, cptr) ;
psf_store_string (psf, SF_STR_COMMENT, cptr) ;
chunk_size += chunk_size & 1 ;
break ;
case INST_MARKER :
if (chunk_size != SIZEOF_INST_CHUNK)
{ psf_log_printf (psf, " %M : %d (should be %d)\n", marker, chunk_size, SIZEOF_INST_CHUNK) ;
psf_binheader_readf (psf, "j", chunk_size) ;
break ;
} ;
psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ;
{ uint8_t bytes [6] ;
int16_t gain ;
if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL)
return SFE_MALLOC_FAILED ;
psf_binheader_readf (psf, "b", bytes, 6) ;
psf_log_printf (psf, " Base Note : %u\n Detune : %u\n"
" Low Note : %u\n High Note : %u\n"
" Low Vel. : %u\n High Vel. : %u\n",
bytes [0], bytes [1], bytes [2], bytes [3], bytes [4], bytes [5]) ;
psf->instrument->basenote = bytes [0] ;
psf->instrument->detune = bytes [1] ;
psf->instrument->key_lo = bytes [2] ;
psf->instrument->key_hi = bytes [3] ;
psf->instrument->velocity_lo = bytes [4] ;
psf->instrument->velocity_hi = bytes [5] ;
psf_binheader_readf (psf, "E2", &gain) ;
psf->instrument->gain = gain ;
psf_log_printf (psf, " Gain (dB) : %d\n", gain) ;
} ;
{ int16_t mode ; /* 0 - no loop, 1 - forward looping, 2 - backward looping */
const char *loop_mode ;
uint16_t begin, end ;
psf_binheader_readf (psf, "E222", &mode, &begin, &end) ;
loop_mode = get_loop_mode_str (mode) ;
mode = get_loop_mode (mode) ;
if (mode == SF_LOOP_NONE)
{ psf->instrument->loop_count = 0 ;
psf->instrument->loops [0].mode = SF_LOOP_NONE ;
}
else
{ psf->instrument->loop_count = 1 ;
psf->instrument->loops [0].mode = SF_LOOP_FORWARD ;
psf->instrument->loops [0].start = begin ;
psf->instrument->loops [0].end = end ;
psf->instrument->loops [0].count = 0 ;
} ;
psf_log_printf (psf, " Sustain\n mode : %d => %s\n begin : %u\n end : %u\n",
mode, loop_mode, begin, end) ;
psf_binheader_readf (psf, "E222", &mode, &begin, &end) ;
loop_mode = get_loop_mode_str (mode) ;
mode = get_loop_mode (mode) ;
if (mode == SF_LOOP_NONE)
psf->instrument->loops [1].mode = SF_LOOP_NONE ;
else
{ psf->instrument->loop_count += 1 ;
psf->instrument->loops [1].mode = SF_LOOP_FORWARD ;
psf->instrument->loops [1].start = begin ;
psf->instrument->loops [1].end = end ;
psf->instrument->loops [1].count = 0 ;
} ;
psf_log_printf (psf, " Release\n mode : %d => %s\n begin : %u\n end : %u\n",
mode, loop_mode, begin, end) ;
} ;
instr_found++ ;
break ;
case basc_MARKER :
psf_log_printf (psf, " basc : %u\n", chunk_size) ;
if ((error = aiff_read_basc_chunk (psf, chunk_size)))
return error ;
break ;
case MARK_MARKER :
psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ;
{ uint16_t mark_id, n = 0 ;
uint32_t position ;
bytesread = psf_binheader_readf (psf, "E2", &n) ;
mark_count = n ;
psf_log_printf (psf, " Count : %u\n", mark_count) ;
if (paiff->markstr != NULL)
{ psf_log_printf (psf, "*** Second MARK chunk found. Throwing away the first.\n") ;
free (paiff->markstr) ;
} ;
paiff->markstr = calloc (mark_count, sizeof (MARK_ID_POS)) ;
if (paiff->markstr == NULL)
return SFE_MALLOC_FAILED ;
if (mark_count > 1000)
{ psf_log_printf (psf, " More than 1000 markers, skipping!\n") ;
psf_binheader_readf (psf, "j", chunk_size - bytesread) ;
break ;
} ;
if ((psf->cues = psf_cues_alloc (mark_count)) == NULL)
return SFE_MALLOC_FAILED ;
for (n = 0 ; n < mark_count && bytesread < chunk_size ; n++)
{ uint32_t pstr_len ;
uint8_t ch ;
bytesread += psf_binheader_readf (psf, "E241", &mark_id, &position, &ch) ;
psf_log_printf (psf, " Mark ID : %u\n Position : %u\n", mark_id, position) ;
psf->cues->cue_points [n].indx = mark_id ;
psf->cues->cue_points [n].position = 0 ;
psf->cues->cue_points [n].fcc_chunk = MAKE_MARKER ('d', 'a', 't', 'a') ; /* always data */
psf->cues->cue_points [n].chunk_start = 0 ;
psf->cues->cue_points [n].block_start = 0 ;
psf->cues->cue_points [n].sample_offset = position ;
pstr_len = (ch & 1) ? ch : ch + 1 ;
if (pstr_len < sizeof (ubuf.scbuf) - 1)
{ bytesread += psf_binheader_readf (psf, "b", ubuf.scbuf, pstr_len) ;
ubuf.scbuf [pstr_len] = 0 ;
}
else
{ uint32_t read_len = pstr_len - (sizeof (ubuf.scbuf) - 1) ;
bytesread += psf_binheader_readf (psf, "bj", ubuf.scbuf, read_len, pstr_len - read_len) ;
ubuf.scbuf [sizeof (ubuf.scbuf) - 1] = 0 ;
}
psf_log_printf (psf, " Name : %s\n", ubuf.scbuf) ;
psf_strlcpy (psf->cues->cue_points [n].name, sizeof (psf->cues->cue_points [n].name), ubuf.cbuf) ;
paiff->markstr [n].markerID = mark_id ;
paiff->markstr [n].position = position ;
/*
** TODO if ubuf.scbuf is equal to
** either Beg_loop, Beg loop or beg loop and spam
** if (psf->instrument == NULL && (psf->instrument = psf_instrument_alloc ()) == NULL)
** return SFE_MALLOC_FAILED ;
*/
} ;
} ;
mark_found++ ;
psf_binheader_readf (psf, "j", chunk_size - bytesread) ;
break ;
case FVER_MARKER :
found_chunk |= HAVE_FVER ;
/* Falls through. */
case SFX_MARKER :
psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ;
psf_binheader_readf (psf, "j", chunk_size) ;
break ;
case NONE_MARKER :
/* Fix for broken AIFC files with incorrect COMM chunk length. */
chunk_size = (chunk_size >> 24) - 3 ;
psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ;
psf_binheader_readf (psf, "j", make_size_t (chunk_size)) ;
break ;
case CHAN_MARKER :
if (chunk_size < 12)
{ psf_log_printf (psf, " %M : %d (should be >= 12)\n", marker, chunk_size) ;
psf_binheader_readf (psf, "j", chunk_size) ;
break ;
}
psf_log_printf (psf, " %M : %d\n", marker, chunk_size) ;
if ((error = aiff_read_chanmap (psf, chunk_size)))
return error ;
break ;
default :
if (chunk_size >= 0xffff0000)
{ done = SF_TRUE ;
psf_log_printf (psf, "*** Unknown chunk marker (%X) at position %D with length %u. Exiting parser.\n", marker, psf_ftell (psf) - 8, chunk_size) ;
break ;
} ;
if (psf_isprint ((marker >> 24) & 0xFF) && psf_isprint ((marker >> 16) & 0xFF)
&& psf_isprint ((marker >> 8) & 0xFF) && psf_isprint (marker & 0xFF))
{ psf_log_printf (psf, " %M : %u (unknown marker)\n", marker, chunk_size) ;
psf_binheader_readf (psf, "j", chunk_size) ;
break ;
} ;
if (psf_ftell (psf) & 0x03)
{ psf_log_printf (psf, " Unknown chunk marker at position %D. Resynching.\n", psf_ftell (psf) - 8) ;
psf_binheader_readf (psf, "j", -3) ;
break ;
} ;
psf_log_printf (psf, "*** Unknown chunk marker %X at position %D. Exiting parser.\n", marker, psf_ftell (psf)) ;
done = SF_TRUE ;
break ;
} ; /* switch (marker) */
if (chunk_size >= psf->filelength)
{ psf_log_printf (psf, "*** Chunk size %u > file length %D. Exiting parser.\n", chunk_size, psf->filelength) ;
break ;
} ;
if ((! psf->sf.seekable) && (found_chunk & HAVE_SSND))
break ;
if (psf_ftell (psf) >= psf->filelength - (2 * SIGNED_SIZEOF (int32_t)))
break ;
} ; /* while (1) */
if (instr_found && mark_found)
{ int ji, str_index ;
/* Next loop will convert markers to loop positions for internal handling */
for (ji = 0 ; ji < psf->instrument->loop_count ; ji ++)
{ if (ji < ARRAY_LEN (psf->instrument->loops))
{ psf->instrument->loops [ji].start = marker_to_position (paiff->markstr, psf->instrument->loops [ji].start, mark_count) ;
psf->instrument->loops [ji].end = marker_to_position (paiff->markstr, psf->instrument->loops [ji].end, mark_count) ;
psf->instrument->loops [ji].mode = SF_LOOP_FORWARD ;
} ;
} ;
/* The markers that correspond to loop positions can now be removed from cues struct */
if (psf->cues->cue_count > (uint32_t) (psf->instrument->loop_count * 2))
{ uint32_t j ;
for (j = 0 ; j < psf->cues->cue_count - (uint32_t) (psf->instrument->loop_count * 2) ; j ++)
{ /* This simply copies the information in cues above loop positions and writes it at current count instead */
psf->cues->cue_points [j].indx = psf->cues->cue_points [j + psf->instrument->loop_count * 2].indx ;
psf->cues->cue_points [j].position = psf->cues->cue_points [j + psf->instrument->loop_count * 2].position ;
psf->cues->cue_points [j].fcc_chunk = psf->cues->cue_points [j + psf->instrument->loop_count * 2].fcc_chunk ;
psf->cues->cue_points [j].chunk_start = psf->cues->cue_points [j + psf->instrument->loop_count * 2].chunk_start ;
psf->cues->cue_points [j].block_start = psf->cues->cue_points [j + psf->instrument->loop_count * 2].block_start ;
psf->cues->cue_points [j].sample_offset = psf->cues->cue_points [j + psf->instrument->loop_count * 2].sample_offset ;
for (str_index = 0 ; str_index < 256 ; str_index++)
psf->cues->cue_points [j].name [str_index] = psf->cues->cue_points [j + psf->instrument->loop_count * 2].name [str_index] ;
} ;
psf->cues->cue_count -= psf->instrument->loop_count * 2 ;
} else
{ /* All the cues were in fact loop positions so we can actually remove the cues altogether */
free (psf->cues) ;
psf->cues = NULL ;
}
} ;
if (psf->sf.channels < 1)
return SFE_CHANNEL_COUNT_ZERO ;
if (psf->sf.channels > SF_MAX_CHANNELS)
return SFE_CHANNEL_COUNT ;
if (! (found_chunk & HAVE_FORM))
return SFE_AIFF_NO_FORM ;
if (! (found_chunk & HAVE_AIFF))
return SFE_AIFF_COMM_NO_FORM ;
if (! (found_chunk & HAVE_COMM))
return SFE_AIFF_SSND_NO_COMM ;
if (! psf->dataoffset)
return SFE_AIFF_NO_DATA ;
return 0 ;
} /* aiff_read_header */
| 0 |
[
"CWE-476"
] |
libsndfile
|
6f3266277bed16525f0ac2f0f03ff4626f1923e5
| 74,889,888,949,506,110,000,000,000,000,000,000,000 | 573 |
Fix max channel count bug
The code was allowing files to be written with a channel count of exactly
`SF_MAX_CHANNELS` but was failing to read some file formats with the same
channel count.
|
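A minimal sketch of the consistency the message calls for: the read and write paths must use the same inclusive bound. The `SF_MAX_CHANNELS` value here is assumed for illustration:

/* Shared validation so writers and readers agree that exactly
 * SF_MAX_CHANNELS channels is still valid. */
#define SF_MAX_CHANNELS 1024 /* assumed value for illustration */

static int channels_ok(int channels)
{
    return channels >= 1 && channels <= SF_MAX_CHANNELS; /* inclusive */
}

The bug class is a classic `<` vs `<=` mismatch between two call sites; routing both through one predicate removes it.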