| func (string, len 0–484k) | target (int64, 0 or 1) | cwe (list, len 0–4) | project (string, 799 classes) | commit_id (string, len 40) | hash (float64, 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000B) | size (int64, 1–24k) | message (string, len 0–13.3k) |
|---|---|---|---|---|---|---|---|
static void
test_bson_append_oid (void)
{
bson_oid_t oid;
bson_t *b;
bson_t *b2;
bson_oid_init_from_string (&oid, "1234567890abcdef1234abcd");
b = bson_new ();
BSON_ASSERT (bson_append_oid (b, "oid", -1, &oid));
b2 = get_bson ("test22.bson");
BSON_ASSERT_BSON_EQUAL (b, b2);
bson_destroy (b);
bson_destroy (b2);
}
| 0 |
[
"CWE-125"
] |
libbson
|
42900956dc461dfe7fb91d93361d10737c1602b3
| 189,717,805,965,527,180,000,000,000,000,000,000,000 | 15 |
CDRIVER-2269 Check for zero string length in codewscope
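The guard named in the commit title amounts to a plain length check before the string is used; a minimal sketch of the idea, not libbson's actual code (`codewscope_len_ok` is a hypothetical helper):

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: a code-with-scope string length of zero cannot even
 * hold the NUL terminator, so reject it (and anything larger than the bytes
 * actually available) before it feeds any pointer arithmetic. */
static bool
codewscope_len_ok (uint32_t code_len, uint32_t bytes_remaining)
{
   return code_len >= 1 && code_len <= bytes_remaining;
}
```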
|
static void ViewModeBoxes(int mode)
{
printf("<p>%s: \n", _("Current View Is"));
printf("<input type=radio name=\"ViewMode\" value=0 %s>%s\n", ((mode == 0) ? "checked" : ""), _("Basic"));
printf("<input type=radio name=\"ViewMode\" value=1 %s>%s\n", ((mode == 1) ? "checked" : ""), _("Advanced"));
printf("<br>%s: \n", _("Change View To"));
printf("<input type=submit name=\"BasicMode\" value=\"%s\">\n", _("Basic"));
printf("<input type=submit name=\"AdvMode\" value=\"%s\">\n", _("Advanced"));
printf("</p><br>\n");
}
| 0 |
[] |
samba
|
71225948a249f079120282740fcc39fd6faa880e
| 270,938,805,078,782,250,000,000,000,000,000,000,000 | 10 |
swat: Use X-Frame-Options header to avoid clickjacking
Jann Horn reported a potential clickjacking vulnerability in SWAT where
the SWAT page could be embedded into an attacker's page using a frame or
iframe and then used to trick the user to change Samba settings.
Avoid this by telling the browser to refuse the frame embedding via the
X-Frame-Options: DENY header.
Signed-off-by: Kai Blin <[email protected]>
Fix bug #9576 - CVE-2013-0213: Clickjacking issue in SWAT.
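A minimal sketch of the mitigation, assuming a CGI-style responder like SWAT's that prints its own HTTP headers (`print_http_headers` is a hypothetical name, not Samba's code):

```c
#include <stdio.h>

/* Hypothetical CGI header writer: the one added line tells browsers to
 * refuse any frame/iframe embedding of the page. */
static void print_http_headers(void)
{
	printf("Content-Type: text/html\r\n");
	printf("X-Frame-Options: DENY\r\n");  /* the clickjacking fix */
	printf("\r\n");                       /* blank line ends the headers */
}
```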
|
int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
{
int i, j, k, ret = 0;
ushort ect;
u16 **p1, *p2;
struct uni_pagedir *p;
struct unipair *unilist;
unilist = kvmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
if (!unilist)
return -ENOMEM;
console_lock();
ect = 0;
if (*vc->vc_uni_pagedir_loc) {
p = *vc->vc_uni_pagedir_loc;
for (i = 0; i < 32; i++) {
p1 = p->uni_pgdir[i];
if (p1)
for (j = 0; j < 32; j++) {
p2 = *(p1++);
if (p2)
for (k = 0; k < 64; k++, p2++) {
if (*p2 >= MAX_GLYPH)
continue;
if (ect < ct) {
unilist[ect].unicode =
(i<<11)+(j<<6)+k;
unilist[ect].fontpos = *p2;
}
ect++;
}
}
}
}
console_unlock();
if (copy_to_user(list, unilist, min(ect, ct) * sizeof(struct unipair)))
ret = -EFAULT;
put_user(ect, uct);
kvfree(unilist);
return ret ? ret : (ect <= ct) ? 0 : -ENOMEM;
}
| 0 |
[
"CWE-401"
] |
tty
|
84ecc2f6eb1cb12e6d44818f94fa49b50f06e6ac
| 327,168,360,875,608,900,000,000,000,000,000,000,000 | 43 |
consolemap: Fix a memory leaking bug in drivers/tty/vt/consolemap.c
In function con_insert_unipair(), when allocation for p2 and p1[n]
fails, ENOMEM is returned, but previously allocated p1 is not freed,
remains as leaking memory. Thus we should free p1 as well when this
allocation fails.
Signed-off-by: Gen Zhang <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
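The leak pattern the message describes can be shown outside the kernel; a sketch with userland `calloc`/`free` standing in for the kernel allocators, not the actual consolemap code:

```c
#include <stdlib.h>

/* Two-level table insert: if the second-level allocation fails, a
 * first-level table allocated in the same call must be released too,
 * otherwise it leaks exactly as described above. */
static int insert_pair(unsigned short ***dir, unsigned int n)
{
    unsigned short **p1 = *dir;
    int p1_is_new = 0;

    if (!p1) {
        p1 = calloc(32, sizeof(*p1));
        if (!p1)
            return -1;
        p1_is_new = 1;
        *dir = p1;
    }
    if (!p1[n]) {
        p1[n] = calloc(64, sizeof(**p1));
        if (!p1[n]) {
            if (p1_is_new) {
                free(p1);   /* the fix: free the table we just made */
                *dir = NULL;
            }
            return -1;
        }
    }
    return 0;
}
```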
|
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
int callsite, int frameno, int subprogno)
{
state->callsite = callsite;
state->frameno = frameno;
state->subprogno = subprogno;
init_reg_state(env, state);
}
| 0 |
[
"CWE-125"
] |
linux
|
b799207e1e1816b09e7a5920fbb2d5fcf6edd681
| 311,195,656,271,057,680,000,000,000,000,000,000,000 | 9 |
bpf: 32-bit RSH verification must truncate input before the ALU op
When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I
assumed that, in order to emulate 64-bit arithmetic with 32-bit logic, it
is sufficient to just truncate the output to 32 bits; and so I just moved
the register size coercion that used to be at the start of the function to
the end of the function.
That assumption is true for almost every op, but not for 32-bit right
shifts, because those can propagate information towards the least
significant bit. Fix it by always truncating inputs for 32-bit ops to 32
bits.
Also get rid of the coerce_reg_to_size() after the ALU op, since that has
no effect.
Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification")
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
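The shift case is easy to demonstrate in plain C: truncating only the output gives a different answer than truncating the input first, which is the whole point of the fix.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t reg = 0x1ffffffffULL;  /* bit 32 set: invisible to 32-bit ops */

    /* Truncate after the op: bit 32 leaks into bit 31 of the result. */
    uint32_t wrong = (uint32_t)(reg >> 1);   /* 0xffffffff */

    /* Truncate before the op, as the fix does: the high bit is gone. */
    uint32_t right = (uint32_t)reg >> 1;     /* 0x7fffffff */

    printf("output-truncated: %#x, input-truncated: %#x\n", wrong, right);
    return 0;
}
```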
|
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
struct mdp_superblock_1 *sb;
struct md_rdev *rdev2;
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
else
sb->resync_offset = cpu_to_le64(0);
sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
if (test_bit(WriteMostly, &rdev->flags))
sb->devflags |= WriteMostly1;
else
sb->devflags &= ~WriteMostly1;
sb->data_offset = cpu_to_le64(rdev->data_offset);
sb->data_size = cpu_to_le64(rdev->sectors);
if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
sb->feature_map |=
cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
}
if (test_bit(Replacement, &rdev->flags))
sb->feature_map |=
cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
sb->reshape_position = cpu_to_le64(mddev->reshape_position);
sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
if (mddev->delta_disks == 0 &&
mddev->reshape_backwards)
sb->feature_map
|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
if (rdev->new_data_offset != rdev->data_offset) {
sb->feature_map
|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
- rdev->data_offset));
}
}
if (rdev->badblocks.count == 0)
/* Nothing to do for bad blocks*/ ;
else if (sb->bblog_offset == 0)
/* Cannot record bad blocks on this device */
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
u64 *bbp = (u64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
goto retry;
bb->sector = (rdev->sb_start +
(int)le32_to_cpu(sb->bblog_offset));
bb->size = le16_to_cpu(sb->bblog_size);
}
}
max_dev = 0;
rdev_for_each(rdev2, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
if (max_dev > le32_to_cpu(sb->max_dev)) {
int bmask;
sb->max_dev = cpu_to_le32(max_dev);
rdev->sb_size = max_dev * 2 + 256;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;
} else
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
}
sb->sb_csum = calc_sb_1_csum(sb);
}
| 0 |
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 169,659,742,485,824,340,000,000,000,000,000,000,000 | 140 |
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
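Per the commit title, the fix is to zero the whole allocation up front, in the style of the excerpts quoted above:

```c
/* kzalloc() returns zeroed memory, so the uninitialized trailing bytes
 * that kmalloc() left behind can no longer reach user space. */
file = kzalloc(sizeof(*file), GFP_NOIO);
if (!file)
	return -ENOMEM;
```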
|
conntrack_destroy(struct conntrack *ct)
{
struct conn *conn;
latch_set(&ct->clean_thread_exit);
pthread_join(ct->clean_thread, NULL);
latch_destroy(&ct->clean_thread_exit);
ovs_mutex_lock(&ct->ct_lock);
CMAP_FOR_EACH (conn, cm_node, &ct->conns) {
conn_clean_one(ct, conn);
}
cmap_destroy(&ct->conns);
struct zone_limit *zl;
HMAP_FOR_EACH_POP (zl, node, &ct->zone_limits) {
free(zl);
}
hmap_destroy(&ct->zone_limits);
ovs_mutex_unlock(&ct->ct_lock);
ovs_mutex_destroy(&ct->ct_lock);
ovs_rwlock_wrlock(&ct->resources_lock);
struct alg_exp_node *alg_exp_node;
HMAP_FOR_EACH_POP (alg_exp_node, node, &ct->alg_expectations) {
free(alg_exp_node);
}
hmap_destroy(&ct->alg_expectations);
hindex_destroy(&ct->alg_expectation_refs);
ovs_rwlock_unlock(&ct->resources_lock);
ovs_rwlock_destroy(&ct->resources_lock);
ipf_destroy(ct->ipf);
free(ct);
}
| 0 |
[
"CWE-400"
] |
ovs
|
3512fb512c76a1f08eba4005aa2eb69160d0840e
| 318,973,206,805,164,300,000,000,000,000,000,000,000 | 35 |
flow: Support extra padding length.
Although not required, padding can be optionally added until
the packet length is MTU bytes. A packet with extra padding
currently fails sanity checks.
Vulnerability: CVE-2020-35498
Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.")
Reported-by: Joakim Hindersson <[email protected]>
Acked-by: Ilya Maximets <[email protected]>
Signed-off-by: Flavio Leitner <[email protected]>
Signed-off-by: Ilya Maximets <[email protected]>
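The relaxed check can be illustrated with a tiny predicate; this is a sketch of the rule from the message, not OVS's actual parser code:

```c
#include <stdbool.h>
#include <stddef.h>

/* A frame may legitimately carry more L3 bytes than the IP total length
 * claims (trailing pad up to the MTU); only *fewer* bytes than claimed
 * means the packet is truncated and must be rejected. */
static bool ip_length_sane(size_t l3_bytes_present, size_t ip_total_len)
{
    return l3_bytes_present >= ip_total_len;
}
```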
|
static void get_delta_sk(X509_STORE_CTX *ctx, X509_CRL **dcrl, int *pscore,
X509_CRL *base, STACK_OF(X509_CRL) *crls)
{
X509_CRL *delta;
int i;
if (!(ctx->param->flags & X509_V_FLAG_USE_DELTAS))
return;
if (!((ctx->current_cert->ex_flags | base->flags) & EXFLAG_FRESHEST))
return;
for (i = 0; i < sk_X509_CRL_num(crls); i++) {
delta = sk_X509_CRL_value(crls, i);
if (check_delta_base(delta, base)) {
if (check_crl_time(ctx, delta, 0))
*pscore |= CRL_SCORE_TIME_DELTA;
CRYPTO_add(&delta->references, 1, CRYPTO_LOCK_X509_CRL);
*dcrl = delta;
return;
}
}
*dcrl = NULL;
}
| 0 |
[
"CWE-119"
] |
openssl
|
370ac320301e28bb615cee80124c042649c95d14
| 232,064,945,544,797,900,000,000,000,000,000,000,000 | 21 |
Fix length checks in X509_cmp_time to avoid out-of-bounds reads.
Also tighten X509_cmp_time to reject more than three fractional
seconds in the time; and to reject trailing garbage after the offset.
CVE-2015-1789
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Richard Levitte <[email protected]>
|
static void opj_j2k_write_float_to_float64 (const void * p_src_data, void * p_dest_data, OPJ_UINT32 p_nb_elem)
{
OPJ_BYTE * l_dest_data = (OPJ_BYTE *) p_dest_data;
OPJ_FLOAT32 * l_src_data = (OPJ_FLOAT32 *) p_src_data;
OPJ_UINT32 i;
OPJ_FLOAT64 l_temp;
for (i=0;i<p_nb_elem;++i) {
l_temp = (OPJ_FLOAT64) *(l_src_data++);
opj_write_double(l_dest_data,l_temp);
l_dest_data+=sizeof(OPJ_FLOAT64);
}
}
| 0 |
[
"CWE-416"
] |
openjpeg
|
940100c28ae28931722290794889cf84a92c5f6f
| 40,179,991,728,208,800,000,000,000,000,000,000,000 | 15 |
Fix potential use-after-free in opj_j2k_write_mco function
Fixes #563
|
static void v4l_print_newline(const void *arg, bool write_only)
{
pr_cont("\n");
}
| 0 |
[
"CWE-401"
] |
linux
|
fb18802a338b36f675a388fc03d2aa504a0d0899
| 107,356,853,880,073,720,000,000,000,000,000,000,000 | 4 |
media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with argument size larger than 128 that also used array
arguments were handled, two memory allocations were made but alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
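The bug class is easy to show outside the kernel; a sketch with userland `malloc`/`free`, where the fix is simply giving each temporary allocation its own variable (the handler name is hypothetical):

```c
#include <stdlib.h>

/* Hypothetical ioctl-style handler: both buffers can be live at once, so
 * reusing one pointer for the second allocation would leak the first. */
static int handle_request(size_t arg_size, size_t array_size)
{
    void *mbuf = NULL;
    void *array_buf = NULL;   /* separate variable: the essence of the fix */
    int err = -1;

    mbuf = malloc(arg_size);
    if (!mbuf)
        goto out;
    array_buf = malloc(array_size);
    if (!array_buf)
        goto out;

    /* ... copy argument in, dispatch, copy results out ... */
    err = 0;
out:
    free(array_buf);   /* free(NULL) is a no-op, so this is safe early */
    free(mbuf);
    return err;
}
```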
|
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
{
pstack->params->allow_expansion = expand;
}
| 0 |
[] |
ghostpdl
|
13b0a36f8181db66a91bcc8cea139998b53a8996
| 308,027,876,560,943,670,000,000,000,000,000,000,000 | 4 |
Sanitize op stack for error conditions
We save the stacks to an array and store the array for the error handler to
access.
For SAFER, we traverse the array, and deep copy any op arrays (procedures). As
we make these copies, we check for operators that do *not* exist in systemdict,
when we find one, we replace the operator with a name object (of the form
"/--opname--").
|
f_test_feedinput(typval_T *argvars, typval_T *rettv UNUSED)
{
#ifdef USE_INPUT_BUF
char_u *val = tv_get_string_chk(&argvars[0]);
if (val != NULL)
{
trash_input_buf();
add_to_input_buf_csi(val, (int)STRLEN(val));
}
#endif
}
| 0 |
[
"CWE-78"
] |
vim
|
8c62a08faf89663e5633dc5036cd8695c80f1075
| 158,990,679,315,999,520,000,000,000,000,000,000,000 | 12 |
patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others.
|
free_efm_list(efm_T **efm_first)
{
efm_T *efm_ptr;
for (efm_ptr = *efm_first; efm_ptr != NULL; efm_ptr = *efm_first)
{
*efm_first = efm_ptr->next;
vim_regfree(efm_ptr->prog);
vim_free(efm_ptr);
}
fmt_start = NULL;
}
| 0 |
[
"CWE-416"
] |
vim
|
4f1b083be43f351bc107541e7b0c9655a5d2c0bb
| 157,745,312,941,154,910,000,000,000,000,000,000,000 | 12 |
patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any.
|
static double php_apache_sapi_get_request_time(TSRMLS_D)
{
php_struct *ctx = SG(server_context);
return ((double) apr_time_as_msec(ctx->r->request_time)) / 1000.0;
}
| 0 |
[
"CWE-20"
] |
php-src
|
809610f5ea38a83b284e1125d1fff129bdd615e7
| 320,106,180,472,151,930,000,000,000,000,000,000,000 | 5 |
Fix bug #68486 and bug #69218 (segfault in apache2handler with apache 2.4)
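The function shown above dereferences `ctx->r` unconditionally; a hedged sketch of the kind of guard such a fix needs, assuming the segfault comes from a missing server context (the commit message does not spell this out):

```c
static double php_apache_sapi_get_request_time(TSRMLS_D)
{
	php_struct *ctx = SG(server_context);

	/* Assumed guard: with no active request there is nothing to read. */
	if (!ctx || !ctx->r) {
		return 0.0;
	}
	return ((double) apr_time_as_msec(ctx->r->request_time)) / 1000.0;
}
```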
|
static __init int seqgen_init(void)
{
rekey_seq_generator(NULL);
return 0;
}
| 1 |
[
"CWE-703"
] |
linux
|
6e5714eaf77d79ae1c8b47e3e040ff5411b717ec
| 258,879,742,463,399,870,000,000,000,000,000,000,000 | 5 |
net: Compute protocol sequence numbers and fragment IDs using MD5.
Computers have become a lot faster since we compromised on the
partial MD4 hash which we use currently for performance reasons.
MD5 is a much safer choice, and is inline with both RFC1948 and
other ISS generators (OpenBSD, Solaris, etc.)
Furthermore, only having 24-bits of the sequence number be truly
unpredictable is a very serious limitation. So the periodic
regeneration and 8-bit counter have been removed. We compute and
use a full 32-bit sequence number.
For ipv6, DCCP was found to use a 32-bit truncated initial sequence
number (it needs 43-bits) and that is fixed here as well.
Reported-by: Dan Kaminsky <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
bgp_attr_parse (struct peer *peer, struct attr *attr, bgp_size_t size,
struct bgp_nlri *mp_update, struct bgp_nlri *mp_withdraw)
{
int ret;
u_char flag = 0;
u_char type = 0;
bgp_size_t length;
u_char *startp, *endp;
u_char *attr_endp;
u_char seen[BGP_ATTR_BITMAP_SIZE];
/* we need the as4_path only until we have synthesized the as_path with it */
/* same goes for as4_aggregator */
struct aspath *as4_path = NULL;
as_t as4_aggregator = 0;
struct in_addr as4_aggregator_addr = { 0 };
/* Initialize bitmap. */
memset (seen, 0, BGP_ATTR_BITMAP_SIZE);
/* End pointer of BGP attribute. */
endp = BGP_INPUT_PNT (peer) + size;
/* Get attributes to the end of attribute length. */
while (BGP_INPUT_PNT (peer) < endp)
{
/* Check the remaining length. */
if (endp - BGP_INPUT_PNT (peer) < BGP_ATTR_MIN_LEN)
{
/* XXX warning: long int format, int arg (arg 5) */
zlog (peer->log, LOG_WARNING,
"%s: error BGP attribute length %lu is smaller than min len",
peer->host,
(unsigned long) (endp - STREAM_PNT (BGP_INPUT (peer))));
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
return BGP_ATTR_PARSE_ERROR;
}
/* Fetch attribute flag and type. */
startp = BGP_INPUT_PNT (peer);
/* "The lower-order four bits of the Attribute Flags octet are
unused. They MUST be zero when sent and MUST be ignored when
received." */
flag = 0xF0 & stream_getc (BGP_INPUT (peer));
type = stream_getc (BGP_INPUT (peer));
/* Check whether Extended-Length applies and is in bounds */
if (CHECK_FLAG (flag, BGP_ATTR_FLAG_EXTLEN)
&& ((endp - startp) < (BGP_ATTR_MIN_LEN + 1)))
{
zlog (peer->log, LOG_WARNING,
"%s: Extended length set, but just %lu bytes of attr header",
peer->host,
(unsigned long) (endp - STREAM_PNT (BGP_INPUT (peer))));
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
return BGP_ATTR_PARSE_ERROR;
}
/* Check the extended attribute length bit. */
if (CHECK_FLAG (flag, BGP_ATTR_FLAG_EXTLEN))
length = stream_getw (BGP_INPUT (peer));
else
length = stream_getc (BGP_INPUT (peer));
/* If any attribute appears more than once in the UPDATE
message, then the Error Subcode is set to Malformed Attribute
List. */
if (CHECK_BITMAP (seen, type))
{
zlog (peer->log, LOG_WARNING,
"%s: error BGP attribute type %d appears twice in a message",
peer->host, type);
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_ATTR);
return BGP_ATTR_PARSE_ERROR;
}
/* Set type in the bitmap to check for duplicate attributes. `type' is
unsigned char, so it can never overflow the bitmap range. */
SET_BITMAP (seen, type);
/* Overflow check. */
attr_endp = BGP_INPUT_PNT (peer) + length;
if (attr_endp > endp)
{
zlog (peer->log, LOG_WARNING,
"%s: BGP type %d length %d is too large, attribute total length is %d. attr_endp is %p. endp is %p", peer->host, type, length, size, attr_endp, endp);
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
return BGP_ATTR_PARSE_ERROR;
}
struct bgp_attr_parser_args attr_args = {
.peer = peer,
.length = length,
.attr = attr,
.type = type,
.flags = flag,
.startp = startp,
.total = attr_endp - startp,
};
/* If any recognized attribute has Attribute Flags that conflict
with the Attribute Type Code, then the Error Subcode is set to
Attribute Flags Error. The Data field contains the erroneous
attribute (type, length and value). */
if (bgp_attr_flag_invalid (&attr_args))
return bgp_attr_malformed (&attr_args,
BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR,
attr_args.total);
/* OK, check the attribute and store its value. */
switch (type)
{
case BGP_ATTR_ORIGIN:
ret = bgp_attr_origin (&attr_args);
break;
case BGP_ATTR_AS_PATH:
ret = bgp_attr_aspath (&attr_args);
break;
case BGP_ATTR_AS4_PATH:
ret = bgp_attr_as4_path (&attr_args, &as4_path);
break;
case BGP_ATTR_NEXT_HOP:
ret = bgp_attr_nexthop (&attr_args);
break;
case BGP_ATTR_MULTI_EXIT_DISC:
ret = bgp_attr_med (&attr_args);
break;
case BGP_ATTR_LOCAL_PREF:
ret = bgp_attr_local_pref (&attr_args);
break;
case BGP_ATTR_ATOMIC_AGGREGATE:
ret = bgp_attr_atomic (&attr_args);
break;
case BGP_ATTR_AGGREGATOR:
ret = bgp_attr_aggregator (&attr_args);
break;
case BGP_ATTR_AS4_AGGREGATOR:
ret = bgp_attr_as4_aggregator (&attr_args,
&as4_aggregator,
&as4_aggregator_addr);
break;
case BGP_ATTR_COMMUNITIES:
ret = bgp_attr_community (&attr_args);
break;
case BGP_ATTR_ORIGINATOR_ID:
ret = bgp_attr_originator_id (&attr_args);
break;
case BGP_ATTR_CLUSTER_LIST:
ret = bgp_attr_cluster_list (&attr_args);
break;
case BGP_ATTR_MP_REACH_NLRI:
ret = bgp_mp_reach_parse (&attr_args, mp_update);
break;
case BGP_ATTR_MP_UNREACH_NLRI:
ret = bgp_mp_unreach_parse (&attr_args, mp_withdraw);
break;
case BGP_ATTR_EXT_COMMUNITIES:
ret = bgp_attr_ext_communities (&attr_args);
break;
default:
ret = bgp_attr_unknown (&attr_args);
break;
}
/* If a hard error occurred, immediately return to the caller. */
if (ret == BGP_ATTR_PARSE_ERROR)
{
zlog (peer->log, LOG_WARNING,
"%s: Attribute %s, parse error",
peer->host,
LOOKUP (attr_str, type));
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_MAL_ATTR);
if (as4_path)
aspath_unintern (&as4_path);
return ret;
}
if (ret == BGP_ATTR_PARSE_WITHDRAW)
{
zlog (peer->log, LOG_WARNING,
"%s: Attribute %s, parse error - treating as withdrawal",
peer->host,
LOOKUP (attr_str, type));
if (as4_path)
aspath_unintern (&as4_path);
return ret;
}
/* Check the fetched length. */
if (BGP_INPUT_PNT (peer) != attr_endp)
{
zlog (peer->log, LOG_WARNING,
"%s: BGP attribute %s, fetch error",
peer->host, LOOKUP (attr_str, type));
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
if (as4_path)
aspath_unintern (&as4_path);
return BGP_ATTR_PARSE_ERROR;
}
}
/* Check final read pointer is same as end pointer. */
if (BGP_INPUT_PNT (peer) != endp)
{
zlog (peer->log, LOG_WARNING,
"%s: BGP attribute %s, length mismatch",
peer->host, LOOKUP (attr_str, type));
bgp_notify_send (peer,
BGP_NOTIFY_UPDATE_ERR,
BGP_NOTIFY_UPDATE_ATTR_LENG_ERR);
if (as4_path)
aspath_unintern (&as4_path);
return BGP_ATTR_PARSE_ERROR;
}
/*
 * At this point we can see whether we got AS4_PATH and/or
 * AS4_AGGREGATOR from a 16-bit peer and act accordingly.
 * We cannot do this before we have read all attributes, because
 * the as4 handling does not say whether AS4_PATH has to be sent
 * after AS_PATH or not - and likewise when AS4_AGGREGATOR will be
 * sent in relation to AGGREGATOR.
 * So, to be defensive, we do not rely on any order: we read all
 * attributes first, including these 32-bit ones, and only
 * afterwards decide what, if anything, needs to be done for as4.
 */
if (bgp_attr_munge_as4_attrs (peer, attr, as4_path,
as4_aggregator, &as4_aggregator_addr))
{
if (as4_path)
aspath_unintern (&as4_path);
return BGP_ATTR_PARSE_ERROR;
}
/* At this stage we have done all the fiddling with as4, and the
 * resulting info is in attr->aggregator and attr->aspath respectively,
 * so we can discard as4_aggregator and as4_path altogether to
 * save memory.
 */
if (as4_path)
{
aspath_unintern (&as4_path); /* unintern - it is in the hash */
/* The flag recording that we got this is still set, but that
 * causes no trouble.
 */
}
/*
 * The "rest" of the code does nothing with as4_aggregator.
 * There is no memory attached to it that is not already part
 * of the attr, so ignoring it simply means doing nothing.
 */
/*
* Finally do the checks on the aspath we did not do yet
* because we waited for a potentially synthesized aspath.
*/
if (attr->flag & (ATTR_FLAG_BIT(BGP_ATTR_AS_PATH)))
{
ret = bgp_attr_aspath_check (peer, attr);
if (ret != BGP_ATTR_PARSE_PROCEED)
return ret;
}
/* Finally intern unknown attribute. */
if (attr->extra && attr->extra->transit)
attr->extra->transit = transit_intern (attr->extra->transit);
return BGP_ATTR_PARSE_PROCEED;
}
| 0 |
[] |
quagga
|
835315bfb49bff2b2fb354f2075c6d6693c2a151
| 181,317,286,046,098,260,000,000,000,000,000,000,000 | 287 |
bgpd: Move up flag-check calls, parcel up attr-parser args, and other cleanups
* bgp_attr.h: (struct bgp_attr_parser_args) Attribute parsing context,
containing common arguments.
* bgp_attr.c: (general) Move the bgp_attr_flag_invalid flag-check calls up,
out of each individual attr parser function, to be done once in attr_parse.
Similarly move the calculation of the 'total' attribute length field up
to attr_parse.
Bundle together common arguments to attr-parsing functions and helpers
into (struct bgp_attr_parser_args), so it can be passed by reference down
the stack & also de-clutter the argument lists & make it easier to
add/modify the context for attr-parsing - add local const aliases to avoid
modifying body of code too much. This also should help avoid cut & paste
errors, where calls to helpers with hard-coded attribute types are pasted
to other functions but the code isn't changed.
(bgp_attr_flags_diagnose) as above.
(bgp_attr_flag_invalid) as above.
(bgp_attr_{origin,aspath,as4_path,nexthop,med,local_pref,atomic}) as above.
(bgp_attr_{aggregator,as4_aggregator,community,originator_id}) as above
(bgp_attr_{cluster_list,ext_communities},bgp_mp_{un,}reach_parse) as above
(bgp_attr_unknown) as above.
(bgp_attr_malformed) as above. Also, startp and length have to be
special-cased, because whether or not to send attribute data depends
on the particular error - a separate length argument, distinct from
args->length, indicates whether or not the attribute data should be sent
in the NOTIFY.
(bgp_attr_aspath_check) Call to bgp_attr_malformed is wrong here, there is
no attribute parsing context - e.g. the 'flag' argument is unlikely to be
right, remove it. Explicitly handle the error instead.
(bgp_attr_munge_as4_attrs) Flag argument is pointless.
As the comment notes, the check here is pointless as AS_PATH presence
already checked elsewhere.
(bgp_attr_parse) Do bgp_attr_flag_invalid call here.
Use (struct bgp_attr_parser_args) for args to attr parser functions.
Remove out-of-context 'flag' argument to as4 checking functions.
|
static OPJ_BOOL opj_j2k_read_poc(opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
OPJ_UINT32 i, l_nb_comp, l_tmp;
opj_image_t * l_image = 00;
OPJ_UINT32 l_old_poc_nb, l_current_poc_nb, l_current_poc_remaining;
OPJ_UINT32 l_chunk_size, l_comp_room;
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
opj_poc_t *l_current_poc = 00;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
l_image = p_j2k->m_private_image;
l_nb_comp = l_image->numcomps;
if (l_nb_comp <= 256) {
l_comp_room = 1;
} else {
l_comp_room = 2;
}
l_chunk_size = 5 + 2 * l_comp_room;
l_current_poc_nb = p_header_size / l_chunk_size;
l_current_poc_remaining = p_header_size % l_chunk_size;
if ((l_current_poc_nb <= 0) || (l_current_poc_remaining != 0)) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading POC marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
l_tcp = (p_j2k->m_specific_param.m_decoder.m_state == J2K_STATE_TPH) ?
&l_cp->tcps[p_j2k->m_current_tile_number] :
p_j2k->m_specific_param.m_decoder.m_default_tcp;
l_old_poc_nb = l_tcp->POC ? l_tcp->numpocs + 1 : 0;
l_current_poc_nb += l_old_poc_nb;
if (l_current_poc_nb >= 32) {
opj_event_msg(p_manager, EVT_ERROR, "Too many POCs %d\n", l_current_poc_nb);
return OPJ_FALSE;
}
assert(l_current_poc_nb < 32);
/* now poc is in use.*/
l_tcp->POC = 1;
l_current_poc = &l_tcp->pocs[l_old_poc_nb];
for (i = l_old_poc_nb; i < l_current_poc_nb; ++i) {
opj_read_bytes(p_header_data, &(l_current_poc->resno0),
1); /* RSpoc_i */
++p_header_data;
opj_read_bytes(p_header_data, &(l_current_poc->compno0),
l_comp_room); /* CSpoc_i */
p_header_data += l_comp_room;
opj_read_bytes(p_header_data, &(l_current_poc->layno1),
2); /* LYEpoc_i */
/* make sure layer end is in acceptable bounds */
l_current_poc->layno1 = opj_uint_min(l_current_poc->layno1, l_tcp->numlayers);
p_header_data += 2;
opj_read_bytes(p_header_data, &(l_current_poc->resno1),
1); /* REpoc_i */
++p_header_data;
opj_read_bytes(p_header_data, &(l_current_poc->compno1),
l_comp_room); /* CEpoc_i */
p_header_data += l_comp_room;
opj_read_bytes(p_header_data, &l_tmp,
1); /* Ppoc_i */
++p_header_data;
l_current_poc->prg = (OPJ_PROG_ORDER) l_tmp;
/* make sure comp is in acceptable bounds */
l_current_poc->compno1 = opj_uint_min(l_current_poc->compno1, l_nb_comp);
++l_current_poc;
}
l_tcp->numpocs = l_current_poc_nb - 1;
return OPJ_TRUE;
}
| 0 |
[
"CWE-416",
"CWE-787"
] |
openjpeg
|
4241ae6fbbf1de9658764a80944dc8108f2b4154
| 137,161,436,819,576,590,000,000,000,000,000,000,000 | 83 |
Fix assertion in debug mode / heap-based buffer overflow in opj_write_bytes_LE for Cinema profiles with numresolutions = 1 (#985)
|
void RGWInitMultipart::execute()
{
bufferlist aclbl;
map<string, bufferlist> attrs;
rgw_obj obj;
if (get_params() < 0)
return;
if (s->object.empty())
return;
policy.encode(aclbl);
attrs[RGW_ATTR_ACL] = aclbl;
populate_with_generic_attrs(s, attrs);
/* select encryption mode */
op_ret = prepare_encryption(attrs);
if (op_ret != 0)
return;
op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
do {
char buf[33];
gen_rand_alphanumeric(s->cct, buf, sizeof(buf) - 1);
upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
upload_id.append(buf);
string tmp_obj_name;
RGWMPObj mp(s->object.name, upload_id);
tmp_obj_name = mp.get_meta();
obj.init_ns(s->bucket, tmp_obj_name, mp_ns);
// the meta object will be indexed with 0 size, we c
obj.set_in_extra_data(true);
obj.index_hash_source = s->object.name;
RGWRados::Object op_target(store, s->bucket_info, *static_cast<RGWObjectCtx *>(s->obj_ctx), obj);
op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.owner = s->owner.get_id();
obj_op.meta.category = RGW_OBJ_CATEGORY_MULTIMETA;
obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
op_ret = obj_op.write_meta(0, 0, attrs);
} while (op_ret == -EEXIST);
}
| 0 |
[
"CWE-770"
] |
ceph
|
ab29bed2fc9f961fe895de1086a8208e21ddaddc
| 266,515,994,690,658,900,000,000,000,000,000,000,000 | 54 |
rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests
|
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
if (keyring->description)
seq_puts(m, keyring->description);
else
seq_puts(m, "[anon]");
if (key_is_instantiated(keyring)) {
if (keyring->keys.nr_leaves_on_tree != 0)
seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
seq_puts(m, ": empty");
}
}
| 1 |
[
"CWE-20"
] |
linux
|
363b02dab09b3226f3bd1420dad9c72b79a42a76
| 78,940,825,222,244,910,000,000,000,000,000,000,000 | 14 |
KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: [email protected] # v4.4+
Reported-by: Eric Biggers <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
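The ordering rules (1) and (2) map directly onto release/acquire semantics; a sketch in plain C11 atomics rather than the kernel's primitives, with a toy type standing in for struct key:

```c
#include <stdatomic.h>
#include <stddef.h>

/* Toy key: state < 0 is a negative (rejected) key holding -errno,
 * state > 0 is positive, 0 is uninstantiated. */
struct toy_key {
    void *payload;
    atomic_int state;
};

static void toy_instantiate(struct toy_key *k, void *payload)
{
    k->payload = payload;
    /* (1) release: the payload store is ordered before the state store */
    atomic_store_explicit(&k->state, 1, memory_order_release);
}

static void *toy_use(struct toy_key *k)
{
    /* (2) acquire: the state load is ordered before any payload load */
    if (atomic_load_explicit(&k->state, memory_order_acquire) > 0)
        return k->payload;
    return NULL;   /* uninstantiated or negatively instantiated */
}
```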
|
void fx_DataView_prototype_getInt16(txMachine* the)
{
fx_DataView_prototype_get(the, 2, fxInt16Getter);
}
| 0 |
[
"CWE-125"
] |
moddable
|
135aa9a4a6a9b49b60aa730ebc3bcc6247d75c45
| 102,022,822,878,109,760,000,000,000,000,000,000,000 | 4 |
XS: #896
|
void ftrace_modify_all_code(int command)
{
if (command & FTRACE_UPDATE_CALLS)
ftrace_replace_code(1);
else if (command & FTRACE_DISABLE_CALLS)
ftrace_replace_code(0);
if (command & FTRACE_UPDATE_TRACE_FUNC)
ftrace_update_ftrace_func(ftrace_trace_function);
if (command & FTRACE_START_FUNC_RET)
ftrace_enable_ftrace_graph_caller();
else if (command & FTRACE_STOP_FUNC_RET)
ftrace_disable_ftrace_graph_caller();
}
| 0 |
[
"CWE-703"
] |
linux
|
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
| 256,291,389,576,527,670,000,000,000,000,000,000,000 | 15 |
tracing: Fix possible NULL pointer dereferences
Currently set_ftrace_pid and set_graph_function files use seq_lseek
for their fops. However seq_open() is called only for FMODE_READ in
the fops->open() so that if an user tries to seek one of those file
when she open it for writing, it sees NULL seq_file and then panic.
It can be easily reproduced with following command:
$ cd /sys/kernel/debug/tracing
$ echo 1234 | sudo tee -a set_ftrace_pid
In this example, GNU coreutils' tee opens the file with fopen(, "a")
and then the fopen() internally calls lseek().
Link: http://lkml.kernel.org/r/[email protected]
Cc: Frederic Weisbecker <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: [email protected]
Signed-off-by: Namhyung Kim <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
|
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
AddressSpace *as;
while (mr->container) {
mr = mr->container;
}
QTAILQ_FOREACH(as, &mr->uc->address_spaces, address_spaces_link) {
if (mr == as->root) {
return as;
}
}
return NULL;
}
| 0 |
[
"CWE-476"
] |
unicorn
|
3d3deac5e6d38602b689c4fef5dac004f07a2e63
| 249,959,836,253,944,700,000,000,000,000,000,000,000 | 14 |
Fix crash when mapping a big memory and calling uc_close
|
routerlist_reset_warnings(void)
{
if (!warned_nicknames)
warned_nicknames = smartlist_create();
SMARTLIST_FOREACH(warned_nicknames, char *, cp, tor_free(cp));
smartlist_clear(warned_nicknames); /* now the list is empty. */
networkstatus_reset_warnings();
}
| 0 |
[
"CWE-399"
] |
tor
|
308f6dad20675c42b29862f4269ad1fbfb00dc9a
| 62,800,593,631,900,230,000,000,000,000,000,000,000 | 9 |
Mitigate a side-channel leak of which relays Tor chooses for a circuit
Tor's and OpenSSL's current design guarantee that there are other leaks,
but this one is likely to be more easily exploitable, and is easy to fix.
|
gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
int wid;
int w, wstart;
int thick = im->thick;
/* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no points need to be drawn */
if (!clip_1d(&x1,&y1,&x2,&y2,gdImageSX(im)) || !clip_1d(&y1,&x1,&y2,&x2,gdImageSY(im))) {
return;
}
dx = abs(x2 - x1);
dy = abs(y2 - y1);
if (dy <= dx) {
/* More-or-less horizontal. use wid for vertical stroke */
/* Doug Claar: watch out for NaN in atan2 (2.0.5) */
if ((dx == 0) && (dy == 0)) {
wid = 1;
} else {
wid = (int)(thick * cos (atan2 (dy, dx)));
if (wid == 0) {
wid = 1;
}
}
d = 2 * dy - dx;
incr1 = 2 * dy;
incr2 = 2 * (dy - dx);
if (x1 > x2) {
x = x2;
y = y2;
ydirflag = (-1);
xend = x1;
} else {
x = x1;
y = y1;
ydirflag = 1;
xend = x2;
}
/* Set up line thickness */
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel(im, x, w, color);
}
if (((y2 - y1) * ydirflag) > 0) {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y++;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, x, w, color);
}
}
} else {
while (x < xend) {
x++;
if (d < 0) {
d += incr1;
} else {
y--;
d += incr2;
}
wstart = y - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, x, w, color);
}
}
}
} else {
/* More-or-less vertical. use wid for horizontal stroke */
wid = (int)(thick * sin (atan2 (dy, dx)));
if (wid == 0) {
wid = 1;
}
d = 2 * dx - dy;
incr1 = 2 * dx;
incr2 = 2 * (dx - dy);
if (y1 > y2) {
y = y2;
x = x2;
yend = y1;
xdirflag = (-1);
} else {
y = y1;
x = x1;
yend = y2;
xdirflag = 1;
}
/* Set up line thickness */
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
if (((x2 - x1) * xdirflag) > 0) {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x++;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
}
} else {
while (y < yend) {
y++;
if (d < 0) {
d += incr1;
} else {
x--;
d += incr2;
}
wstart = x - wid / 2;
for (w = wstart; w < wstart + wid; w++) {
gdImageSetPixel (im, w, y, color);
}
}
}
}
}
| 0 |
[
"CWE-119"
] |
php-src
|
feba44546c27b0158f9ac20e72040a224b918c75
| 135,054,822,004,715,500,000,000,000,000,000,000,000 | 134 |
Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()).
|
void CLASS wavelet_denoise()
{
float *fimg = 0, *temp, thold, mul[2], avg, diff;
int scale = 1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
ushort *window[4];
static const float noise[] = {0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044};
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf(stderr, _("Wavelet denoising...\n"));
#endif
while (maximum << scale < 0x10000)
scale++;
maximum <<= --scale;
black <<= scale;
FORC4 cblack[c] <<= scale;
if ((size = iheight * iwidth) < 0x15550000)
fimg = (float *)malloc((size * 3 + iheight + iwidth) * sizeof *fimg);
merror(fimg, "wavelet_denoise()");
temp = fimg + size * 3;
if ((nc = colors) == 3 && filters)
nc++;
#pragma omp parallel default(shared) private(i, col, row, thold, lev, lpass, hpass, temp, c) firstprivate(scale, size)
{
#pragma omp critical /* LibRaw's malloc is not local thread-safe */
temp = (float *)malloc((iheight + iwidth) * sizeof *fimg);
FORC(nc)
{ /* denoise R,G1,B,G3 individually */
#pragma omp for
for (i = 0; i < size; i++)
fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
for (hpass = lev = 0; lev < 5; lev++)
{
lpass = size * ((lev & 1) + 1);
#pragma omp for
for (row = 0; row < iheight; row++)
{
hat_transform(temp, fimg + hpass + row * iwidth, 1, iwidth, 1 << lev);
for (col = 0; col < iwidth; col++)
fimg[lpass + row * iwidth + col] = temp[col] * 0.25;
}
#pragma omp for
for (col = 0; col < iwidth; col++)
{
hat_transform(temp, fimg + lpass + col, iwidth, iheight, 1 << lev);
for (row = 0; row < iheight; row++)
fimg[lpass + row * iwidth + col] = temp[row] * 0.25;
}
thold = threshold * noise[lev];
#pragma omp for
for (i = 0; i < size; i++)
{
fimg[hpass + i] -= fimg[lpass + i];
if (fimg[hpass + i] < -thold)
fimg[hpass + i] += thold;
else if (fimg[hpass + i] > thold)
fimg[hpass + i] -= thold;
else
fimg[hpass + i] = 0;
if (hpass)
fimg[i] += fimg[hpass + i];
}
hpass = lpass;
}
#pragma omp for
for (i = 0; i < size; i++)
image[i][c] = CLIP(SQR(fimg[i] + fimg[lpass + i]) / 0x10000);
}
#pragma omp critical
free(temp);
} /* end omp parallel */
/* The following loops are hard to parallelize, no idea yet;
 * the problem is wlast, which carries a dependency.
 * The second part should be easier, but I did not yet get it right.
 */
if (filters && colors == 3)
{ /* pull G1 and G3 closer together */
for (row = 0; row < 2; row++)
{
mul[row] = 0.125 * pre_mul[FC(row + 1, 0) | 1] / pre_mul[FC(row, 0) | 1];
blk[row] = cblack[FC(row, 0) | 1];
}
for (i = 0; i < 4; i++)
window[i] = (ushort *)fimg + width * i;
for (wlast = -1, row = 1; row < height - 1; row++)
{
while (wlast < row + 1)
{
for (wlast++, i = 0; i < 4; i++)
window[(i + 3) & 3] = window[i];
for (col = FC(wlast, 1) & 1; col < width; col += 2)
window[2][col] = BAYER(wlast, col);
}
thold = threshold / 512;
for (col = (FC(row, 0) & 1) + 1; col < width - 1; col += 2)
{
avg = (window[0][col - 1] + window[0][col + 1] + window[2][col - 1] + window[2][col + 1] - blk[~row & 1] * 4) *
mul[row & 1] +
(window[1][col] + blk[row & 1]) * 0.5;
avg = avg < 0 ? 0 : sqrt(avg);
diff = sqrt((double)BAYER(row, col)) - avg;
if (diff < -thold)
diff += thold;
else if (diff > thold)
diff -= thold;
else
diff = 0;
BAYER(row, col) = CLIP(SQR(avg + diff) + 0.5);
}
}
}
free(fimg);
}
| 0 |
[
"CWE-787"
] |
LibRaw
|
fbf60377c006eaea8d3eca3f5e4c654909dcdfd2
| 27,866,559,292,149,037,000,000,000,000,000,000,000 | 114 |
possible buffer overrun in Fuji makernotes parser
|
_c_public_ int c_shquote_quote(char **outp,
size_t *n_outp,
const char *in,
size_t n_in) {
size_t n_out = *n_outp;
char *out = *outp;
int r;
/*
* We always prepend and append a single quote. This will not produce
* optimal output, but ensures we produce the same output as other
* implementations do. If optimal output is needed, we can always
* provide an alternative implementation.
*/
r = c_shquote_append_char(&out, &n_out, '\'');
if (r)
return r;
while (n_in > 0) {
size_t len;
if (*in == '\'') {
const char *escape = "'\\''";
c_shquote_skip_char(&in, &n_in);
r = c_shquote_append_str(&out, &n_out, escape, strlen(escape));
if (r)
return r;
} else {
/*
* Consume until the next single quote. If none exists,
* consume the rest of the string.
*/
len = c_shquote_strncspn(in, n_in, "'");
r = c_shquote_consume_str(&out, &n_out, &in, &n_in, len);
if (r)
return r;
}
}
r = c_shquote_append_char(&out, &n_out, '\'');
if (r)
return r;
*outp = out;
*n_outp = n_out;
return 0;
}
| 0 |
[
"CWE-787"
] |
c-shquote
|
7fd15f8e272136955f7ffc37df29fbca9ddceca1
| 90,315,678,858,149,170,000,000,000,000,000,000,000 | 50 |
strnspn: fix buffer overflow
Fix the strnspn and strncspn functions to use a properly sized buffer.
It used to be 1 byte too short. Checking for `0xff` in a string will
thus write `0xff` once byte beyond the stack space of the local buffer.
Note that the public API does not allow to pass `0xff` to those
functions. Therefore, this is a read-only buffer overrun, possibly
causing bogus reports from the parser, but still well-defined.
Reported-by: Steffen Robertz
Signed-off-by: David Rheinsberg <[email protected]>
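The off-by-one is in the size of a per-byte lookup table; a self-contained sketch of a correctly sized `strncspn`-style scan (names are illustrative, not c-shquote's internals):

```c
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

/* A table indexed by unsigned char needs UCHAR_MAX + 1 entries; one byte
 * short, marking 0xff writes just past the local buffer, as reported. */
static size_t span_until_reject(const char *s, size_t n, const char *reject)
{
    bool stop[UCHAR_MAX + 1] = { false };
    size_t i;

    for (i = 0; reject[i] != '\0'; ++i)
        stop[(unsigned char)reject[i]] = true;
    for (i = 0; i < n; ++i)
        if (stop[(unsigned char)s[i]])
            break;
    return i;
}
```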
|
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->packets_out -= decr;
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= decr;
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= decr;
if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
tp->lost_out -= decr;
/* Reno case is special. Sigh... */
if (tcp_is_reno(tp) && decr > 0)
tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
if (tp->lost_skb_hint &&
before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
tp->lost_cnt_hint -= decr;
tcp_verify_left_out(tp);
}
| 0 |
[
"CWE-190"
] |
net
|
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
| 224,183,515,290,566,460,000,000,000,000,000,000,000 | 24 |
tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertized the smallest
MSS that linux TCP accepts : 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
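The arithmetic behind the overflow is worth spelling out; a quick check of the u16 ceiling under the minimum accepted MSS:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t min_mss = 48;      /* smallest MSS Linux accepts */
    const uint32_t u16_max = 65535;   /* width of tcp_gso_segs */

    /* One skb: 17 fragments of 64 KB each (PowerPC). */
    uint32_t skb_bytes = 17 * 64 * 1024;   /* 1,114,112 bytes */
    /* SACK coalescing merges skbs, so the ceiling is what matters: */
    uint32_t limit = u16_max * min_mss;    /* 3,145,680 bytes */

    printf("segments per maximal skb: %u\n", skb_bytes / min_mss);
    printf("bytes until u16 overflow: %u (~3 MB)\n", limit);
    return 0;
}
```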
|
int Item::save_int_in_field(Field *field, bool no_conversions)
{
longlong nr= val_int();
if (null_value)
return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
return field->store(nr, unsigned_flag);
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 173,721,939,836,920,130,000,000,000,000,000,000,000 | 8 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
static void esp_lower_irq(ESPState *s)
{
if (s->rregs[ESP_RSTAT] & STAT_INT) {
s->rregs[ESP_RSTAT] &= ~STAT_INT;
qemu_irq_lower(s->irq);
trace_esp_lower_irq();
}
}
| 0 |
[
"CWE-787"
] |
qemu
|
926cde5f3e4d2504ed161ed0cb771ac7cad6fd11
| 139,239,080,008,458,640,000,000,000,000,000,000,000 | 8 |
scsi: esp: make cmdbuf big enough for maximum CDB size
While doing DMA read into ESP command buffer 's->cmdbuf', it could
write past the 's->cmdbuf' area, if it was transferring more than 16
bytes. Increase the command buffer size to 32, which is maximum when
's->do_cmd' is set, and add a check on 'len' to avoid OOB access.
Reported-by: Li Qiang <[email protected]>
Signed-off-by: Prasad J Pandit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
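The added length check amounts to clamping the DMA transfer to the space left in the command buffer; a sketch with illustrative names, not QEMU's exact code:

```c
#include <stddef.h>
#include <string.h>

/* Append DMA bytes into a fixed-size command buffer without overrunning
 * it: clamp len to the remaining space, then copy. Returns the new
 * length (assumes cmdlen <= bufsize). */
static size_t cmdbuf_append(unsigned char *cmdbuf, size_t bufsize,
                            size_t cmdlen, const unsigned char *src,
                            size_t len)
{
    if (len > bufsize - cmdlen)
        len = bufsize - cmdlen;   /* the OOB guard described above */
    memcpy(cmdbuf + cmdlen, src, len);
    return cmdlen + len;
}
```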
|
static bool LookupPredicate(const int32_t* table, uint16_t size, uchar chr) {
static const int kEntryDist = 1;
uint16_t value = chr & (kChunkBits - 1);
unsigned int low = 0;
unsigned int high = size - 1;
while (high != low) {
unsigned int mid = low + ((high - low) >> 1);
uchar current_value = GetEntry(TableGet<kEntryDist>(table, mid));
// If we've found an entry less than or equal to this one, and the
// next one is not also less than this one, we've arrived.
if ((current_value <= value) &&
(mid + 1 == size ||
GetEntry(TableGet<kEntryDist>(table, mid + 1)) > value)) {
low = mid;
break;
} else if (current_value < value) {
low = mid + 1;
} else if (current_value > value) {
// If we've just checked the bottom-most value and it's not
// the one we're looking for, we're done.
if (mid == 0) break;
high = mid - 1;
}
}
int32_t field = TableGet<kEntryDist>(table, low);
uchar entry = GetEntry(field);
bool is_start = IsStart(field);
return (entry == value) || (entry < value && is_start);
}
| 0 |
[
"CWE-119"
] |
node
|
78b0e30954111cfaba0edbeee85450d8cbc6fdf6
| 242,960,315,321,535,600,000,000,000,000,000,000,000 | 29 |
deps: fix out-of-band write in utf8 decoder
Originally reported by: Kris Reeves <[email protected]>
Reviewed-By: Trevor Norris <[email protected]>
|
static void zgfx_history_buffer_ring_write(ZGFX_CONTEXT* zgfx, const BYTE* src, size_t count)
{
UINT32 front;
if (count <= 0)
return;
if (count > zgfx->HistoryBufferSize)
{
const size_t residue = count - zgfx->HistoryBufferSize;
count = zgfx->HistoryBufferSize;
src += residue;
zgfx->HistoryIndex = (zgfx->HistoryIndex + residue) % zgfx->HistoryBufferSize;
}
if (zgfx->HistoryIndex + count <= zgfx->HistoryBufferSize)
{
CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, count);
if ((zgfx->HistoryIndex += count) == zgfx->HistoryBufferSize)
zgfx->HistoryIndex = 0;
}
else
{
front = zgfx->HistoryBufferSize - zgfx->HistoryIndex;
CopyMemory(&(zgfx->HistoryBuffer[zgfx->HistoryIndex]), src, front);
CopyMemory(zgfx->HistoryBuffer, &src[front], count - front);
zgfx->HistoryIndex = count - front;
}
}
| 0 |
[
"CWE-119",
"CWE-125",
"CWE-787"
] |
FreeRDP
|
17c363a5162fd4dc77b1df54e48d7bd9bf6b3be7
| 191,934,114,109,007,830,000,000,000,000,000,000,000 | 30 |
Fixed CVE-2018-8784
Thanks to Eyal Itkin from Check Point Software Technologies.
|
XML_SetEntityDeclHandler(XML_Parser parser, XML_EntityDeclHandler handler) {
if (parser != NULL)
parser->m_entityDeclHandler = handler;
}
| 0 |
[
"CWE-611",
"CWE-776",
"CWE-415",
"CWE-125"
] |
libexpat
|
c20b758c332d9a13afbbb276d30db1d183a85d43
| 6,247,720,006,478,737,000,000,000,000,000,000,000 | 4 |
xmlparse.c: Deny internal entities closing the doctype
|
PHP_MINFO_FUNCTION(openssl)
{
php_info_print_table_start();
php_info_print_table_row(2, "OpenSSL support", "enabled");
php_info_print_table_row(2, "OpenSSL Library Version", SSLeay_version(SSLEAY_VERSION));
php_info_print_table_row(2, "OpenSSL Header Version", OPENSSL_VERSION_TEXT);
php_info_print_table_row(2, "Openssl default config", default_ssl_conf_filename);
php_info_print_table_end();
DISPLAY_INI_ENTRIES();
}
| 0 |
[
"CWE-754"
] |
php-src
|
89637c6b41b510c20d262c17483f582f115c66d6
| 3,467,913,515,198,351,400,000,000,000,000,000,000 | 10 |
Fix bug #74651 - check EVP_SealInit as it can return -1
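Per the fix title, success must be tested explicitly, since `EVP_SealInit()` can return -1 as well as 0 on failure; a hedged sketch of the call-site pattern (variable names are illustrative):

```c
/* EVP_SealInit() returns the number of recipients on success and 0 or -1
 * on failure, so treat anything <= 0 as an error. */
if (EVP_SealInit(ctx, cipher, ek, ekl, iv, pubkeys, npubkeys) <= 0) {
    /* clean up and bail out instead of continuing with a bad context */
}
```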
|
_copyDistinctExpr(const DistinctExpr *from)
{
DistinctExpr *newnode = makeNode(DistinctExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
COPY_SCALAR_FIELD(opresulttype);
COPY_SCALAR_FIELD(opretset);
COPY_SCALAR_FIELD(opcollid);
COPY_SCALAR_FIELD(inputcollid);
COPY_NODE_FIELD(args);
COPY_LOCATION_FIELD(location);
return newnode;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 180,665,701,462,661,970,000,000,000,000,000,000,000 | 15 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
static void vnc_write_pixels_generic(VncState *vs,
void *pixels1, int size)
{
uint8_t buf[4];
if (VNC_SERVER_FB_BYTES == 4) {
uint32_t *pixels = pixels1;
int n, i;
n = size >> 2;
for (i = 0; i < n; i++) {
vnc_convert_pixel(vs, buf, pixels[i]);
vnc_write(vs, buf, vs->client_pf.bytes_per_pixel);
}
} else if (VNC_SERVER_FB_BYTES == 2) {
uint16_t *pixels = pixels1;
int n, i;
n = size >> 1;
for (i = 0; i < n; i++) {
vnc_convert_pixel(vs, buf, pixels[i]);
vnc_write(vs, buf, vs->client_pf.bytes_per_pixel);
}
} else if (VNC_SERVER_FB_BYTES == 1) {
uint8_t *pixels = pixels1;
int n, i;
n = size;
for (i = 0; i < n; i++) {
vnc_convert_pixel(vs, buf, pixels[i]);
vnc_write(vs, buf, vs->client_pf.bytes_per_pixel);
}
} else {
fprintf(stderr, "%s: VncState color depth not supported\n", __func__);
}
}
| 0 |
[
"CWE-125"
] |
qemu
|
9f64916da20eea67121d544698676295bbb105a7
| 203,353,971,033,265,070,000,000,000,000,000,000,000 | 33 |
pixman/vnc: use pixman images in vnc.
The vnc code uses *three* DisplaySurfaces:
First is the surface of the actual QemuConsole, usually the guest
screen, but could also be a text console (monitor/serial reachable via
Ctrl-Alt-<nr> keys). This is left as-is.
Second is the current server's view of the screen content. The vnc code
uses this to figure which parts of the guest screen did _really_ change
to reduce the amount of updates sent to the vnc clients. It is also
used as data source when sending out the updates to the clients. This
surface gets replaced by a pixman image. The format changes too,
instead of using the guest screen format we'll use fixed 32bit rgb
framebuffer and convert the pixels on the fly when comparing and
updating the server framebuffer.
Third surface carries the format expected by the vnc client. That isn't
used to store image data. This surface is switched to PixelFormat and a
boolean for bigendian byte order.
Signed-off-by: Gerd Hoffmann <[email protected]>
|
bool InstanceKlass::is_same_class_package(const Klass* class2) const {
oop classloader1 = this->class_loader();
PackageEntry* classpkg1 = this->package();
if (class2->is_objArray_klass()) {
class2 = ObjArrayKlass::cast(class2)->bottom_klass();
}
oop classloader2;
PackageEntry* classpkg2;
if (class2->is_instance_klass()) {
classloader2 = class2->class_loader();
classpkg2 = class2->package();
} else {
assert(class2->is_typeArray_klass(), "should be type array");
classloader2 = NULL;
classpkg2 = NULL;
}
// Same package is determined by comparing class loader
// and package entries. Both must be the same. This rule
// applies even to classes that are defined in the unnamed
// package, they still must have the same class loader.
if ((classloader1 == classloader2) && (classpkg1 == classpkg2)) {
return true;
}
return false;
}
| 0 |
[] |
jdk17u
|
f8eb9abe034f7c6bea4da05a9ea42017b3f80730
| 170,532,133,195,813,970,000,000,000,000,000,000,000 | 28 |
8270386: Better verification of scan methods
Reviewed-by: coleenp
Backport-of: ac329cef45979bd0159ecd1347e36f7129bb2ce4
|
gpg_sign_sync (CamelCipherContext *context,
const gchar *userid,
CamelCipherHash hash,
CamelMimePart *ipart,
CamelMimePart *opart,
GCancellable *cancellable,
GError **error)
{
struct _GpgCtx *gpg = NULL;
CamelCipherContextClass *class;
CamelStream *ostream = camel_stream_mem_new (), *istream;
CamelDataWrapper *dw;
CamelContentType *ct;
CamelMimePart *sigpart;
CamelMultipartSigned *mps;
gboolean success = FALSE;
/* Note: see rfc2015 or rfc3156, section 5 */
class = CAMEL_CIPHER_CONTEXT_GET_CLASS (context);
/* FIXME: stream this, we stream output at least */
istream = camel_stream_mem_new ();
if (camel_cipher_canonical_to_stream (
ipart, CAMEL_MIME_FILTER_CANON_STRIP |
CAMEL_MIME_FILTER_CANON_CRLF |
CAMEL_MIME_FILTER_CANON_FROM,
istream, NULL, error) == -1) {
g_prefix_error (
error, _("Could not generate signing data: "));
goto fail;
}
#ifdef GPG_LOG
if (camel_debug_start ("gpg:sign")) {
gchar *name;
CamelStream *out;
name = g_strdup_printf ("camel-gpg.%d.sign-data", logid++);
out = camel_stream_fs_new_with_name (name, O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (out) {
printf ("Writing gpg signing data to '%s'\n", name);
camel_stream_write_to_stream (istream, out);
g_seekable_seek (
G_SEEKABLE (istream), 0,
G_SEEK_SET, NULL, NULL);
g_object_unref (out);
}
g_free (name);
camel_debug_end ();
}
#endif
gpg = gpg_ctx_new (context);
gpg_ctx_set_mode (gpg, GPG_CTX_MODE_SIGN);
gpg_ctx_set_hash (gpg, hash);
gpg_ctx_set_armor (gpg, TRUE);
gpg_ctx_set_userid (gpg, userid);
gpg_ctx_set_istream (gpg, istream);
gpg_ctx_set_ostream (gpg, ostream);
if (!gpg_ctx_op_start (gpg, error))
goto fail;
while (!gpg_ctx_op_complete (gpg)) {
if (gpg_ctx_op_step (gpg, cancellable, error) == -1) {
gpg_ctx_op_cancel (gpg);
goto fail;
}
}
if (gpg_ctx_op_wait (gpg) != 0) {
const gchar *diagnostics;
diagnostics = gpg_ctx_get_diagnostics (gpg);
g_set_error (
error, CAMEL_ERROR, CAMEL_ERROR_GENERIC, "%s",
(diagnostics != NULL && *diagnostics != '\0') ?
diagnostics : _("Failed to execute gpg."));
goto fail;
}
success = TRUE;
dw = camel_data_wrapper_new ();
g_seekable_seek (G_SEEKABLE (ostream), 0, G_SEEK_SET, NULL, NULL);
camel_data_wrapper_construct_from_stream_sync (
dw, ostream, NULL, NULL);
sigpart = camel_mime_part_new ();
ct = camel_content_type_new ("application", "pgp-signature");
camel_content_type_set_param (ct, "name", "signature.asc");
camel_data_wrapper_set_mime_type_field (dw, ct);
camel_content_type_unref (ct);
camel_medium_set_content ((CamelMedium *) sigpart, dw);
g_object_unref (dw);
camel_mime_part_set_description (sigpart, "This is a digitally signed message part");
mps = camel_multipart_signed_new ();
ct = camel_content_type_new ("multipart", "signed");
camel_content_type_set_param (ct, "micalg", camel_cipher_context_hash_to_id (context, hash == CAMEL_CIPHER_HASH_DEFAULT ? gpg->hash : hash));
camel_content_type_set_param (ct, "protocol", class->sign_protocol);
camel_data_wrapper_set_mime_type_field ((CamelDataWrapper *) mps, ct);
camel_content_type_unref (ct);
camel_multipart_set_boundary ((CamelMultipart *) mps, NULL);
mps->signature = sigpart;
mps->contentraw = g_object_ref (istream);
g_seekable_seek (G_SEEKABLE (istream), 0, G_SEEK_SET, NULL, NULL);
camel_medium_set_content ((CamelMedium *) opart, (CamelDataWrapper *) mps);
fail:
g_object_unref (ostream);
if (gpg)
gpg_ctx_free (gpg);
return success;
}
| 0 |
[
"CWE-200"
] |
evolution-data-server
|
5d8b92c622f6927b253762ff9310479dd3ac627d
| 203,014,080,483,476,540,000,000,000,000,000,000,000 | 122 |
CamelGpgContext: Enclose email addresses in brackets.
The recipient list for encrypting can be specified by either key ID or
email address. Enclose email addresses in brackets to ensure an exact
match, as per the gpg man page:
HOW TO SPECIFY A USER ID
...
By exact match on an email address.
This is indicated by enclosing the email address in the
usual way with left and right angles.
<[email protected]>
Without the brackets gpg uses a substring match, which risks selecting
the wrong recipient.
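An illustrative sketch (hypothetical helper, not the patch itself) of
producing the exact-match form:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: enclose an email address in angle brackets so
 * gpg performs an exact match rather than a substring match.
 * The caller frees the result. */
static char *bracket_recipient (const char *addr)
{
	size_t len = strlen (addr) + 3;	/* '<' + addr + '>' + NUL */
	char *out = malloc (len);

	if (out != NULL)
		snprintf (out, len, "<%s>", addr);

	return out;
}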
|
void MarkWithTag(const StringPiece tag, NodeDef* node) {
AddNodeAttr(tag, true, node);
}
| 0 |
[
"CWE-476"
] |
tensorflow
|
e6340f0665d53716ef3197ada88936c2a5f7a2d3
| 327,281,410,790,122,250,000,000,000,000,000,000,000 | 3 |
Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
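A minimal sketch of the kind of guard this implies (the accessor name
is illustrative, not the actual Grappler API):

/* Bail out before dereferencing input 0 of a node that has none. */
if (num_inputs(node) == 0)
	return;	/* malformed graph: nothing to optimize here */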
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b
|
static inline bool cpu_has_vmx_eptp_writeback(void)
{
return vmx_capability.ept & VMX_EPTP_WB_BIT;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
9581d442b9058d3699b4be568b6e5eae38a41493
| 218,251,154,516,140,000,000,000,000,000,000,000,000 | 4 |
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
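Illustratively, the safe reload looks like this kernel-style fragment
(fs_sel/gs_sel are assumed saved host selectors; not runnable outside
the kernel):

/* loadsegment() and load_gs_index() fault gracefully and zero the
 * register if the LDT entry was invalidated, unlike a raw mov. */
loadsegment(fs, fs_sel);
load_gs_index(gs_sel);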
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
dst_hold(dst);
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 115,065,033,583,567,600,000,000,000,000,000,000,000 | 8 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
static int sapi_extract_response_code(const char *header_line)
{
int code = 200;
const char *ptr;
for (ptr = header_line; *ptr; ptr++) {
if (*ptr == ' ' && *(ptr + 1) != ' ') {
code = atoi(ptr + 1);
break;
}
}
return code;
}
| 1 |
[
"CWE-601"
] |
php-src
|
98b9dfaec95e6f910f125ed172cdbd25abd006ec
| 311,020,962,163,400,860,000,000,000,000,000,000,000 | 14 |
Fix for HTTP_PROXY issue.
The following changes are made:
- _SERVER/_ENV only has HTTP_PROXY if the local environment has it,
and only one from the environment.
- getenv('HTTP_PROXY') only returns one from the local environment
- getenv has optional second parameter, telling it to only consider
local environment
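A minimal sketch of the "local environment only" rule (hypothetical
helper, not the PHP implementation):

#include <stdlib.h>

/* Consult only the real process environment, never a value that a
 * request's Proxy: header could have injected into SAPI variables. */
static const char *trusted_http_proxy (void)
{
	return getenv ("HTTP_PROXY");	/* process environment only */
}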
|
ZEND_METHOD(exception, getPrevious)
{
zval *previous;
DEFAULT_0_PARAMS;
previous = zend_read_property(default_exception_ce, getThis(), "previous", sizeof("previous")-1, 1 TSRMLS_CC);
RETURN_ZVAL(previous, 1, 0);
}
| 0 |
[] |
php-src
|
a894a8155fab068d68a04bf181dbaddfa01ccbb0
| 110,648,977,039,498,790,000,000,000,000,000,000,000 | 9 |
More fixes for bug #69152
|
word_read_macro_info(int fd, macro_info_t *macro_info)
{
if(!read_uint16(fd, ¯o_info->count, FALSE)) {
cli_dbgmsg("read macro_info failed\n");
macro_info->count = 0;
return NULL;
}
cli_dbgmsg("macro count: %d\n", macro_info->count);
if(macro_info->count == 0)
return NULL;
macro_info->entries = (macro_entry_t *)cli_malloc(sizeof(macro_entry_t) * macro_info->count);
if(macro_info->entries == NULL) {
macro_info->count = 0;
return NULL;
}
if(!word_read_macro_entry(fd, macro_info)) {
free(macro_info->entries);
macro_info->count = 0;
return NULL;
}
return macro_info;
}
| 0 |
[
"CWE-399"
] |
clamav-devel
|
d21fb8d975f8c9688894a8cef4d50d977022e09f
| 207,733,052,550,284,200,000,000,000,000,000,000,000 | 22 |
libclamav/vba_extract.c: fix error path double free (bb#2486)
|
static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, const u8 *mac)
{
struct rtl8xxxu_priv *priv = hw->priv;
u8 val8;
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
val8 |= BEACON_DISABLE_TSF_UPDATE;
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c
| 91,267,180,463,007,090,000,000,000,000,000,000,000 | 10 |
rtl8xxxu: prevent leaking urb
In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb
should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Reviewed-by: Chris Chiu <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
static void setup_rw_floppy(void)
{
int i;
int r;
int flags;
unsigned long ready_date;
void (*function)(void);
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
flags |= FD_RAW_INTR;
if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
ready_date = DRS->spinup_date + DP->spinup;
/* If spinup will take a long time, rerun scandrives
* again just before spinup completion. Beware that
* after scandrives, we must again wait for selection.
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
function = floppy_start;
} else
function = setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
return;
}
if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
setup_DMA();
if (flags & FD_RAW_INTR)
do_floppy = main_command_interrupt;
r = 0;
for (i = 0; i < raw_cmd->cmd_count; i++)
r |= output_byte(raw_cmd->cmd[i]);
debugt(__func__, "rw_command");
if (r) {
cont->error();
reset_fdc();
return;
}
if (!(flags & FD_RAW_INTR)) {
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
fd_watchdog();
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
linux
|
da99466ac243f15fbba65bd261bfc75ffa1532b6
| 31,064,685,260,950,975,000,000,000,000,000,000,000 | 52 |
floppy: fix out-of-bounds read in copy_buffer
This fixes a global out-of-bounds read access in the copy_buffer
function of the floppy driver.
The FDDEFPRM ioctl allows one to set the geometry of a disk. The sect
and head fields (unsigned int) of the floppy_drive structure are used to
compute the max_sector (int) in the make_raw_rw_request function. It is
possible to overflow the max_sector. Next, max_sector is passed to the
copy_buffer function and used in one of the memcpy calls.
An unprivileged user could trigger the bug if the device is accessible,
but requires a floppy disk to be inserted.
The patch adds the check for the .sect * .head multiplication for not
overflowing in the set_geometry function.
The bug was found by syzkaller.
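A sketch of the style of guard the fix implies (standalone model, with
the geometry fields described above):

#include <limits.h>

/* Reject geometries whose sect * head product would overflow the int
 * later used for max_sector. */
static int geometry_fits(unsigned int sect, unsigned int head)
{
	return sect == 0 || head <= INT_MAX / sect;
}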
Signed-off-by: Denis Efremov <[email protected]>
Tested-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void acm_tty_unthrottle(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
clear_bit(ACM_THROTTLED, &acm->flags);
/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
smp_mb();
acm_submit_read_urbs(acm, GFP_KERNEL);
}
| 0 |
[
"CWE-416"
] |
linux
|
c52873e5a1ef72f845526d9f6a50704433f9c625
| 330,911,141,747,486,170,000,000,000,000,000,000,000 | 11 |
usb: cdc-acm: make sure a refcount is taken early enough
destroy() will decrement the refcount on the interface, so that
it needs to be taken so early that it never undercounts.
Fixes: 7fb57a019f94e ("USB: cdc-acm: Fix potential deadlock (lockdep warning)")
Cc: stable <[email protected]>
Reported-and-tested-by: [email protected]
Signed-off-by: Oliver Neukum <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
struct address_space *mapping = NULL;
struct rb_root *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool start_changed = false, end_changed = false;
long adjust_next = 0;
int remove_next = 0;
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
* perhaps the one after too (mprotect case 6).
* The only other cases that gets here are
* case 1, case 7 and case 8.
*/
if (next == expand) {
/*
* The only case where we don't expand "vma"
* and we expand "next" instead is case 8.
*/
VM_WARN_ON(end != next->vm_end);
/*
* remove_next == 3 means we're
* removing "vma" and that to do so we
* swapped "vma" and "next".
*/
remove_next = 3;
VM_WARN_ON(file != next->vm_file);
swap(vma, next);
} else {
VM_WARN_ON(expand != vma);
/*
* case 1, 6, 7, remove_next == 2 is case 6,
* remove_next == 1 is case 1 or 7.
*/
remove_next = 1 + (end > next->vm_end);
VM_WARN_ON(remove_next == 2 &&
end != next->vm_next->vm_end);
VM_WARN_ON(remove_next == 1 &&
end != next->vm_end);
/* trim end to next, for case 6 first pass */
end = next->vm_end;
}
exporter = next;
importer = vma;
/*
* If next doesn't have anon_vma, import from vma after
* next, if the vma overlaps with it.
*/
if (remove_next == 2 && !next->anon_vma)
exporter = next->vm_next;
} else if (end > next->vm_start) {
/*
* vma expands, overlapping part of the next:
* mprotect case 5 shifting the boundary up.
*/
adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
exporter = next;
importer = vma;
VM_WARN_ON(expand != importer);
} else if (end < vma->vm_end) {
/*
* vma shrinks, and !insert tells it's not
* split_vma inserting another: so it must be
* mprotect case 4 shifting the boundary down.
*/
adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
exporter = vma;
importer = next;
VM_WARN_ON(expand != importer);
}
/*
* Easily overlooked: when mprotect shifts the boundary,
* make sure the expanding vma has anon_vma set if the
* shrinking vma had, to cover any anon pages imported.
*/
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
if (error)
return error;
}
}
again:
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {
mapping = file->f_mapping;
root = &mapping->i_mmap;
uprobe_munmap(vma, vma->vm_start, vma->vm_end);
if (adjust_next)
uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
if (insert) {
/*
* Put into interval tree now, so instantiated pages
* are visible to arm/parisc __flush_dcache_page
* throughout; but we cannot insert into address
* space until vma start or end is updated.
*/
__vma_link_file(insert);
}
}
anon_vma = vma->anon_vma;
if (!anon_vma && adjust_next)
anon_vma = next->anon_vma;
if (anon_vma) {
VM_WARN_ON(adjust_next && next->anon_vma &&
anon_vma != next->anon_vma);
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_pre_update_vma(next);
}
if (root) {
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, root);
if (adjust_next)
vma_interval_tree_remove(next, root);
}
if (start != vma->vm_start) {
vma->vm_start = start;
start_changed = true;
}
if (end != vma->vm_end) {
vma->vm_end = end;
end_changed = true;
}
vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_start += adjust_next << PAGE_SHIFT;
next->vm_pgoff += adjust_next;
}
if (root) {
if (adjust_next)
vma_interval_tree_insert(next, root);
vma_interval_tree_insert(vma, root);
flush_dcache_mmap_unlock(mapping);
}
if (remove_next) {
/*
* vma_merge has merged next into vma, and needs
* us to remove next before dropping the locks.
*/
if (remove_next != 3)
__vma_unlink_prev(mm, next, vma);
else
/*
* vma is not before next if they've been
* swapped.
*
* pre-swap() next->vm_start was reduced so
* tell validate_mm_rb to ignore pre-swap()
* "next" (which is stored in post-swap()
* "vma").
*/
__vma_unlink_common(mm, next, NULL, false, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
/*
* split_vma has split insert from vma, and needs
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
__insert_vm_struct(mm, insert);
} else {
if (start_changed)
vma_gap_update(vma);
if (end_changed) {
if (!next)
mm->highest_vm_end = vm_end_gap(vma);
else if (!adjust_next)
vma_gap_update(next);
}
}
if (anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_post_update_vma(next);
anon_vma_unlock_write(anon_vma);
}
if (mapping)
i_mmap_unlock_write(mapping);
if (root) {
uprobe_mmap(vma);
if (adjust_next)
uprobe_mmap(next);
}
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
fput(file);
}
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
* we must remove another next too. It would clutter
* up the code too much to do both in one go.
*/
if (remove_next != 3) {
/*
* If "next" was removed and vma->vm_end was
* expanded (up) over it, in turn
* "next->vm_prev->vm_end" changed and the
* "vma->vm_next" gap must be updated.
*/
next = vma->vm_next;
} else {
/*
* For the scope of the comment "next" and
* "vma" considered pre-swap(): if "vma" was
* removed, next->vm_start was expanded (down)
* over it and the "next" gap must be updated.
* Because of the swap() the post-swap() "vma"
* actually points to pre-swap() "next"
* (post-swap() "next" as opposed is now a
* dangling pointer).
*/
next = vma;
}
if (remove_next == 2) {
remove_next = 1;
end = next->vm_end;
goto again;
}
else if (next)
vma_gap_update(next);
else {
/*
* If remove_next == 2 we obviously can't
* reach this path.
*
* If remove_next == 3 we can't reach this
* path because pre-swap() next is always not
* NULL. pre-swap() "next" is not being
* removed and its next->vm_end is not altered
* (and furthermore "end" already matches
* next->vm_end in remove_next == 3).
*
* We reach this only in the remove_next == 1
* case if the "next" vma that was removed was
* the highest vma of the mm. However in such
* case next->vm_end == "end" and the extended
* "vma" has vma->vm_end == next->vm_end so
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
}
}
if (insert && file)
uprobe_mmap(insert);
validate_mm(mm);
return 0;
}
| 0 |
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
| 321,478,599,163,085,500,000,000,000,000,000,000,000 | 288 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>
|
CharSet conv_get_charset_from_str(const gchar *charset)
{
GHashTable *table;
if (!charset) return C_AUTO;
table = conv_get_charset_from_str_table();
return GPOINTER_TO_UINT(g_hash_table_lookup(table, charset));
}
| 0 |
[
"CWE-119"
] |
claws
|
d390fa07f5548f3173dd9cc13b233db5ce934c82
| 62,223,875,190,795,400,000,000,000,000,000,000,000 | 9 |
Make sure we don't run out of the output buffer. Maybe fixes bug #3557
|
populate_hash_table_from_refs_map (GHashTable *ret_all_refs,
GHashTable *ref_timestamps,
VarRefMapRef ref_map,
const char *opt_collection_id,
FlatpakRemoteState *state)
{
gsize len, i;
len = var_ref_map_get_length (ref_map);
for (i = 0; i < len; i++)
{
VarRefMapEntryRef entry = var_ref_map_get_at (ref_map, i);
const char *ref_name = var_ref_map_entry_get_ref (entry);
const guint8 *csum_bytes;
gsize csum_len;
VarRefInfoRef info;
guint64 *new_timestamp = NULL;
g_autoptr(FlatpakDecomposed) decomposed = NULL;
if (!flatpak_remote_state_allow_ref (state, ref_name))
continue;
info = var_ref_map_entry_get_info (entry);
csum_bytes = var_ref_info_peek_checksum (info, &csum_len);
if (csum_len != OSTREE_SHA256_DIGEST_LEN)
continue;
decomposed = flatpak_decomposed_new_from_col_ref (ref_name, opt_collection_id, NULL);
if (decomposed == NULL)
continue;
if (ref_timestamps)
{
guint64 timestamp = get_timestamp_from_ref_info (info);
gpointer value;
if (g_hash_table_lookup_extended (ref_timestamps, ref_name, NULL, &value))
{
guint64 *old_timestamp = value;
if (*old_timestamp >= timestamp)
continue; /* New timestamp is older, skip this commit */
}
new_timestamp = g_memdup (×tamp, sizeof (guint64));
}
g_hash_table_replace (ret_all_refs, g_steal_pointer (&decomposed), ostree_checksum_from_bytes (csum_bytes));
if (new_timestamp)
g_hash_table_replace (ref_timestamps, g_strdup (ref_name), new_timestamp);
}
}
| 0 |
[
"CWE-74"
] |
flatpak
|
fb473cad801c6b61706353256cab32330557374a
| 168,114,217,684,985,200,000,000,000,000,000,000,000 | 52 |
dir: Pass environment via bwrap --setenv when running apply_extra
This means we can systematically pass the environment variables
through bwrap(1), even if it is setuid and thus is filtering out
security-sensitive environment variables. bwrap ends up being
run with an empty environment instead.
As with the previous commit, this regressed while fixing CVE-2021-21261.
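A minimal sketch of the argv construction (glib-based; the helper name
is assumed):

#include <glib.h>

/* Pass each variable to bwrap as explicit "--setenv NAME VALUE"
 * arguments instead of relying on the inherited environment, which a
 * setuid bwrap filters away.  The array owns the duplicated strings. */
static void
append_setenv (GPtrArray *argv, const char *name, const char *value)
{
  g_ptr_array_add (argv, g_strdup ("--setenv"));
  g_ptr_array_add (argv, g_strdup (name));
  g_ptr_array_add (argv, g_strdup (value));
}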
Fixes: 6d1773d2 "run: Convert all environment variables into bwrap arguments"
Signed-off-by: Simon McVittie <[email protected]>
|
static bool make_krb5_skew_error(DATA_BLOB *pblob_out)
{
krb5_context context = NULL;
krb5_error_code kerr = 0;
krb5_data reply;
krb5_principal host_princ = NULL;
char *host_princ_s = NULL;
bool ret = False;
*pblob_out = data_blob_null;
initialize_krb5_error_table();
kerr = krb5_init_context(&context);
if (kerr) {
return False;
}
/* Create server principal. */
asprintf(&host_princ_s, "%s$@%s", global_myname(), lp_realm());
if (!host_princ_s) {
goto out;
}
strlower_m(host_princ_s);
kerr = smb_krb5_parse_name(context, host_princ_s, &host_princ);
if (kerr) {
DEBUG(10,("make_krb5_skew_error: smb_krb5_parse_name failed "
"for name %s: Error %s\n",
host_princ_s, error_message(kerr) ));
goto out;
}
kerr = smb_krb5_mk_error(context, KRB5KRB_AP_ERR_SKEW,
host_princ, &reply);
if (kerr) {
DEBUG(10,("make_krb5_skew_error: smb_krb5_mk_error "
"failed: Error %s\n",
error_message(kerr) ));
goto out;
}
*pblob_out = data_blob(reply.data, reply.length);
kerberos_free_data_contents(context,&reply);
ret = True;
out:
if (host_princ_s) {
SAFE_FREE(host_princ_s);
}
if (host_princ) {
krb5_free_principal(context, host_princ);
}
krb5_free_context(context);
return ret;
}
| 0 |
[
"CWE-119"
] |
samba
|
9280051bfba337458722fb157f3082f93cbd9f2b
| 337,317,219,100,476,200,000,000,000,000,000,000,000 | 55 |
s3: Fix an uninitialized variable read
Found by Laurent Gaffie <[email protected]>
Thanks for that,
Volker
Fix bug #7254 (An uninitialized variable read could cause an smbd crash).
|
k5_asn1_decode_bitstring(const uint8_t *asn1, size_t len,
uint8_t **bits_out, size_t *len_out)
{
uint8_t unused, *bits;
*bits_out = NULL;
*len_out = 0;
if (len == 0)
return ASN1_BAD_LENGTH;
unused = *asn1++;
len--;
if (unused > 7)
return ASN1_BAD_FORMAT;
bits = malloc(len);
if (bits == NULL)
return ENOMEM;
memcpy(bits, asn1, len);
if (len > 1)
bits[len - 1] &= (0xff << unused);
*bits_out = bits;
*len_out = len;
return 0;
}
| 0 |
[
"CWE-674",
"CWE-787"
] |
krb5
|
57415dda6cf04e73ffc3723be518eddfae599bfd
| 131,570,614,527,382,660,000,000,000,000,000,000,000 | 25 |
Add recursion limit for ASN.1 indefinite lengths
The libkrb5 ASN.1 decoder supports BER indefinite lengths. It
computes the tag length using recursion; the lack of a recursion limit
allows an attacker to overrun the stack and cause the process to
crash. Reported by Demi Obenour.
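A sketch of a depth-limited walk (the cap and signature are
illustrative, not the library's):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_ASN1_DEPTH 32	/* assumed limit */

/* Each nested indefinite length recurses one level deeper; an explicit
 * bound turns the stack overrun into a clean decode error. */
static int
taglen (const uint8_t *p, size_t len, int depth, size_t *out)
{
    if (depth > MAX_ASN1_DEPTH)
        return EOVERFLOW;	/* refuse pathological nesting */
    /* ... parse one tag; on an indefinite length, recurse with
     * taglen(inner, inner_len, depth + 1, out) ... */
    (void)p; (void)len; (void)out;
    return 0;
}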
CVE-2020-28196:
In MIT krb5 releases 1.11 and later, an unauthenticated attacker can
cause a denial of service for any client or server to which it can
send an ASN.1-encoded Kerberos message of sufficient length.
ticket: 8959 (new)
tags: pullup
target_version: 1.18-next
target_version: 1.17-next
|
pipe_echo_finish (Pipe *pipe)
{
GIOStatus status;
gsize bytes_read;
char buf[512];
do {
bytes_read = 0;
status = g_io_channel_read_chars (pipe->channel,
buf,
sizeof (buf),
&bytes_read,
NULL);
if (bytes_read) {
fprintf (pipe->logf, "%.*s", (int) bytes_read, buf);
fflush (pipe->logf);
}
} while (status == G_IO_STATUS_NORMAL);
}
| 0 |
[] |
NetworkManager-vpnc
|
07ac18a32b4e361a27ef48ac757d36cbb46e8e12
| 254,153,970,876,680,900,000,000,000,000,000,000,000 | 19 |
service: disallow newlines in configuration values (CVE-2018-10900)
The vpnc configuration format doesn't allow those. vpnc(8):
The values start exactly one space after the keywords, and run to the end
of line. This lets you put any kind of weird character (except CR, LF and
NUL) in your strings
We have no choice but to reject them. If we didn't it would allow the
user to inject arbitrary configuration directives with potential
security implications.
https://pulsesecurity.co.nz/advisories/NM-VPNC-Privesc
Reported by: Denis Andzakovic
|
R_API int r_config_set_getter(RConfig *cfg, const char *key, RConfigCallback cb) {
RConfigNode *node = r_config_node_get (cfg, key);
if (node) {
node->getter = cb;
return 1;
}
return 0;
}
| 0 |
[
"CWE-416"
] |
radare2
|
f85bc674b2a2256a364fe796351bc1971e106005
| 109,061,929,741,072,200,000,000,000,000,000,000,000 | 8 |
Fix #7698 - UAF in r_config_set when loading a dex
|
tuplesort_begin_heap(TupleDesc tupDesc,
int nkeys, AttrNumber *attNums,
Oid *sortOperators, Oid *sortCollations,
bool *nullsFirstFlags,
int workMem, bool randomAccess)
{
Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
MemoryContext oldcontext;
int i;
oldcontext = MemoryContextSwitchTo(state->sortcontext);
AssertArg(nkeys > 0);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
nkeys, workMem, randomAccess ? 't' : 'f');
#endif
state->nKeys = nkeys;
TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
false, /* no unique check */
nkeys,
workMem,
randomAccess);
state->comparetup = comparetup_heap;
state->copytup = copytup_heap;
state->writetup = writetup_heap;
state->readtup = readtup_heap;
state->tupDesc = tupDesc; /* assume we need not copy tupDesc */
state->abbrevNext = 10;
/* Prepare SortSupport data for each column */
state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));
for (i = 0; i < nkeys; i++)
{
SortSupport sortKey = state->sortKeys + i;
AssertArg(attNums[i] != 0);
AssertArg(sortOperators[i] != 0);
sortKey->ssup_cxt = CurrentMemoryContext;
sortKey->ssup_collation = sortCollations[i];
sortKey->ssup_nulls_first = nullsFirstFlags[i];
sortKey->ssup_attno = attNums[i];
/* Convey if abbreviation optimization is applicable in principle */
sortKey->abbreviate = (i == 0);
PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
}
/*
* The "onlyKey" optimization cannot be used with abbreviated keys, since
* tie-breaker comparisons may be required. Typically, the optimization is
* only of value to pass-by-value types anyway, whereas abbreviated keys
* are typically only of value to pass-by-reference types.
*/
if (nkeys == 1 && !state->sortKeys->abbrev_converter)
state->onlyKey = state->sortKeys;
MemoryContextSwitchTo(oldcontext);
return state;
}
| 0 |
[
"CWE-209"
] |
postgres
|
804b6b6db4dcfc590a468e7be390738f9f7755fb
| 76,086,571,875,305,630,000,000,000,000,000,000,000 | 70 |
Fix column-privilege leak in error-message paths
While building error messages to return to the user,
BuildIndexValueDescription, ExecBuildSlotValueDescription and
ri_ReportViolation would happily include the entire key or entire row in
the result returned to the user, even if the user didn't have access to
view all of the columns being included.
Instead, include only those columns which the user is providing or which
the user has select rights on. If the user does not have any rights
to view the table or any of the columns involved then no detail is
provided and a NULL value is returned from BuildIndexValueDescription
and ExecBuildSlotValueDescription. Note that, for key cases, the user
must have access to all of the columns for the key to be shown; a
partial key will not be returned.
Further, in master only, do not return any data for cases where row
security is enabled on the relation and row security should be applied
for the user. This required a bit of refactoring and moving of things
around related to RLS- note the addition of utils/misc/rls.c.
Back-patch all the way, as column-level privileges are now in all
supported versions.
This has been assigned CVE-2014-8161, but since the issue and the patch
have already been publicized on pgsql-hackers, there's no point in trying
to hide this commit.
|
void Monitor::_ms_dispatch(Message *m)
{
if (is_shutdown()) {
m->put();
return;
}
MonOpRequestRef op = op_tracker.create_request<MonOpRequest>(m);
bool src_is_mon = op->is_src_mon();
op->mark_event("mon:_ms_dispatch");
MonSession *s = op->get_session();
if (s && s->closed) {
return;
}
if (src_is_mon && s) {
ConnectionRef con = m->get_connection();
if (con->get_messenger() && con->get_features() != s->con_features) {
// only update features if this is a non-anonymous connection
dout(10) << __func__ << " feature change for " << m->get_source_inst()
<< " (was " << s->con_features
<< ", now " << con->get_features() << ")" << dendl;
// connection features changed - recreate session.
if (s->con && s->con != con) {
dout(10) << __func__ << " connection for " << m->get_source_inst()
<< " changed from session; mark down and replace" << dendl;
s->con->mark_down();
}
if (s->item.is_on_list()) {
// forwarded messages' sessions are not in the sessions map and
// exist only while the op is being handled.
remove_session(s);
}
s->put();
s = nullptr;
}
}
if (!s) {
// if the sender is not a monitor, make sure their first message for a
// session is an MAuth. If it is not, assume it's a stray message,
// and considering that we are creating a new session it is safe to
// assume that the sender hasn't authenticated yet, so we have no way
// of assessing whether we should handle it or not.
if (!src_is_mon && (m->get_type() != CEPH_MSG_AUTH &&
m->get_type() != CEPH_MSG_MON_GET_MAP &&
m->get_type() != CEPH_MSG_PING)) {
dout(1) << __func__ << " dropping stray message " << *m
<< " from " << m->get_source_inst() << dendl;
return;
}
ConnectionRef con = m->get_connection();
{
Mutex::Locker l(session_map_lock);
s = session_map.new_session(m->get_source_inst(), con.get());
}
assert(s);
con->set_priv(s->get());
dout(10) << __func__ << " new session " << s << " " << *s
<< " features 0x" << std::hex
<< s->con_features << std::dec << dendl;
op->set_session(s);
logger->set(l_mon_num_sessions, session_map.get_size());
logger->inc(l_mon_session_add);
if (src_is_mon) {
// give it monitor caps; the peer type has been authenticated
dout(5) << __func__ << " setting monitor caps on this connection" << dendl;
if (!s->caps.is_allow_all()) // but no need to repeatedly copy
s->caps = *mon_caps;
}
s->put();
} else {
dout(20) << __func__ << " existing session " << s << " for " << s->inst
<< dendl;
}
assert(s);
s->session_timeout = ceph_clock_now();
s->session_timeout += g_conf->mon_session_timeout;
if (s->auth_handler) {
s->entity_name = s->auth_handler->get_entity_name();
}
dout(20) << " caps " << s->caps.get_str() << dendl;
if ((is_synchronizing() ||
(s->global_id == 0 && !exited_quorum.is_zero())) &&
!src_is_mon &&
m->get_type() != CEPH_MSG_PING) {
waitlist_or_zap_client(op);
} else {
dispatch_op(op);
}
return;
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 104,830,496,609,198,760,000,000,000,000,000,000,000 | 99 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
_gcry_mpi_ec_get_affine (gcry_mpi_t x, gcry_mpi_t y, mpi_point_t point,
mpi_ec_t ctx)
{
if (!mpi_cmp_ui (point->z, 0))
return -1;
switch (ctx->model)
{
case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. */
{
gcry_mpi_t z1, z2, z3;
z1 = mpi_new (0);
z2 = mpi_new (0);
ec_invm (z1, point->z, ctx); /* z1 = z^(-1) mod p */
ec_mulm (z2, z1, z1, ctx); /* z2 = z^(-2) mod p */
if (x)
ec_mulm (x, point->x, z2, ctx);
if (y)
{
z3 = mpi_new (0);
ec_mulm (z3, z2, z1, ctx); /* z3 = z^(-3) mod p */
ec_mulm (y, point->y, z3, ctx);
mpi_free (z3);
}
mpi_free (z2);
mpi_free (z1);
}
return 0;
case MPI_EC_MONTGOMERY:
{
if (x)
mpi_set (x, point->x);
if (y)
{
log_fatal ("%s: Getting Y-coordinate on %s is not supported\n",
"_gcry_mpi_ec_get_affine", "Montgomery");
return -1;
}
}
return 0;
case MPI_EC_EDWARDS:
{
gcry_mpi_t z;
z = mpi_new (0);
ec_invm (z, point->z, ctx);
mpi_resize (z, ctx->p->nlimbs);
z->nlimbs = ctx->p->nlimbs;
if (x)
{
mpi_resize (x, ctx->p->nlimbs);
x->nlimbs = ctx->p->nlimbs;
ctx->mulm (x, point->x, z, ctx);
}
if (y)
{
mpi_resize (y, ctx->p->nlimbs);
y->nlimbs = ctx->p->nlimbs;
ctx->mulm (y, point->y, z, ctx);
}
_gcry_mpi_release (z);
}
return 0;
default:
return -1;
}
}
| 0 |
[
"CWE-203"
] |
libgcrypt
|
b9577f7c89b4327edc09f2231bc8b31521102c79
| 213,532,462,898,573,580,000,000,000,000,000,000,000 | 78 |
ecc: Add mitigation against timing attack.
* cipher/ecc-ecdsa.c (_gcry_ecc_ecdsa_sign): Add the order N to K.
* mpi/ec.c (_gcry_mpi_ec_mul_point): Compute with NBITS of P or larger.
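In symbols, the first change is the standard fixed-length trick (a
sketch, with $n$ the group order and $P$ the base point):
\[
\tilde{k} = k + n;\quad
\text{while } \operatorname{bitlen}(\tilde{k}) \le \operatorname{bitlen}(n):\ \tilde{k} \leftarrow \tilde{k} + n;\qquad
\tilde{k}P = kP \ \text{since } nP = \mathcal{O},
\]
so every scalar multiplication runs over the same bit length
regardless of $k$.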
CVE-id: CVE-2019-13627
GnuPG-bug-id: 4626
Co-authored-by: Ján Jančár <[email protected]>
Signed-off-by: NIIBE Yutaka <[email protected]>
|
my_decimal *Item_sum_sum::val_decimal(my_decimal *val)
{
if (aggr)
aggr->endup();
if (Item_sum_sum::result_type() == DECIMAL_RESULT)
return null_value ? NULL : (dec_buffs + curr_dec_buff);
return val_decimal_from_real(val);
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 10,072,983,294,439,982,000,000,000,000,000,000,000 | 8 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too, but it changes the
existing behaviour, so it is problematic to fix.
|
njs_array_handler_find_index(njs_vm_t *vm, njs_iterator_args_t *args,
njs_value_t *entry, int64_t n)
{
njs_int_t ret;
njs_value_t copy;
if (njs_is_valid(entry)) {
copy = *entry;
} else {
njs_set_undefined(©);
}
ret = njs_array_iterator_call(vm, args, ©, n);
if (njs_slow_path(ret != NJS_OK)) {
return ret;
}
if (njs_is_true(&vm->retval)) {
njs_set_number(&vm->retval, n);
return NJS_DONE;
}
return NJS_OK;
}
| 0 |
[
"CWE-703"
] |
njs
|
2e00e95473861846aa8538be87db07699d9f676d
| 181,009,754,343,389,930,000,000,000,000,000,000,000 | 26 |
Fixed Array.prototype.slice() with slow "this" argument.
Previously, when "this" argument was not a fast array, but the "deleted" array
was a fast array, the "deleted" array may be left in uninitialized state if
"this" argument had gaps.
This fix is to ensure that "deleted" is properly initialized.
This fixes #485 issue on Github.
|
void Filter::onStreamMaxDurationReached(UpstreamRequest& upstream_request) {
upstream_request.resetStream();
if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) {
return;
}
upstream_request.removeFromList(upstream_requests_);
cleanup();
if (downstream_response_started_ &&
!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_500_after_100")) {
callbacks_->streamInfo().setResponseCodeDetails(
StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached);
callbacks_->resetStream();
} else {
callbacks_->streamInfo().setResponseFlag(
StreamInfo::ResponseFlag::UpstreamMaxStreamDurationReached);
// sendLocalReply may instead reset the stream if downstream_response_started_ is true.
callbacks_->sendLocalReply(
Http::Code::RequestTimeout, "upstream max stream duration reached", modify_headers_,
absl::nullopt, StreamInfo::ResponseCodeDetails::get().UpstreamMaxStreamDurationReached);
}
}
| 0 |
[
"CWE-703"
] |
envoy
|
18871dbfb168d3512a10c78dd267ff7c03f564c6
| 58,190,109,795,343,080,000,000,000,000,000,000,000 | 24 |
[1.18] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <[email protected]>
|
mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
"Unable to read in mpt_ioctl_eventenable struct @ %p\n",
__FILE__, __LINE__, uarg);
return -EFAULT;
}
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
/* Have not yet allocated memory - do so now.
*/
int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
ioc->events = kzalloc(sz, GFP_KERNEL);
if (!ioc->events) {
printk(MYIOC_s_ERR_FMT
": ERROR - Insufficient memory to add adapter!\n",
ioc->name);
return -ENOMEM;
}
ioc->alloc_total += sz;
ioc->eventContext = 0;
}
/* Update the IOC event logging flag.
*/
ioc->eventTypes = karg.eventTypes;
return 0;
}
| 0 |
[
"CWE-362",
"CWE-369"
] |
linux
|
28d76df18f0ad5bcf5fa48510b225f0ed262a99b
| 201,666,579,921,515,800,000,000,000,000,000,000,000 | 36 |
scsi: mptfusion: Fix double fetch bug in ioctl
Tom Hatskevich reported that we look up "iocp" then, in the called
functions we do a second copy_from_user() and look it up again.
The problem that could cause is:
drivers/message/fusion/mptctl.c
674 /* All of these commands require an interrupt or
675 * are unknown/illegal.
676 */
677 if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
^^^^
We take this lock.
678 return ret;
679
680 if (cmd == MPTFWDOWNLOAD)
681 ret = mptctl_fw_download(arg);
^^^
Then the user memory changes and we look up "iocp" again but a different
one so now we are holding the incorrect lock and have a race condition.
682 else if (cmd == MPTCOMMAND)
683 ret = mptctl_mpt_command(arg);
The security impact of this bug is not as bad as it could have been
because these operations are all privileged and root already has
enormous destructive power. But it's still worth fixing.
This patch passes the "iocp" pointer to the functions to avoid the
second lookup. That deletes 100 lines of code from the driver so
it's a nice clean up as well.
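A userspace model of the single-fetch pattern (all names illustrative):

#include <stddef.h>

struct adapter { int id; };
struct request { int ioc_id; };

struct adapter *lookup(int id);		/* resolves id once */
int subcommand(struct adapter *iocp, const struct request *req);

/* The request was copied from user space exactly once by the caller;
 * the adapter is resolved from that single copy and the pointer is
 * passed down, so no later stage can re-fetch and race. */
static int handle(const struct request *req)
{
	struct adapter *iocp = lookup(req->ioc_id);

	if (iocp == NULL)
		return -1;
	return subcommand(iocp, req);
}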
Link: https://lore.kernel.org/r/20200114123414.GA7957@kadam
Reported-by: Tom Hatskevich <[email protected]>
Reviewed-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
static void add_property(Array &properties, xmlNodePtr node, Object value) {
const char *name = (char *)node->name;
if (name) {
int namelen = xmlStrlen(node->name);
String sname(name, namelen, CopyString);
if (properties.exists(sname)) {
Variant &existing = properties.lval(sname);
if (existing.is(KindOfArray)) {
existing.append(value);
} else {
Array newdata;
newdata.append(existing);
newdata.append(value);
properties.set(sname, newdata);
}
} else {
properties.set(sname, value);
}
}
}
| 0 |
[
"CWE-94"
] |
hhvm
|
95f96e7287effe2fcdfb9a5338d1a7e4f55b083b
| 152,029,599,590,155,550,000,000,000,000,000,000,000 | 21 |
Fix libxml_disable_entity_loader()
This wasn't calling requestInit and setting the libxml handler to null.
So the first time an error came along it would reset the handler from
no-op to reading again.
This is a much better fix, we set our custom handler in requestInit and
when libxml_disable_entity_loader we store that state as a member bool
ensuring requestInit is always called to set our own handler.
If the handler isn't inserted then the behaviour is as before. The only
time this could go pear shaped is say we wanted to make the default be
off. In that case we'd need a global requestInit that is always called
since there are libxml references everywhere.
Reviewed By: @jdelong
Differential Revision: D1116686
|
static void ipgre_tunnel_encap_del_mpls_ops(void)
{
}
| 0 |
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
| 164,644,097,629,129,120,000,000,000,000,000,000,000 | 3 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
get_consumer_secret (GoaOAuthProvider *provider)
{
return GOA_GOOGLE_CONSUMER_SECRET;
}
| 0 |
[
"CWE-310"
] |
gnome-online-accounts
|
ecad8142e9ac519b9fc74b96dcb5531052bbffe1
| 120,922,654,683,718,190,000,000,000,000,000,000,000 | 4 |
Guard against invalid SSL certificates
None of the branded providers (e.g., Google, Facebook and Windows Live)
should ever have an invalid certificate. So set "ssl-strict" on the
SoupSession object being used by GoaWebView.
Providers like ownCloud and Exchange might have to deal with
certificates that are not up to the mark, e.g., self-signed
certificates. For those, show a warning when the account is being
created, and only proceed if the user decides to ignore it. In any
case, save the status of the certificate that was used to create the
account. So an account created with a valid certificate will never
work with an invalid one, and one created with an invalid certificate
will not throw any further warnings.
Fixes: CVE-2013-0240
|
static apr_status_t beam_send_cleanup(void *data)
{
h2_bucket_beam *beam = data;
/* sender is going away, clear up all references to its memory */
r_purge_sent(beam);
h2_blist_cleanup(&beam->send_list);
report_consumption(beam, NULL);
while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
H2_BPROXY_REMOVE(proxy);
proxy->beam = NULL;
proxy->bsender = NULL;
}
h2_blist_cleanup(&beam->purge_list);
h2_blist_cleanup(&beam->hold_list);
beam->send_pool = NULL;
return APR_SUCCESS;
}
| 0 |
[
"CWE-400"
] |
mod_h2
|
83a2e3866918ce6567a683eb4c660688d047ee81
| 5,714,005,858,399,940,000,000,000,000,000,000,000 | 18 |
* fixes a race condition where aborting streams triggers an unnecessary timeout.
|
static void v4l_print_requestbuffers(const void *arg, bool write_only)
{
const struct v4l2_requestbuffers *p = arg;
pr_cont("count=%d, type=%s, memory=%s\n",
p->count,
prt_names(p->type, v4l2_type_names),
prt_names(p->memory, v4l2_memory_names));
}
| 0 |
[
"CWE-401"
] |
linux
|
fb18802a338b36f675a388fc03d2aa504a0d0899
| 33,686,675,527,174,310,000,000,000,000,000,000,000 | 9 |
media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with an argument size larger than 128 that also used array
arguments was handled, two memory allocations were made but, alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
CtPtr ProtocolV1::handle_my_addr_write(int r) {
ldout(cct, 20) << __func__ << " r=" << r << dendl;
if (r < 0) {
ldout(cct, 2) << __func__ << " connect couldn't write my addr, "
<< cpp_strerror(r) << dendl;
return _fault();
}
ldout(cct, 10) << __func__ << " connect sent my addr "
<< messenger->get_myaddr_legacy() << dendl;
return CONTINUE(send_connect_message);
}
| 0 |
[
"CWE-294"
] |
ceph
|
6c14c2fb5650426285428dfe6ca1597e5ea1d07d
| 157,511,282,535,422,220,000,000,000,000,000,000,000 | 13 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
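A minimal sketch of that decision (helper and parameters are
illustrative; the feature bit is passed in rather than named):

#include <stdint.h>

/* msgr2 peers always get a challenge; msgr1 peers only if they
 * advertised CEPHX_V2 in their feature bits. */
static int should_challenge(int is_msgr2, uint64_t peer_features,
			    uint64_t cephx_v2_bit)
{
	if (is_msgr2)
		return 1;	/* CEPHX_V2 predates msgr2 */
	return (peer_features & cephx_v2_bit) != 0;
}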
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
|
init_decompression(struct archive_read *a, struct _7zip *zip,
const struct _7z_coder *coder1, const struct _7z_coder *coder2)
{
int r;
zip->codec = coder1->codec;
zip->codec2 = -1;
switch (zip->codec) {
case _7Z_COPY:
case _7Z_BZ2:
case _7Z_DEFLATE:
case _7Z_PPMD:
if (coder2 != NULL) {
if (coder2->codec != _7Z_X86 &&
coder2->codec != _7Z_X86_BCJ2) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Unsupported filter %lx for %lx",
coder2->codec, coder1->codec);
return (ARCHIVE_FAILED);
}
zip->codec2 = coder2->codec;
zip->bcj_state = 0;
if (coder2->codec == _7Z_X86)
x86_Init(zip);
}
break;
default:
break;
}
switch (zip->codec) {
case _7Z_COPY:
break;
case _7Z_LZMA: case _7Z_LZMA2:
#ifdef HAVE_LZMA_H
#if LZMA_VERSION_MAJOR >= 5
/* Effectively disable the limiter. */
#define LZMA_MEMLIMIT UINT64_MAX
#else
/* NOTE: This needs to check memory size which running system has. */
#define LZMA_MEMLIMIT (1U << 30)
#endif
{
lzma_options_delta delta_opt;
lzma_filter filters[LZMA_FILTERS_MAX];
#if LZMA_VERSION < 50010000
lzma_filter *ff;
#endif
int fi = 0;
if (zip->lzstream_valid) {
lzma_end(&(zip->lzstream));
zip->lzstream_valid = 0;
}
/*
* NOTE: liblzma incompletely handles the BCJ+LZMA compressed
* data made by 7-Zip because 7-Zip does not add End-Of-
* Payload Marker(EOPM) at the end of LZMA compressed data,
* and so liblzma cannot know the end of the compressed data
* without EOPM. So consequently liblzma will not return last
* three or four bytes of uncompressed data because
* LZMA_FILTER_X86 filter does not handle input data if its
* data size is less than five bytes. If liblzma detect EOPM
* or know the uncompressed data size, liblzma will flush out
* the remaining that three or four bytes of uncompressed
* data. That is why we have to use our converting program
* for BCJ+LZMA. If we were able to tell the uncompressed
* size to liblzma when using lzma_raw_decoder() liblzma
* could correctly deal with BCJ+LZMA. But unfortunately
* there is no way to do that.
* Discussion about this can be found at XZ Utils forum.
*/
if (coder2 != NULL) {
zip->codec2 = coder2->codec;
filters[fi].options = NULL;
switch (zip->codec2) {
case _7Z_X86:
if (zip->codec == _7Z_LZMA2) {
filters[fi].id = LZMA_FILTER_X86;
fi++;
} else
/* Use our filter. */
x86_Init(zip);
break;
case _7Z_X86_BCJ2:
/* Use our filter. */
zip->bcj_state = 0;
break;
case _7Z_DELTA:
filters[fi].id = LZMA_FILTER_DELTA;
memset(&delta_opt, 0, sizeof(delta_opt));
delta_opt.type = LZMA_DELTA_TYPE_BYTE;
delta_opt.dist = 1;
filters[fi].options = &delta_opt;
fi++;
break;
/* Following filters have not been tested yet. */
case _7Z_POWERPC:
filters[fi].id = LZMA_FILTER_POWERPC;
fi++;
break;
case _7Z_IA64:
filters[fi].id = LZMA_FILTER_IA64;
fi++;
break;
case _7Z_ARM:
filters[fi].id = LZMA_FILTER_ARM;
fi++;
break;
case _7Z_ARMTHUMB:
filters[fi].id = LZMA_FILTER_ARMTHUMB;
fi++;
break;
case _7Z_SPARC:
filters[fi].id = LZMA_FILTER_SPARC;
fi++;
break;
default:
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Unexpected codec ID: %lX", zip->codec2);
return (ARCHIVE_FAILED);
}
}
if (zip->codec == _7Z_LZMA2)
filters[fi].id = LZMA_FILTER_LZMA2;
else
filters[fi].id = LZMA_FILTER_LZMA1;
filters[fi].options = NULL;
#if LZMA_VERSION < 50010000
ff = &filters[fi];
#endif
r = lzma_properties_decode(&filters[fi], NULL,
coder1->properties, (size_t)coder1->propertiesSize);
if (r != LZMA_OK) {
set_error(a, r);
return (ARCHIVE_FAILED);
}
fi++;
filters[fi].id = LZMA_VLI_UNKNOWN;
filters[fi].options = NULL;
r = lzma_raw_decoder(&(zip->lzstream), filters);
#if LZMA_VERSION < 50010000
free(ff->options);
#endif
if (r != LZMA_OK) {
set_error(a, r);
return (ARCHIVE_FAILED);
}
zip->lzstream_valid = 1;
zip->lzstream.total_in = 0;
zip->lzstream.total_out = 0;
break;
}
#else
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"LZMA codec is unsupported");
return (ARCHIVE_FAILED);
#endif
case _7Z_BZ2:
#if defined(HAVE_BZLIB_H) && defined(BZ_CONFIG_ERROR)
if (zip->bzstream_valid) {
BZ2_bzDecompressEnd(&(zip->bzstream));
zip->bzstream_valid = 0;
}
r = BZ2_bzDecompressInit(&(zip->bzstream), 0, 0);
if (r == BZ_MEM_ERROR)
r = BZ2_bzDecompressInit(&(zip->bzstream), 0, 1);
if (r != BZ_OK) {
int err = ARCHIVE_ERRNO_MISC;
const char *detail = NULL;
switch (r) {
case BZ_PARAM_ERROR:
detail = "invalid setup parameter";
break;
case BZ_MEM_ERROR:
err = ENOMEM;
detail = "out of memory";
break;
case BZ_CONFIG_ERROR:
detail = "mis-compiled library";
break;
}
archive_set_error(&a->archive, err,
"Internal error initializing decompressor: %s",
detail != NULL ? detail : "??");
zip->bzstream_valid = 0;
return (ARCHIVE_FAILED);
}
zip->bzstream_valid = 1;
zip->bzstream.total_in_lo32 = 0;
zip->bzstream.total_in_hi32 = 0;
zip->bzstream.total_out_lo32 = 0;
zip->bzstream.total_out_hi32 = 0;
break;
#else
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"BZ2 codec is unsupported");
return (ARCHIVE_FAILED);
#endif
case _7Z_DEFLATE:
#ifdef HAVE_ZLIB_H
if (zip->stream_valid)
r = inflateReset(&(zip->stream));
else
r = inflateInit2(&(zip->stream),
-15 /* Don't check for zlib header */);
if (r != Z_OK) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Couldn't initialize zlib stream.");
return (ARCHIVE_FAILED);
}
zip->stream_valid = 1;
zip->stream.total_in = 0;
zip->stream.total_out = 0;
break;
#else
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"DEFLATE codec is unsupported");
return (ARCHIVE_FAILED);
#endif
case _7Z_PPMD:
{
unsigned order;
uint32_t msize;
if (zip->ppmd7_valid) {
__archive_ppmd7_functions.Ppmd7_Free(
&zip->ppmd7_context, &g_szalloc);
zip->ppmd7_valid = 0;
}
if (coder1->propertiesSize < 5) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Malformed PPMd parameter");
return (ARCHIVE_FAILED);
}
order = coder1->properties[0];
msize = archive_le32dec(&(coder1->properties[1]));
if (order < PPMD7_MIN_ORDER || order > PPMD7_MAX_ORDER ||
msize < PPMD7_MIN_MEM_SIZE || msize > PPMD7_MAX_MEM_SIZE) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Malformed PPMd parameter");
return (ARCHIVE_FAILED);
}
__archive_ppmd7_functions.Ppmd7_Construct(&zip->ppmd7_context);
r = __archive_ppmd7_functions.Ppmd7_Alloc(
&zip->ppmd7_context, msize, &g_szalloc);
if (r == 0) {
archive_set_error(&a->archive, ENOMEM,
"Coludn't allocate memory for PPMd");
return (ARCHIVE_FATAL);
}
__archive_ppmd7_functions.Ppmd7_Init(
&zip->ppmd7_context, order);
__archive_ppmd7_functions.Ppmd7z_RangeDec_CreateVTable(
&zip->range_dec);
zip->ppmd7_valid = 1;
zip->ppmd7_stat = 0;
zip->ppstream.overconsumed = 0;
zip->ppstream.total_in = 0;
zip->ppstream.total_out = 0;
break;
}
case _7Z_X86:
case _7Z_X86_BCJ2:
case _7Z_POWERPC:
case _7Z_IA64:
case _7Z_ARM:
case _7Z_ARMTHUMB:
case _7Z_SPARC:
case _7Z_DELTA:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Unexpected codec ID: %lX", zip->codec);
return (ARCHIVE_FAILED);
case _7Z_CRYPTO_MAIN_ZIP:
case _7Z_CRYPTO_RAR_29:
case _7Z_CRYPTO_AES_256_SHA_256:
if (a->entry) {
archive_entry_set_is_metadata_encrypted(a->entry, 1);
archive_entry_set_is_data_encrypted(a->entry, 1);
zip->has_encrypted_entries = 1;
}
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Crypto codec not supported yet (ID: 0x%lX)", zip->codec);
return (ARCHIVE_FAILED);
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Unknown codec ID: %lX", zip->codec);
return (ARCHIVE_FAILED);
}
return (ARCHIVE_OK);
}
| 0 |
[
"CWE-190",
"CWE-125"
] |
libarchive
|
e79ef306afe332faf22e9b442a2c6b59cb175573
| 40,597,307,932,580,190,000,000,000,000,000,000,000 | 301 |
Issue #718: Fix TALOS-CAN-152
If a 7-Zip archive declares a ridiculously large number of substreams,
it can overflow an internal counter, leading a subsequent memory
allocation to be too small for the substream data.
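A sketch of the guard this implies (standalone model):

#include <stdint.h>
#include <stddef.h>

/* Validate the declared substream count against the element size
 * before allocating, so a huge count cannot wrap into a too-small
 * request. */
static int count_is_sane(uint64_t declared, size_t elem_size)
{
	return elem_size != 0 && declared <= SIZE_MAX / elem_size;
}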
Thanks to the Open Source and Threat Intelligence project at Cisco
for reporting this issue.
|
static uint8_t authorize_req(struct bt_gatt_server *server,
uint8_t opcode, uint16_t handle)
{
if (!server->authorize)
return 0;
return server->authorize(server->att, opcode, handle,
server->authorize_data);
}
| 0 |
[
"CWE-287"
] |
bluez
|
00da0fb4972cf59e1c075f313da81ea549cb8738
| 123,279,005,010,882,420,000,000,000,000,000,000,000 | 9 |
shared/gatt-server: Fix not properly checking for secure flags
When passing the mask to check_permissions, all valid permissions for
the operation must be set, including the BT_ATT_PERM_SECURE flags.
|
int ssl_print_tmp_key(BIO *out, SSL *s)
{
EVP_PKEY *key;
if (!SSL_get_server_tmp_key(s, &key))
return 1;
BIO_puts(out, "Server Temp Key: ");
switch (EVP_PKEY_id(key))
{
case EVP_PKEY_RSA:
BIO_printf(out, "RSA, %d bits\n", EVP_PKEY_bits(key));
break;
case EVP_PKEY_DH:
BIO_printf(out, "DH, %d bits\n", EVP_PKEY_bits(key));
break;
case EVP_PKEY_EC:
{
EC_KEY *ec = EVP_PKEY_get1_EC_KEY(key);
int nid;
const char *cname;
nid = EC_GROUP_get_curve_name(EC_KEY_get0_group(ec));
EC_KEY_free(ec);
cname = EC_curve_nid2nist(nid);
if (!cname)
cname = OBJ_nid2sn(nid);
BIO_printf(out, "ECDH, %s, %d bits\n",
cname, EVP_PKEY_bits(key));
}
}
EVP_PKEY_free(key);
return 1;
}
| 0 |
[] |
openssl
|
a70da5b3ecc3160368529677006801c58cb369db
| 208,755,686,363,888,250,000,000,000,000,000,000,000 | 33 |
New functions to check a hostname, email or IP address against a
certificate. Add options to s_client, s_server and x509 utilities
to print results of checks.
|
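The commit message above introduces certificate identity checks. A minimal usage sketch of the new API, assuming a connected SSL handle and OpenSSL >= 1.0.2 (the hostname is a placeholder):

#include <openssl/ssl.h>
#include <openssl/x509v3.h>

/* Returns 1 when the peer certificate matches the expected hostname. */
static int peer_matches_host(SSL *ssl, const char *host)
{
    X509 *cert = SSL_get_peer_certificate(ssl);
    int ok;

    if (cert == NULL)
        return 0;                     /* no certificate presented */
    /* chklen 0 means "use strlen(host)"; return value 1 is a match */
    ok = X509_check_host(cert, host, 0, 0, NULL) == 1;
    X509_free(cert);
    return ok;
}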
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
{
struct page **p;
if (maxsize > i->count)
maxsize = i->count;
if (unlikely(i->type & ITER_PIPE))
return pipe_get_pages_alloc(i, pages, maxsize, start);
iterate_all_kinds(i, maxsize, v, ({
unsigned long addr = (unsigned long)v.iov_base;
size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
int n;
int res;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
p = get_pages_array(n);
if (!p)
return -ENOMEM;
res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
0;}),({
/* can't be more than PAGE_SIZE */
*start = v.bv_offset;
*pages = p = get_pages_array(1);
if (!p)
return -ENOMEM;
get_page(*p = v.bv_page);
return v.bv_len;
}),({
return -EFAULT;
})
)
return 0;
}
| 0 |
[
"CWE-200"
] |
linux
|
b9dc6f65bc5e232d1c05fe34b5daadc7e8bbf1fb
| 21,183,291,796,630,293,000,000,000,000,000,000,000 | 43 |
fix a fencepost error in pipe_advance()
The logic in pipe_advance() used to release all buffers past the new
position failed in cases when the number of buffers to release was equal
to pipe->buffers. If that happened, none of them had been released,
leaving pipe full. Worse, it was trivial to trigger and we end up with
pipe full of uninitialized pages. IOW, it's an infoleak.
Cc: [email protected] # v4.9
Reported-by: "Alan J. Wylie" <[email protected]>
Tested-by: "Alan J. Wylie" <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
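The fencepost the message describes is the classic ring-buffer off-by-one: the release loop's guard excluded the case where the number of buffers to drop equals the ring size, so nothing was dropped at all. A self-contained sketch of the corrected loop (types and helper are stand-ins, not the real struct pipe_buffer):

struct buf { void *page; };                  /* stand-in pipe buffer */
static void release_one(struct buf *b) { b->page = 0; }

/* Broken guard: while (nr > 0 && nr < ring_size) skipped the
 * nr == ring_size case. Correct guard: release exactly nr buffers,
 * even when that is the whole ring. */
static void release_bufs(struct buf *bufs, unsigned int ring_size,
                         unsigned int curbuf, unsigned int nr)
{
    while (nr > 0) {
        release_one(&bufs[curbuf]);
        curbuf = (curbuf + 1) % ring_size;
        nr--;
    }
}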
void ScalarAddition(OpKernelContext* context, const quint8* full_input,
float full_input_min, float full_input_max,
int64 num_elements, quint8 scalar_input,
float scalar_input_min, float scalar_input_max,
float output_min, float output_max, qint32* output) {
const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(
scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);
const float input_0_float =
QuantizedToFloat<quint8>(0, full_input_min, full_input_max);
const float input_1_float =
QuantizedToFloat<quint8>(1, full_input_min, full_input_max);
const int64 input_0_int64 =
FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);
const int64 input_1_int64 =
FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max);
const int32 input_mult_int32 = input_1_int64 - input_0_int64;
const int64 lowest_quantized =
static_cast<int64>(Eigen::NumTraits<qint32>::lowest());
const int64 highest_quantized =
static_cast<int64>(Eigen::NumTraits<qint32>::highest());
const int64x2_t input_0_64x2 = vmovq_n_s64(input_0_int64);
const int32x2_t input_mult_32x2 = vmov_n_s32(input_mult_int32);
const int32x4_t scalar_in_output_range_32x4 =
vmovq_n_s32(scalar_in_output_range);
int64 i = 0;
for (; i < (num_elements - 7); i += 8) {
const uint8* full_input_ptr = &(full_input->value) + i;
const std::array<int32x4_t, 2> output_value =
Requantize8x8To32Neon(full_input_ptr, input_0_64x2, input_mult_32x2);
const int32x4_t result_low_32x4 =
vaddq_s32(output_value[0], scalar_in_output_range_32x4);
const int32x4_t result_high_32x4 =
vaddq_s32(output_value[1], scalar_in_output_range_32x4);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, result_low_32x4);
vst1q_s32(output_ptr + 4, result_high_32x4);
}
for (; i < num_elements; ++i) {
const int64 full_input_value = static_cast<int64>(full_input[i]);
int64 full_input_in_output_range_64 =
input_0_int64 + (full_input_value * input_mult_int32);
full_input_in_output_range_64 =
std::max(full_input_in_output_range_64, lowest_quantized);
full_input_in_output_range_64 =
std::min(full_input_in_output_range_64, highest_quantized);
const int32 full_input_in_output_range =
static_cast<int32>(full_input_in_output_range_64);
output[i] = full_input_in_output_range + scalar_in_output_range;
}
}
| 0 |
[
"CWE-369"
] |
tensorflow
|
744009c9e5cc5d0447f0dc39d055f917e1fd9e16
| 178,434,443,579,930,830,000,000,000,000,000,000,000 | 53 |
Validate work in `QuantizedAdd`, ensure at least one element.
PiperOrigin-RevId: 370127996
Change-Id: I57c6f3e01afdeada84737820a131590137463855
|
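The division-by-zero class (CWE-369) here comes from per-element range math: with zero elements or a degenerate min == max range, the requantization scale divides by zero. A language-neutral C sketch of the guard — the real fix is an OP_REQUIRES shape check in the TensorFlow kernel, and these names are illustrative:

/* Reject inputs that would make the quantization scale undefined. */
static int quantized_scale(float in_min, float in_max,
                           float out_min, float out_max,
                           long long num_elements, float *scale)
{
    if (num_elements <= 0 || in_max <= in_min || out_max <= out_min)
        return -1;                    /* invalid argument */
    *scale = (out_max - out_min) / (in_max - in_min);
    return 0;
}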
static Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define ThrowDCMException(exception,message) \
{ \
if (info.scale != (Quantum *) NULL) \
info.scale=(Quantum *) RelinquishMagickMemory(info.scale); \
if (data != (unsigned char *) NULL) \
data=(unsigned char *) RelinquishMagickMemory(data); \
if (graymap != (int *) NULL) \
graymap=(int *) RelinquishMagickMemory(graymap); \
if (bluemap != (int *) NULL) \
bluemap=(int *) RelinquishMagickMemory(bluemap); \
if (greenmap != (int *) NULL) \
greenmap=(int *) RelinquishMagickMemory(greenmap); \
if (redmap != (int *) NULL) \
redmap=(int *) RelinquishMagickMemory(redmap); \
if (stream_info->offsets != (ssize_t *) NULL) \
stream_info->offsets=(ssize_t *) RelinquishMagickMemory( \
stream_info->offsets); \
if (stream_info != (DCMStreamInfo *) NULL) \
stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info); \
ThrowReaderException((exception),(message)); \
}
char
explicit_vr[MagickPathExtent],
implicit_vr[MagickPathExtent],
magick[MagickPathExtent],
photometric[MagickPathExtent];
DCMInfo
info;
DCMStreamInfo
*stream_info;
Image
*image;
int
*bluemap,
datum,
*greenmap,
*graymap,
*redmap;
MagickBooleanType
explicit_file,
explicit_retry,
use_explicit;
MagickOffsetType
offset;
register unsigned char
*p;
register ssize_t
i;
size_t
colors,
height,
length,
number_scenes,
quantum,
status,
width;
ssize_t
count,
scene;
unsigned char
*data;
unsigned short
group,
element;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image->depth=8UL;
image->endian=LSBEndian;
/*
Read DCM preamble.
*/
(void) memset(&info,0,sizeof(info));
data=(unsigned char *) NULL;
graymap=(int *) NULL;
redmap=(int *) NULL;
greenmap=(int *) NULL;
bluemap=(int *) NULL;
stream_info=(DCMStreamInfo *) AcquireMagickMemory(sizeof(*stream_info));
if (stream_info == (DCMStreamInfo *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(stream_info,0,sizeof(*stream_info));
count=ReadBlob(image,128,(unsigned char *) magick);
if (count != 128)
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
count=ReadBlob(image,4,(unsigned char *) magick);
if ((count != 4) || (LocaleNCompare(magick,"DICM",4) != 0))
{
offset=SeekBlob(image,0L,SEEK_SET);
if (offset < 0)
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
}
/*
Read DCM Medical image.
*/
(void) CopyMagickString(photometric,"MONOCHROME1 ",MagickPathExtent);
info.bits_allocated=8;
info.bytes_per_pixel=1;
info.depth=8;
info.mask=0xffff;
info.max_value=255UL;
info.samples_per_pixel=1;
info.signed_data=(~0UL);
info.rescale_slope=1.0;
data=(unsigned char *) NULL;
element=0;
explicit_vr[2]='\0';
explicit_file=MagickFalse;
colors=0;
redmap=(int *) NULL;
greenmap=(int *) NULL;
bluemap=(int *) NULL;
graymap=(int *) NULL;
height=0;
number_scenes=1;
use_explicit=MagickFalse;
explicit_retry = MagickFalse;
width=0;
while (TellBlob(image) < (MagickOffsetType) GetBlobSize(image))
{
for (group=0; (group != 0x7FE0) || (element != 0x0010) ; )
{
/*
Read a group.
*/
image->offset=(ssize_t) TellBlob(image);
group=ReadBlobLSBShort(image);
element=ReadBlobLSBShort(image);
if ((group == 0xfffc) && (element == 0xfffc))
break;
if ((group != 0x0002) && (image->endian == MSBEndian))
{
group=(unsigned short) ((group << 8) | ((group >> 8) & 0xFF));
element=(unsigned short) ((element << 8) | ((element >> 8) & 0xFF));
}
quantum=0;
/*
Find corresponding VR for this group and element.
*/
for (i=0; dicom_info[i].group < 0xffff; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
(void) CopyMagickString(implicit_vr,dicom_info[i].vr,MagickPathExtent);
count=ReadBlob(image,2,(unsigned char *) explicit_vr);
if (count != 2)
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
/*
Check for "explicitness", but meta-file headers always explicit.
*/
if ((explicit_file == MagickFalse) && (group != 0x0002))
explicit_file=(isupper((unsigned char) *explicit_vr) != MagickFalse) &&
(isupper((unsigned char) *(explicit_vr+1)) != MagickFalse) ?
MagickTrue : MagickFalse;
use_explicit=((group == 0x0002) && (explicit_retry == MagickFalse)) ||
(explicit_file != MagickFalse) ? MagickTrue : MagickFalse;
if ((use_explicit != MagickFalse) && (strncmp(implicit_vr,"xs",2) == 0))
(void) CopyMagickString(implicit_vr,explicit_vr,MagickPathExtent);
if ((use_explicit == MagickFalse) || (strncmp(implicit_vr,"!!",2) == 0))
{
offset=SeekBlob(image,(MagickOffsetType) -2,SEEK_CUR);
if (offset < 0)
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
quantum=4;
}
else
{
/*
Assume explicit type.
*/
quantum=2;
if ((strncmp(explicit_vr,"OB",2) == 0) ||
(strncmp(explicit_vr,"UN",2) == 0) ||
(strncmp(explicit_vr,"OW",2) == 0) ||
(strncmp(explicit_vr,"SQ",2) == 0))
{
(void) ReadBlobLSBShort(image);
quantum=4;
}
}
datum=0;
if (quantum == 4)
{
if (group == 0x0002)
datum=ReadBlobLSBSignedLong(image);
else
datum=ReadBlobSignedLong(image);
}
else
if (quantum == 2)
{
if (group == 0x0002)
datum=ReadBlobLSBSignedShort(image);
else
datum=ReadBlobSignedShort(image);
}
quantum=0;
length=1;
if (datum != 0)
{
if ((strncmp(implicit_vr,"OW",2) == 0) ||
(strncmp(implicit_vr,"SS",2) == 0) ||
(strncmp(implicit_vr,"US",2) == 0))
quantum=2;
else
if ((strncmp(implicit_vr,"FL",2) == 0) ||
(strncmp(implicit_vr,"OF",2) == 0) ||
(strncmp(implicit_vr,"SL",2) == 0) ||
(strncmp(implicit_vr,"UL",2) == 0))
quantum=4;
else
if (strncmp(implicit_vr,"FD",2) == 0)
quantum=8;
else
quantum=1;
if (datum != ~0)
length=(size_t) datum/quantum;
else
{
/*
Sequence and item of undefined length.
*/
quantum=0;
length=0;
}
}
if (image_info->verbose != MagickFalse)
{
/*
Display Dicom info.
*/
if (use_explicit == MagickFalse)
explicit_vr[0]='\0';
for (i=0; dicom_info[i].description != (char *) NULL; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
(void) FormatLocaleFile(stdout,"0x%04lX %4ld %s-%s (0x%04lx,0x%04lx)",
(unsigned long) image->offset,(long) length,implicit_vr,explicit_vr,
(unsigned long) group,(unsigned long) element);
if (dicom_info[i].description != (char *) NULL)
(void) FormatLocaleFile(stdout," %s",dicom_info[i].description);
(void) FormatLocaleFile(stdout,": ");
}
if ((group == 0x7FE0) && (element == 0x0010))
{
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"\n");
break;
}
/*
Allocate space and read an array.
*/
data=(unsigned char *) NULL;
if ((length == 1) && (quantum == 1))
datum=ReadBlobByte(image);
else
if ((length == 1) && (quantum == 2))
{
if (group == 0x0002)
datum=ReadBlobLSBSignedShort(image);
else
datum=ReadBlobSignedShort(image);
}
else
if ((length == 1) && (quantum == 4))
{
if (group == 0x0002)
datum=ReadBlobLSBSignedLong(image);
else
datum=ReadBlobSignedLong(image);
}
else
if ((quantum != 0) && (length != 0))
{
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,
"InsufficientImageDataInFile");
if (~length >= 1)
data=(unsigned char *) AcquireQuantumMemory(length+1,quantum*
sizeof(*data));
if (data == (unsigned char *) NULL)
ThrowDCMException(ResourceLimitError,
"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) quantum*length,data);
if (count != (ssize_t) (quantum*length))
{
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"count=%d quantum=%d "
"length=%d group=%d\n",(int) count,(int) quantum,(int)
length,(int) group);
ThrowDCMException(CorruptImageError,
"InsufficientImageDataInFile");
}
data[length*quantum]='\0';
}
if ((((unsigned int) group << 16) | element) == 0xFFFEE0DD)
{
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
continue;
}
switch (group)
{
case 0x0002:
{
switch (element)
{
case 0x0010:
{
char
transfer_syntax[MagickPathExtent];
/*
Transfer Syntax.
*/
if ((datum == 0) && (explicit_retry == MagickFalse))
{
explicit_retry=MagickTrue;
(void) SeekBlob(image,(MagickOffsetType) 0,SEEK_SET);
group=0;
element=0;
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,
"Corrupted image - trying explicit format\n");
break;
}
*transfer_syntax='\0';
if (data != (unsigned char *) NULL)
(void) CopyMagickString(transfer_syntax,(char *) data,
MagickPathExtent);
if (image_info->verbose != MagickFalse)
(void) FormatLocaleFile(stdout,"transfer_syntax=%s\n",
(const char *) transfer_syntax);
if (strncmp(transfer_syntax,"1.2.840.10008.1.2",17) == 0)
{
int
subtype,
type;
type=1;
subtype=0;
if (strlen(transfer_syntax) > 17)
{
count=(ssize_t) sscanf(transfer_syntax+17,".%d.%d",&type,
&subtype);
if (count < 1)
ThrowDCMException(CorruptImageError,
"ImproperImageHeader");
}
switch (type)
{
case 1:
{
image->endian=LSBEndian;
break;
}
case 2:
{
image->endian=MSBEndian;
break;
}
case 4:
{
if ((subtype >= 80) && (subtype <= 81))
image->compression=JPEGCompression;
else
if ((subtype >= 90) && (subtype <= 93))
image->compression=JPEG2000Compression;
else
image->compression=JPEGCompression;
break;
}
case 5:
{
image->compression=RLECompression;
break;
}
}
}
break;
}
default:
break;
}
break;
}
case 0x0028:
{
switch (element)
{
case 0x0002:
{
/*
Samples per pixel.
*/
info.samples_per_pixel=(size_t) datum;
if ((info.samples_per_pixel == 0) || (info.samples_per_pixel > 4))
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
break;
}
case 0x0004:
{
/*
Photometric interpretation.
*/
if (data == (unsigned char *) NULL)
break;
for (i=0; i < (ssize_t) MagickMin(length,MagickPathExtent-1); i++)
photometric[i]=(char) data[i];
photometric[i]='\0';
info.polarity=LocaleCompare(photometric,"MONOCHROME1 ") == 0 ?
MagickTrue : MagickFalse;
break;
}
case 0x0006:
{
/*
Planar configuration.
*/
if (datum == 1)
image->interlace=PlaneInterlace;
break;
}
case 0x0008:
{
/*
Number of frames.
*/
if (data == (unsigned char *) NULL)
break;
number_scenes=StringToUnsignedLong((char *) data);
break;
}
case 0x0010:
{
/*
Image rows.
*/
height=(size_t) datum;
break;
}
case 0x0011:
{
/*
Image columns.
*/
width=(size_t) datum;
break;
}
case 0x0100:
{
/*
Bits allocated.
*/
info.bits_allocated=(size_t) datum;
info.bytes_per_pixel=1;
if (datum > 8)
info.bytes_per_pixel=2;
info.depth=info.bits_allocated;
if ((info.depth == 0) || (info.depth > 32))
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
info.max_value=(1UL << info.bits_allocated)-1;
image->depth=info.depth;
break;
}
case 0x0101:
{
/*
Bits stored.
*/
info.significant_bits=(size_t) datum;
info.bytes_per_pixel=1;
if (info.significant_bits > 8)
info.bytes_per_pixel=2;
info.depth=info.significant_bits;
if ((info.depth == 0) || (info.depth > 16))
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
info.max_value=(1UL << info.significant_bits)-1;
info.mask=(size_t) GetQuantumRange(info.significant_bits);
image->depth=info.depth;
break;
}
case 0x0102:
{
/*
High bit.
*/
break;
}
case 0x0103:
{
/*
Pixel representation.
*/
info.signed_data=(size_t) datum;
break;
}
case 0x1050:
{
/*
Visible pixel range: center.
*/
if (data != (unsigned char *) NULL)
info.window_center=StringToDouble((char *) data,(char **) NULL);
break;
}
case 0x1051:
{
/*
Visible pixel range: width.
*/
if (data != (unsigned char *) NULL)
info.window_width=StringToDouble((char *) data,(char **) NULL);
break;
}
case 0x1052:
{
/*
Rescale intercept
*/
if (data != (unsigned char *) NULL)
info.rescale_intercept=StringToDouble((char *) data,
(char **) NULL);
break;
}
case 0x1053:
{
/*
Rescale slope
*/
if (data != (unsigned char *) NULL)
info.rescale_slope=StringToDouble((char *) data,(char **) NULL);
break;
}
case 0x1200:
case 0x3006:
{
/*
Populate graymap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/info.bytes_per_pixel);
datum=(int) colors;
if (graymap != (int *) NULL)
graymap=(int *) RelinquishMagickMemory(graymap);
graymap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*graymap));
if (graymap == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(graymap,0,MagickMax(colors,65536)*
sizeof(*graymap));
for (i=0; i < (ssize_t) colors; i++)
if (info.bytes_per_pixel == 1)
graymap[i]=(int) data[i];
else
graymap[i]=(int) ((short *) data)[i];
break;
}
case 0x1201:
{
unsigned short
index;
/*
Populate redmap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/2);
datum=(int) colors;
if (redmap != (int *) NULL)
redmap=(int *) RelinquishMagickMemory(redmap);
redmap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*redmap));
if (redmap == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(redmap,0,MagickMax(colors,65536)*
sizeof(*redmap));
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
redmap[i]=(int) index;
p+=2;
}
break;
}
case 0x1202:
{
unsigned short
index;
/*
Populate greenmap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/2);
datum=(int) colors;
if (greenmap != (int *) NULL)
greenmap=(int *) RelinquishMagickMemory(greenmap);
greenmap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*greenmap));
if (greenmap == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(greenmap,0,MagickMax(colors,65536)*
sizeof(*greenmap));
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
greenmap[i]=(int) index;
p+=2;
}
break;
}
case 0x1203:
{
unsigned short
index;
/*
Populate bluemap.
*/
if (data == (unsigned char *) NULL)
break;
colors=(size_t) (length/2);
datum=(int) colors;
if (bluemap != (int *) NULL)
bluemap=(int *) RelinquishMagickMemory(bluemap);
bluemap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
sizeof(*bluemap));
if (bluemap == (int *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(bluemap,0,MagickMax(colors,65536)*
sizeof(*bluemap));
p=data;
for (i=0; i < (ssize_t) colors; i++)
{
if (image->endian == MSBEndian)
index=(unsigned short) ((*p << 8) | *(p+1));
else
index=(unsigned short) (*p | (*(p+1) << 8));
bluemap[i]=(int) index;
p+=2;
}
break;
}
default:
break;
}
break;
}
case 0x2050:
{
switch (element)
{
case 0x0020:
{
if ((data != (unsigned char *) NULL) &&
(strncmp((char *) data,"INVERSE",7) == 0))
info.polarity=MagickTrue;
break;
}
default:
break;
}
break;
}
default:
break;
}
if (data != (unsigned char *) NULL)
{
char
*attribute;
for (i=0; dicom_info[i].description != (char *) NULL; i++)
if ((group == dicom_info[i].group) &&
(element == dicom_info[i].element))
break;
if (dicom_info[i].description != (char *) NULL)
{
attribute=AcquireString("dcm:");
(void) ConcatenateString(&attribute,dicom_info[i].description);
for (i=0; i < (ssize_t) MagickMax(length,4); i++)
if (isprint((int) data[i]) == MagickFalse)
break;
if ((i == (ssize_t) length) || (length > 4))
{
(void) SubstituteString(&attribute," ","");
(void) SetImageProperty(image,attribute,(char *) data,
exception);
}
attribute=DestroyString(attribute);
}
}
if (image_info->verbose != MagickFalse)
{
if (data == (unsigned char *) NULL)
(void) FormatLocaleFile(stdout,"%d\n",datum);
else
{
/*
Display group data.
*/
for (i=0; i < (ssize_t) MagickMax(length,4); i++)
if (isprint((int) data[i]) == MagickFalse)
break;
if ((i != (ssize_t) length) && (length <= 4))
{
ssize_t
j;
datum=0;
for (j=(ssize_t) length-1; j >= 0; j--)
datum=(256*datum+data[j]);
(void) FormatLocaleFile(stdout,"%d",datum);
}
else
for (i=0; i < (ssize_t) length; i++)
if (isprint((int) data[i]) != MagickFalse)
(void) FormatLocaleFile(stdout,"%c",data[i]);
else
(void) FormatLocaleFile(stdout,"%c",'.');
(void) FormatLocaleFile(stdout,"\n");
}
}
if (data != (unsigned char *) NULL)
data=(unsigned char *) RelinquishMagickMemory(data);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
}
if ((group == 0xfffc) && (element == 0xfffc))
{
Image
*last;
last=RemoveLastImageFromList(&image);
if (last != (Image *) NULL)
last=DestroyImage(last);
break;
}
if ((width == 0) || (height == 0))
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
image->columns=(size_t) width;
image->rows=(size_t) height;
if (info.signed_data == 0xffff)
info.signed_data=(size_t) (info.significant_bits == 16 ? 1 : 0);
if ((image->compression == JPEGCompression) ||
(image->compression == JPEG2000Compression))
{
Image
*images;
ImageInfo
*read_info;
int
c;
/*
Read offset table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
if (ReadBlobByte(image) == EOF)
break;
(void) (((ssize_t) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image));
length=(size_t) ReadBlobLSBLong(image);
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile");
stream_info->offset_count=length >> 2;
if (stream_info->offset_count != 0)
{
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *) RelinquishMagickMemory(
stream_info->offsets);
stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
stream_info->offset_count,sizeof(*stream_info->offsets));
if (stream_info->offsets == (ssize_t *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
offset=TellBlob(image);
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]+=offset;
}
/*
Handle non-native image formats.
*/
read_info=CloneImageInfo(image_info);
SetImageInfoBlob(read_info,(void *) NULL,0);
images=NewImageList();
for (scene=0; scene < (ssize_t) number_scenes; scene++)
{
char
filename[MagickPathExtent];
const char
*property;
FILE
*file;
Image
*jpeg_image;
int
unique_file;
unsigned int
tag;
tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image);
length=(size_t) ReadBlobLSBLong(image);
if (tag == 0xFFFEE0DD)
break; /* sequence delimiter tag */
if (tag != 0xFFFEE000)
{
read_info=DestroyImageInfo(read_info);
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
}
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if (file == (FILE *) NULL)
{
(void) RelinquishUniqueFileResource(filename);
ThrowFileException(exception,FileOpenError,
"UnableToCreateTemporaryFile",filename);
break;
}
for (c=EOF; length != 0; length--)
{
c=ReadBlobByte(image);
if (c == EOF)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
(void) fputc(c,file);
}
(void) fclose(file);
if (c == EOF)
break;
(void) FormatLocaleString(read_info->filename,MagickPathExtent,
"jpeg:%s",filename);
if (image->compression == JPEG2000Compression)
(void) FormatLocaleString(read_info->filename,MagickPathExtent,
"j2k:%s",filename);
jpeg_image=ReadImage(read_info,exception);
if (jpeg_image != (Image *) NULL)
{
ResetImagePropertyIterator(image);
property=GetNextImageProperty(image);
while (property != (const char *) NULL)
{
(void) SetImageProperty(jpeg_image,property,
GetImageProperty(image,property,exception),exception);
property=GetNextImageProperty(image);
}
AppendImageToList(&images,jpeg_image);
}
(void) RelinquishUniqueFileResource(filename);
}
read_info=DestroyImageInfo(read_info);
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *)
RelinquishMagickMemory(stream_info->offsets);
stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info);
if (info.scale != (Quantum *) NULL)
info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
if (graymap != (int *) NULL)
graymap=(int *) RelinquishMagickMemory(graymap);
if (bluemap != (int *) NULL)
bluemap=(int *) RelinquishMagickMemory(bluemap);
if (greenmap != (int *) NULL)
greenmap=(int *) RelinquishMagickMemory(greenmap);
if (redmap != (int *) NULL)
redmap=(int *) RelinquishMagickMemory(redmap);
image=DestroyImageList(image);
return(GetFirstImageInList(images));
}
if (info.depth != (1UL*MAGICKCORE_QUANTUM_DEPTH))
{
QuantumAny
range;
/*
Compute pixel scaling table.
*/
length=(size_t) (GetQuantumRange(info.depth)+1);
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile");
if (info.scale != (Quantum *) NULL)
info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
info.scale=(Quantum *) AcquireQuantumMemory(MagickMax(length,256),
sizeof(*info.scale));
if (info.scale == (Quantum *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(info.scale,0,MagickMax(length,256)*
sizeof(*info.scale));
range=GetQuantumRange(info.depth);
for (i=0; i <= (ssize_t) GetQuantumRange(info.depth); i++)
info.scale[i]=ScaleAnyToQuantum((size_t) i,range);
}
if (image->compression == RLECompression)
{
unsigned int
tag;
/*
Read RLE offset table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
{
int
c;
c=ReadBlobByte(image);
if (c == EOF)
break;
}
tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image);
(void) tag;
length=(size_t) ReadBlobLSBLong(image);
if (length > (size_t) GetBlobSize(image))
ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile");
stream_info->offset_count=length >> 2;
if (stream_info->offset_count != 0)
{
stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
stream_info->offset_count,sizeof(*stream_info->offsets));
if (stream_info->offsets == (ssize_t *) NULL)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
{
stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
if (EOFBlob(image) != MagickFalse)
break;
}
offset=TellBlob(image)+8;
for (i=0; i < (ssize_t) stream_info->offset_count; i++)
stream_info->offsets[i]+=offset;
}
}
for (scene=0; scene < (ssize_t) number_scenes; scene++)
{
if (image_info->ping != MagickFalse)
break;
image->columns=(size_t) width;
image->rows=(size_t) height;
image->depth=info.depth;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
break;
image->colorspace=RGBColorspace;
(void) SetImageBackgroundColor(image,exception);
if ((image->colormap == (PixelInfo *) NULL) &&
(info.samples_per_pixel == 1))
{
int
index;
size_t
one;
one=1;
if (colors == 0)
colors=one << info.depth;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
if (redmap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=redmap[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].red=(MagickRealType) index;
}
if (greenmap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=greenmap[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].green=(MagickRealType) index;
}
if (bluemap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=bluemap[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].blue=(MagickRealType) index;
}
if (graymap != (int *) NULL)
for (i=0; i < (ssize_t) colors; i++)
{
index=graymap[i];
if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
(index <= (int) info.max_value))
index=(int) info.scale[index];
image->colormap[i].red=(MagickRealType) index;
image->colormap[i].green=(MagickRealType) index;
image->colormap[i].blue=(MagickRealType) index;
}
}
if (image->compression == RLECompression)
{
unsigned int
tag;
/*
Read RLE segment table.
*/
for (i=0; i < (ssize_t) stream_info->remaining; i++)
{
int
c;
c=ReadBlobByte(image);
if (c == EOF)
break;
}
tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
ReadBlobLSBShort(image);
stream_info->remaining=(size_t) ReadBlobLSBLong(image);
if ((tag != 0xFFFEE000) || (stream_info->remaining <= 64) ||
(EOFBlob(image) != MagickFalse))
{
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *)
RelinquishMagickMemory(stream_info->offsets);
ThrowDCMException(CorruptImageError,"ImproperImageHeader");
}
stream_info->count=0;
stream_info->segment_count=ReadBlobLSBLong(image);
for (i=0; i < 15; i++)
stream_info->segments[i]=(ssize_t) ReadBlobLSBSignedLong(image);
stream_info->remaining-=64;
if (stream_info->segment_count > 1)
{
info.bytes_per_pixel=1;
info.depth=8;
if (stream_info->offset_count > 0)
(void) SeekBlob(image,(MagickOffsetType)
stream_info->offsets[0]+stream_info->segments[0],SEEK_SET);
}
}
if ((info.samples_per_pixel > 1) && (image->interlace == PlaneInterlace))
{
register ssize_t
x;
register Quantum
*q;
ssize_t
y;
/*
Convert Planar RGB DCM Medical image to pixel packets.
*/
for (i=0; i < (ssize_t) info.samples_per_pixel; i++)
{
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
switch ((int) i)
{
case 0:
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 1:
{
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 2:
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
case 3:
{
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadDCMByte(stream_info,image)),q);
break;
}
default:
break;
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
}
}
else
{
const char
*option;
/*
Convert DCM Medical image to pixel packets.
*/
option=GetImageOption(image_info,"dcm:display-range");
if (option != (const char *) NULL)
{
if (LocaleCompare(option,"reset") == 0)
info.window_width=0;
}
option=GetImageOption(image_info,"dcm:window");
if (option != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(option,&geometry_info);
if (flags & RhoValue)
info.window_center=geometry_info.rho;
if (flags & SigmaValue)
info.window_width=geometry_info.sigma;
info.rescale=MagickTrue;
}
option=GetImageOption(image_info,"dcm:rescale");
if (option != (char *) NULL)
info.rescale=IsStringTrue(option);
if ((info.window_center != 0) && (info.window_width == 0))
info.window_width=info.window_center;
status=ReadDCMPixels(image,&info,stream_info,MagickTrue,exception);
if ((status != MagickFalse) && (stream_info->segment_count > 1))
{
if (stream_info->offset_count > 0)
(void) SeekBlob(image,(MagickOffsetType)
stream_info->offsets[0]+stream_info->segments[1],SEEK_SET);
(void) ReadDCMPixels(image,&info,stream_info,MagickFalse,
exception);
}
}
if (SetImageGray(image,exception) != MagickFalse)
(void) SetImageColorspace(image,GRAYColorspace,exception);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (scene < (ssize_t) (number_scenes-1))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
if (TellBlob(image) < (MagickOffsetType) GetBlobSize(image))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
/*
Free resources.
*/
if (stream_info->offsets != (ssize_t *) NULL)
stream_info->offsets=(ssize_t *)
RelinquishMagickMemory(stream_info->offsets);
stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info);
if (info.scale != (Quantum *) NULL)
info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
if (graymap != (int *) NULL)
graymap=(int *) RelinquishMagickMemory(graymap);
if (bluemap != (int *) NULL)
bluemap=(int *) RelinquishMagickMemory(bluemap);
if (greenmap != (int *) NULL)
greenmap=(int *) RelinquishMagickMemory(greenmap);
if (redmap != (int *) NULL)
redmap=(int *) RelinquishMagickMemory(redmap);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
| 1 |
[
"CWE-20",
"CWE-252"
] |
ImageMagick
|
6b6bff054d569a77973f2140c0e86366e6168a6c
| 266,436,587,895,599,540,000,000,000,000,000,000,000 | 1,283 |
https://github.com/ImageMagick/ImageMagick/issues/1199
|
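This is the one entry above labeled vulnerable (target 1; CWE-20/CWE-252). The pattern at issue is consuming blob reads without validating them; an EOF-poisoned length or offset then drives allocations and seeks. A minimal sketch of the checked idiom, using only identifiers already present in the sample:

/* Every length or offset read from the blob must be validated before
 * it is used; a read past EOF leaves the value unconstrained. */
length = (size_t) ReadBlobLSBLong(image);
if ((EOFBlob(image) != MagickFalse) ||
    (length > (size_t) GetBlobSize(image)))
  ThrowDCMException(CorruptImageError, "InsufficientImageDataInFile");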
RGWOp* RGWHandler_REST_S3Website::op_get()
{
return get_obj_op(true);
}
| 0 |
[
"CWE-79"
] |
ceph
|
8f90658c731499722d5f4393c8ad70b971d05f77
| 305,591,977,287,224,420,000,000,000,000,000,000,000 | 4 |
rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
|
static GList *get_problem_dirs_for_element_in_time(uid_t uid,
const char *element,
const char *value,
unsigned long timestamp_from,
unsigned long timestamp_to)
{
if (timestamp_to == 0) /* not sure this is possible, but... */
timestamp_to = time(NULL);
struct field_and_time_range me = {
.list = NULL,
.element = element,
.value = value,
.timestamp_from = timestamp_from,
.timestamp_to = timestamp_to,
};
for_each_problem_in_dir(g_settings_dump_location, uid, add_dirname_to_GList_if_matches, &me);
return g_list_reverse(me.list);
}
| 0 |
[
"CWE-59"
] |
abrt
|
7417505e1d93cc95ec648b74e3c801bc67aacb9f
| 292,392,563,713,895,120,000,000,000,000,000,000,000 | 21 |
daemon, dbus: allow only root to create CCpp, Koops, vmcore and xorg
Florian Weimer <[email protected]>:
This prevents users from feeding things that are not actually
coredumps and excerpts from /proc to these analyzers.
For example, it should not be possible to trigger a rule with
“EVENT=post-create analyzer=CCpp” using NewProblem
Related: #1212861
Signed-off-by: Jakub Filak <[email protected]>
|
int gnutls_ocsp_req_get_version(gnutls_ocsp_req_t req)
{
uint8_t version[8];
int len, ret;
if (req == NULL) {
gnutls_assert();
return GNUTLS_E_INVALID_REQUEST;
}
len = sizeof(version);
ret =
asn1_read_value(req->req, "tbsRequest.version", version, &len);
if (ret != ASN1_SUCCESS) {
if (ret == ASN1_ELEMENT_NOT_FOUND)
return 1; /* the DEFAULT version */
gnutls_assert();
return _gnutls_asn2err(ret);
}
return (int) version[0] + 1;
}
| 0 |
[
"CWE-264"
] |
gnutls
|
964632f37dfdfb914ebc5e49db4fa29af35b1de9
| 261,082,707,263,355,740,000,000,000,000,000,000,000 | 22 |
ocsp: corrected the comparison of the serial size in OCSP response
Previously the OCSP certificate check wouldn't verify the serial length
and could succeed in cases it shouldn't.
Reported by Stefan Buehler.
|
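The corrected comparison the message refers to must match the serial length before comparing bytes; otherwise a response whose serial is a prefix of the certificate's serial would be accepted. A hedged sketch (gnutls_datum_t carries .size and .data per GnuTLS; the variable names are illustrative):

/* Both size and content must match; memcmp alone accepts prefixes. */
if (resp_serial.size != cert_serial.size ||
    memcmp(resp_serial.data, cert_serial.data, cert_serial.size) != 0)
    return 0;  /* this response does not cover this certificate */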
do_unlet_var(
lval_T *lp,
char_u *name_end,
exarg_T *eap,
int deep UNUSED,
void *cookie UNUSED)
{
int forceit = eap->forceit;
int ret = OK;
int cc;
if (lp->ll_tv == NULL)
{
cc = *name_end;
*name_end = NUL;
// Environment variable, normal name or expanded name.
if (*lp->ll_name == '$')
vim_unsetenv(lp->ll_name + 1);
else if (do_unlet(lp->ll_name, forceit) == FAIL)
ret = FAIL;
*name_end = cc;
}
else if ((lp->ll_list != NULL
&& value_check_lock(lp->ll_list->lv_lock, lp->ll_name, FALSE))
|| (lp->ll_dict != NULL
&& value_check_lock(lp->ll_dict->dv_lock, lp->ll_name, FALSE)))
return FAIL;
else if (lp->ll_range)
{
if (list_unlet_range(lp->ll_list, lp->ll_li, lp->ll_name, lp->ll_n1,
!lp->ll_empty2, lp->ll_n2) == FAIL)
return FAIL;
}
else
{
if (lp->ll_list != NULL)
// unlet a List item.
listitem_remove(lp->ll_list, lp->ll_li);
else
// unlet a Dictionary item.
dictitem_remove(lp->ll_dict, lp->ll_di);
}
return ret;
}
| 0 |
[
"CWE-476"
] |
vim
|
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
| 42,479,630,427,290,366,000,000,000,000,000,000,000 | 46 |
patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window.
|
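The fix described keeps tabpage switching out of the cmdline window, where half-torn-down window state causes the crash. A sketch of the guard, assuming Vim's convention that cmdwin_type is non-zero while the cmdline window is open (the error identifier follows recent Vim sources but is quoted here as an assumption):

/* Refuse tabpage switches while the cmdline window is active. */
if (cmdwin_type != 0)
{
    emsg(_(e_invalid_in_cmdline_window));
    return;
}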
static int is_al_reg(const Operand *op) {
if (op->type & OT_MEMORY) {
return 0;
}
if (op->reg == X86R_AL && op->type & OT_BYTE) {
return 1;
}
return 0;
}
| 0 |
[
"CWE-119",
"CWE-125",
"CWE-787"
] |
radare2
|
9b46d38dd3c4de6048a488b655c7319f845af185
| 129,250,618,988,467,150,000,000,000,000,000,000,000 | 9 |
Fix #12372 and #12373 - Crash in x86 assembler (#12380)
0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL-
leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
|
static void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
cancel_delayed_work_sync(&bond->alb_work);
cancel_delayed_work_sync(&bond->ad_work);
cancel_delayed_work_sync(&bond->mcast_work);
cancel_delayed_work_sync(&bond->slave_arr_work);
}
| 0 |
[
"CWE-476",
"CWE-703"
] |
linux
|
105cd17a866017b45f3c45901b394c711c97bf40
| 167,865,608,869,038,800,000,000,000,000,000,000,000 | 9 |
bonding: fix null dereference in bond_ipsec_add_sa()
If bond doesn't have real device, bond->curr_active_slave is null.
But bond_ipsec_add_sa() dereferences bond->curr_active_slave without
null checking.
So, null-ptr-deref would occur.
Test commands:
ip link add bond0 type bond
ip link set bond0 up
ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi \
0x07 mode transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \
0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \
dst 14.0.0.70/24 proto tcp offload dev bond0 dir in
Splat looks like:
KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
CPU: 4 PID: 680 Comm: ip Not tainted 5.13.0-rc3+ #1168
RIP: 0010:bond_ipsec_add_sa+0xc4/0x2e0 [bonding]
Code: 85 21 02 00 00 4d 8b a6 48 0c 00 00 e8 75 58 44 ce 85 c0 0f 85 14
01 00 00 48 b8 00 00 00 00 00 fc ff df 4c 89 e2 48 c1 ea 03 <80> 3c 02
00 0f 85 fc 01 00 00 48 8d bb e0 02 00 00 4d 8b 2c 24 48
RSP: 0018:ffff88810946f508 EFLAGS: 00010246
RAX: dffffc0000000000 RBX: ffff88810b4e8040 RCX: 0000000000000001
RDX: 0000000000000000 RSI: ffffffff8fe34280 RDI: ffff888115abe100
RBP: ffff88810946f528 R08: 0000000000000003 R09: fffffbfff2287e11
R10: 0000000000000001 R11: ffff888115abe0c8 R12: 0000000000000000
R13: ffffffffc0aea9a0 R14: ffff88800d7d2000 R15: ffff88810b4e8330
FS: 00007efc5552e680(0000) GS:ffff888119c00000(0000)
knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000055c2530dbf40 CR3: 0000000103056004 CR4: 00000000003706e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
xfrm_dev_state_add+0x2a9/0x770
? memcpy+0x38/0x60
xfrm_add_sa+0x2278/0x3b10 [xfrm_user]
? xfrm_get_policy+0xaa0/0xaa0 [xfrm_user]
? register_lock_class+0x1750/0x1750
xfrm_user_rcv_msg+0x331/0x660 [xfrm_user]
? rcu_read_lock_sched_held+0x91/0xc0
? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user]
? find_held_lock+0x3a/0x1c0
? mutex_lock_io_nested+0x1210/0x1210
? sched_clock_cpu+0x18/0x170
netlink_rcv_skb+0x121/0x350
? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user]
? netlink_ack+0x9d0/0x9d0
? netlink_deliver_tap+0x17c/0xa50
xfrm_netlink_rcv+0x68/0x80 [xfrm_user]
netlink_unicast+0x41c/0x610
? netlink_attachskb+0x710/0x710
netlink_sendmsg+0x6b9/0xb70
[ ...]
Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves")
Signed-off-by: Taehee Yoo <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
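The null dereference described above happens because bond->curr_active_slave stays NULL until a real device is enslaved. A sketch of the guard in the IPsec add path, under the usual kernel RCU conventions (the exact locking in the real fix may differ):

/* Take the active slave under RCU and bail out when there is none. */
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (!slave) {
    rcu_read_unlock();
    return -ENODEV;          /* bond has no active slave yet */
}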
static int IPAddressOrRange_cmp(const IPAddressOrRange *a,
const IPAddressOrRange *b, const int length)
{
unsigned char addr_a[ADDR_RAW_BUF_LEN], addr_b[ADDR_RAW_BUF_LEN];
int prefixlen_a = 0, prefixlen_b = 0;
int r;
switch (a->type) {
case IPAddressOrRange_addressPrefix:
if (!addr_expand(addr_a, a->u.addressPrefix, length, 0x00))
return -1;
prefixlen_a = addr_prefixlen(a->u.addressPrefix);
break;
case IPAddressOrRange_addressRange:
if (!addr_expand(addr_a, a->u.addressRange->min, length, 0x00))
return -1;
prefixlen_a = length * 8;
break;
}
switch (b->type) {
case IPAddressOrRange_addressPrefix:
if (!addr_expand(addr_b, b->u.addressPrefix, length, 0x00))
return -1;
prefixlen_b = addr_prefixlen(b->u.addressPrefix);
break;
case IPAddressOrRange_addressRange:
if (!addr_expand(addr_b, b->u.addressRange->min, length, 0x00))
return -1;
prefixlen_b = length * 8;
break;
}
if ((r = memcmp(addr_a, addr_b, length)) != 0)
return r;
else
return prefixlen_a - prefixlen_b;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
openssl
|
068b963bb7afc57f5bdd723de0dd15e7795d5822
| 106,372,837,446,230,300,000,000,000,000,000,000,000 | 38 |
Avoid out-of-bounds read
Fixes CVE-2017-3735
Reviewed-by: Kurt Roeckx <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/4276)
(cherry picked from commit b23171744b01e473ebbfd6edad70c1c3825ffbcd)
|
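Both switch arms above funnel through addr_expand(), which is where CVE-2017-3735's one-byte overread lived: the ASN.1 bit string's advertised length was trusted when copying. A simplified standalone sketch of a bounds-checked expansion (it omits the unused-bits masking the real function also performs):

#include <string.h>
#include <openssl/asn1.h>

/* Expand an address prefix into a fixed-width buffer, refusing bit
 * strings longer than the address family allows. */
static int addr_expand_checked(unsigned char *addr,
                               const ASN1_BIT_STRING *bs,
                               int length, unsigned char fill)
{
    if (bs->length < 0 || bs->length > length)
        return 0;                     /* malformed: do not over-read */
    memcpy(addr, bs->data, bs->length);
    memset(addr + bs->length, fill, length - bs->length);
    return 1;
}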
void x25_limit_facilities(struct x25_facilities *facilities,
struct x25_neigh *nb)
{
if (!nb->extended) {
if (facilities->winsize_in > 7) {
pr_debug("incoming winsize limited to 7\n");
facilities->winsize_in = 7;
}
if (facilities->winsize_out > 7) {
facilities->winsize_out = 7;
pr_debug("outgoing winsize limited to 7\n");
}
}
}
| 0 |
[
"CWE-200"
] |
linux
|
79e48650320e6fba48369fccf13fd045315b19b8
| 97,121,901,940,326,900,000,000,000,000,000,000,000 | 15 |
net: fix a kernel infoleak in x25 module
Stack object "dte_facilities" is allocated in x25_rx_call_request(),
which is supposed to be initialized in x25_negotiate_facilities.
However, 5 fields (8 bytes in total) are not initialized. This
object is then copied to userland via copy_to_user, thus infoleak
occurs.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
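The infoleak the message describes is uninitialized stack bytes (padding plus five unset fields) escaping via copy_to_user(). The standard remedy is zeroing the object before partial initialization; a sketch of that shape:

/* Zero the whole facilities object so padding and any fields the
 * negotiation leaves untouched never reach user space. */
struct x25_dte_facilities dte_facilities;
memset(&dte_facilities, 0, sizeof(dte_facilities));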
acldefault_sql(PG_FUNCTION_ARGS)
{
char objtypec = PG_GETARG_CHAR(0);
Oid owner = PG_GETARG_OID(1);
GrantObjectType objtype = 0;
switch (objtypec)
{
case 'c':
objtype = ACL_OBJECT_COLUMN;
break;
case 'r':
objtype = ACL_OBJECT_RELATION;
break;
case 's':
objtype = ACL_OBJECT_SEQUENCE;
break;
case 'd':
objtype = ACL_OBJECT_DATABASE;
break;
case 'f':
objtype = ACL_OBJECT_FUNCTION;
break;
case 'l':
objtype = ACL_OBJECT_LANGUAGE;
break;
case 'L':
objtype = ACL_OBJECT_LARGEOBJECT;
break;
case 'n':
objtype = ACL_OBJECT_NAMESPACE;
break;
case 't':
objtype = ACL_OBJECT_TABLESPACE;
break;
case 'F':
objtype = ACL_OBJECT_FDW;
break;
case 'S':
objtype = ACL_OBJECT_FOREIGN_SERVER;
break;
case 'T':
objtype = ACL_OBJECT_TYPE;
break;
default:
elog(ERROR, "unrecognized objtype abbreviation: %c", objtypec);
}
PG_RETURN_ACL_P(acldefault(objtype, owner));
}
| 0 |
[
"CWE-264"
] |
postgres
|
fea164a72a7bfd50d77ba5fb418d357f8f2bb7d0
| 63,272,938,656,567,120,000,000,000,000,000,000,000 | 50 |
Shore up ADMIN OPTION restrictions.
Granting a role without ADMIN OPTION is supposed to prevent the grantee
from adding or removing members from the granted role. Issuing SET ROLE
before the GRANT bypassed that, because the role itself had an implicit
right to add or remove members. Plug that hole by recognizing that
implicit right only when the session user matches the current role.
Additionally, do not recognize it during a security-restricted operation
or during execution of a SECURITY DEFINER function. The restriction on
SECURITY DEFINER is not security-critical. However, it seems best for a
user testing his own SECURITY DEFINER function to see the same behavior
others will see. Back-patch to 8.4 (all supported versions).
The SQL standards do not conflate roles and users as PostgreSQL does;
only SQL roles have members, and only SQL users initiate sessions. An
application using PostgreSQL users and roles as SQL users and roles will
never attempt to grant membership in the role that is the session user,
so the implicit right to add or remove members will never arise.
The security impact was mostly that a role member could revoke access
from others, contrary to the wishes of his own grantor. Unapproved role
member additions are less notable, because the member can still largely
achieve that by creating a view or a SECURITY DEFINER function.
Reviewed by Andres Freund and Tom Lane. Reported, independently, by
Jonas Sundman and Noah Misch.
Security: CVE-2014-0060
|
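The tightened rule in the message — a role's implicit right to administer itself counts only when the session user actually is that role and no security-restricted context is active — reduces to a conjunction like the following sketch (the function names exist in PostgreSQL's miscadmin API, but the exact placement inside is_admin_of_role is paraphrased):

/* Implicit self-admin only for the genuine session user, outside
 * SECURITY DEFINER functions and security-restricted operations. */
if (member == GetUserId() &&
    member == GetSessionUserId() &&
    !InLocalUserIdChange() &&
    !InSecurityRestrictedOperation())
    return true;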
static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
s32 seq_rtt, s32 sack_rtt)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Prefer RTT measured from ACK's timing to TS-ECR. This is because
* broken middle-boxes or peers may corrupt TS-ECR fields. But
* Karn's algorithm forbids taking RTT if some retransmitted data
* is acked (RFC6298).
*/
if (flag & FLAG_RETRANS_DATA_ACKED)
seq_rtt = -1;
if (seq_rtt < 0)
seq_rtt = sack_rtt;
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
* acknowledges some new data, i.e., only if it advances the
* left edge of the send window.
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
flag & FLAG_ACKED)
seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
if (seq_rtt < 0)
return false;
tcp_rtt_estimator(sk, seq_rtt);
tcp_set_rto(sk);
/* RFC6298: only reset backoff on valid RTT measurement. */
inet_csk(sk)->icsk_backoff = 0;
return true;
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 290,834,690,456,059,200,000,000,000,000,000,000,000 | 36 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
struct virtio_gpu_simple_resource *res;
res = virtio_gpu_find_resource(g, resource_id);
g_return_if_fail(res != NULL);
g_return_if_fail(pixman_image_get_width(res->image) == 64);
g_return_if_fail(pixman_image_get_height(res->image) == 64);
g_return_if_fail(
PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);
memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}
| 0 |
[] |
qemu
|
86dd8fac2acc366930a5dc08d3fb1b1e816f4e1e
| 243,097,386,518,048,700,000,000,000,000,000,000,000 | 13 |
vhost-user-gpu: fix resource leak in 'vg_resource_create_2d' (CVE-2021-3544)
Call 'vugbm_buffer_destroy' in error path to avoid resource leak.
Fixes: CVE-2021-3544
Reported-by: Li Qiang <[email protected]>
Reviewed-by: Prasad J Pandit <[email protected]>
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
|
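The leak fixed by the commit above sits in the sibling function vg_resource_create_2d: when pixman image creation fails after a GBM buffer was already created, the buffer was leaked. A sketch of the error path with the added cleanup (field and helper names follow qemu's contrib/vhost-user-gpu and are hedged as assumptions):

if (!res->image) {
    g_critical("%s: resource creation failed %d %d %d",
               __func__, c2d.resource_id, c2d.width, c2d.height);
    vugbm_buffer_destroy(&res->buffer);   /* the missing cleanup */
    g_free(res);
    cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
    return;
}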
int node_random(const nodemask_t *maskp)
{
int w, bit = -1;
w = nodes_weight(*maskp);
if (w)
bit = bitmap_ord_to_pos(maskp->bits,
get_random_int() % w, MAX_NUMNODES);
return bit;
}
| 0 |
[
"CWE-264"
] |
linux-2.6
|
1a5a9906d4e8d1976b701f889d8f35d54b928f25
| 150,893,565,141,902,780,000,000,000,000,000,000,000 | 10 |
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
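The message above fully specifies the fix: snapshot *pmd into a local, insert a compiler barrier so the snapshot is taken exactly once, and run every check on the snapshot. That is essentially the helper the commit adds; a sketch (kernel types and predicates assumed from the usual pgtable headers):

/* Read *pmd once; a concurrent huge-page fault can still change the
 * real entry, but every subsequent test sees the same snapshot. */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    barrier();           /* compiler-only fence; no CPU fence needed */
#endif
    if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
        return 1;
    if (unlikely(pmd_bad(pmdval))) {
        pmd_clear_bad(pmd);
        return 1;
    }
    return 0;
}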
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
struct siginfo info;
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
info.si_pid = task_tgid_vnr(current);
info.si_uid = current_uid();
return kill_something_info(sig, &info, pid);
}
| 0 |
[] |
linux-2.6
|
0083fc2c50e6c5127c2802ad323adf8143ab7856
| 42,567,083,425,032,950,000,000,000,000,000,000,000 | 12 |
do_sigaltstack: avoid copying 'stack_t' as a structure to user space
Ulrich Drepper correctly points out that there is generally padding in
the structure on 64-bit hosts, and that copying the structure from
kernel to user space can leak information from the kernel stack in those
padding bytes.
Avoid the whole issue by just copying the three members one by one
instead, which also means that the function also can avoid the need for
a stack frame. This also happens to match how we copy the new structure
from user space, so it all even makes sense.
[ The obvious solution of adding a memset() generates horrid code, gcc
does really stupid things. ]
Reported-by: Ulrich Drepper <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
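A minimal sketch of the member-by-member copy described above, assuming the usual do_sigaltstack() locals (oss holding the old stack_t, uoss the user pointer); the exact error handling in the real function may differ:

if (uoss) {
    if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
        return -EFAULT;
    /* three scalar stores instead of one struct copy, so the padding
     * bytes of the kernel-side stack_t never reach user space */
    if (__put_user(oss.ss_sp,    &uoss->ss_sp)   ||
        __put_user(oss.ss_size,  &uoss->ss_size) ||
        __put_user(oss.ss_flags, &uoss->ss_flags))
        return -EFAULT;
}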
vrrp_mcast_group6_handler(vector_t *strvec)
{
struct sockaddr_in6 *mcast = &global_data->vrrp_mcast_group6;
int ret;
ret = inet_stosockaddr(strvec_slot(strvec, 1), 0, (struct sockaddr_storage *)mcast);
if (ret < 0) {
report_config_error(CONFIG_GENERAL_ERROR, "Configuration error: Cant parse vrrp_mcast_group6 [%s]. Skipping"
, FMT_STR_VSLOT(strvec, 1));
}
}
| 0 |
[
"CWE-200"
] |
keepalived
|
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
| 106,783,342,219,805,390,000,000,000,000,000,000,000 | 11 |
Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]>
|
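A small userspace sketch of the behavior the message describes — a safer default creation mask that configuration may override; the variable and option names are hypothetical:

#include <sys/stat.h>

static mode_t config_umask = 022;   /* default: 0644 files, 0755 dirs */

static void apply_umask(void)
{
    /* config_umask may have been overridden by a hypothetical "umask"
     * configuration keyword or command-line option before this runs */
    umask(config_umask);
}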
zsetrgbcolor(i_ctx_t * i_ctx_p)
{
os_ptr op = osp; /* required by "push" macro */
int code, i;
float values[3];
/* Gather numeric operand value(s) (also checks type) */
code = float_params(op, 3, (float *)&values);
if (code < 0)
return code;
/* Clamp numeric operand range(s) */
for (i = 0;i < 3; i++) {
if (values[i] < 0)
values[i] = 0;
else if (values[i] > 1)
values[i] = 1;
}
code = make_floats(&op[-2], (const float *)&values, 3);
if (code < 0)
return code;
/* Set up for the continuation procedure which will do the work */
/* Make sure the exec stack has enough space */
check_estack(5);
push_mark_estack(es_other, colour_cleanup);
esp++;
/* variable to hold base type (1 = RGB) */
make_int(esp, 1);
esp++;
/* Store the 'stage' of processing (initially 0) */
make_int(esp, 0);
/* Finally, the actual continuation routine */
push_op_estack(setdevicecolor_cont);
return o_push_estack;
}
| 0 |
[] |
ghostpdl
|
b326a71659b7837d3acde954b18bda1a6f5e9498
| 184,859,140,481,857,360,000,000,000,000,000,000,000 | 36 |
Bug 699655: Properly check the return value....
...when getting a value from a dictionary
|
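A minimal sketch of the pattern that two-line message asks for — checking the status of a dictionary lookup before using the result; the key name is hypothetical and the calling context is simplified:

ref *pvalue;
int code;

/* dict_find_string() returns >0 when found, 0 when the key is absent,
 * and <0 on error; pvalue is only valid on a positive return */
code = dict_find_string(op, "SomeKey", &pvalue);   /* hypothetical key */
if (code < 0)
    return code;
if (code == 0)
    return_error(gs_error_undefined);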
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
}
| 0 |
[
"CWE-416"
] |
linux
|
7a9cdebdcc17e426fb5287e4a82db1dfe86339b2
| 105,590,958,465,191,100,000,000,000,000,000,000,000 | 4 |
mm: get rid of vmacache_flush_all() entirely
Jann Horn points out that the vmacache_flush_all() function is not only
potentially expensive, it's buggy too. It also happens to be entirely
unnecessary, because the sequence number overflow case can be avoided by
simply making the sequence number be 64-bit. That doesn't even grow the
data structures in question, because the other adjacent fields are
already 64-bit.
So simplify the whole thing by just making the sequence number overflow
case go away entirely, which gets rid of all the complications and makes
the code faster too. Win-win.
[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics
also just goes away entirely with this ]
Reported-by: Jann Horn <[email protected]>
Suggested-by: Will Deacon <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
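A minimal sketch of the width change the message describes: once the sequence number is 64-bit, the overflow path (and vmacache_flush_all() with it) can simply be deleted; the struct layout shown is illustrative:

struct mm_struct {
    /* was u32: overflow forced an expensive, racy flush of every
     * task's VMA cache; a u64 cannot realistically wrap */
    u64 vmacache_seqnum;
    /* ... other fields unchanged ... */
};

static inline void vmacache_invalidate(struct mm_struct *mm)
{
    mm->vmacache_seqnum++;   /* no overflow handling needed anymore */
}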
void opj_tcd_makelayer_fixed(opj_tcd_t *tcd, OPJ_UINT32 layno,
OPJ_UINT32 final)
{
OPJ_UINT32 compno, resno, bandno, precno, cblkno;
OPJ_INT32 value; /*, matrice[tcd_tcp->numlayers][tcd_tile->comps[0].numresolutions][3]; */
OPJ_INT32 matrice[10][10][3];
OPJ_UINT32 i, j, k;
opj_cp_t *cp = tcd->cp;
opj_tcd_tile_t *tcd_tile = tcd->tcd_image->tiles;
opj_tcp_t *tcd_tcp = tcd->tcp;
for (compno = 0; compno < tcd_tile->numcomps; compno++) {
opj_tcd_tilecomp_t *tilec = &tcd_tile->comps[compno];
for (i = 0; i < tcd_tcp->numlayers; i++) {
for (j = 0; j < tilec->numresolutions; j++) {
for (k = 0; k < 3; k++) {
matrice[i][j][k] =
(OPJ_INT32)((OPJ_FLOAT32)cp->m_specific_param.m_enc.m_matrice[i *
tilec->numresolutions * 3 + j * 3 + k]
* (OPJ_FLOAT32)(tcd->image->comps[compno].prec / 16.0));
}
}
}
for (resno = 0; resno < tilec->numresolutions; resno++) {
opj_tcd_resolution_t *res = &tilec->resolutions[resno];
for (bandno = 0; bandno < res->numbands; bandno++) {
opj_tcd_band_t *band = &res->bands[bandno];
/* Skip empty bands */
if (opj_tcd_is_band_empty(band)) {
continue;
}
for (precno = 0; precno < res->pw * res->ph; precno++) {
opj_tcd_precinct_t *prc = &band->precincts[precno];
for (cblkno = 0; cblkno < prc->cw * prc->ch; cblkno++) {
opj_tcd_cblk_enc_t *cblk = &prc->cblks.enc[cblkno];
opj_tcd_layer_t *layer = &cblk->layers[layno];
OPJ_UINT32 n;
OPJ_INT32 imsb = (OPJ_INT32)(tcd->image->comps[compno].prec -
cblk->numbps); /* number of bit-plan equal to zero */
/* Correction of the matrix of coefficient to include the IMSB information */
if (layno == 0) {
value = matrice[layno][resno][bandno];
if (imsb >= value) {
value = 0;
} else {
value -= imsb;
}
} else {
value = matrice[layno][resno][bandno] - matrice[layno - 1][resno][bandno];
if (imsb >= matrice[layno - 1][resno][bandno]) {
value -= (imsb - matrice[layno - 1][resno][bandno]);
if (value < 0) {
value = 0;
}
}
}
if (layno == 0) {
cblk->numpassesinlayers = 0;
}
n = cblk->numpassesinlayers;
if (cblk->numpassesinlayers == 0) {
if (value != 0) {
n = 3 * (OPJ_UINT32)value - 2 + cblk->numpassesinlayers;
} else {
n = cblk->numpassesinlayers;
}
} else {
n = 3 * (OPJ_UINT32)value + cblk->numpassesinlayers;
}
layer->numpasses = n - cblk->numpassesinlayers;
if (!layer->numpasses) {
continue;
}
if (cblk->numpassesinlayers == 0) {
layer->len = cblk->passes[n - 1].rate;
layer->data = cblk->data;
} else {
layer->len = cblk->passes[n - 1].rate - cblk->passes[cblk->numpassesinlayers -
1].rate;
layer->data = cblk->data + cblk->passes[cblk->numpassesinlayers - 1].rate;
}
if (final) {
cblk->numpassesinlayers = n;
}
}
}
}
}
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
openjpeg
|
397f62c0a838e15d667ef50e27d5d011d2c79c04
| 91,749,747,998,771,100,000,000,000,000,000,000,000 | 104 |
Fix write heap buffer overflow in opj_mqc_byteout(). Discovered by Ke Liu of Tencent's Xuanwu LAB (#835)
|
bool CZNC::WaitForChildLock() { return m_pLockFile && m_pLockFile->ExLock(); }
| 0 |
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
| 71,026,257,046,957,400,000,000,000,000,000,000,000 | 1 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
|
bool DataReaderImpl::deadline_missed()
{
assert(qos_.deadline().period != c_TimeInfinite);
std::unique_lock<RecursiveTimedMutex> lock(reader_->getMutex());
deadline_missed_status_.total_count++;
deadline_missed_status_.total_count_change++;
deadline_missed_status_.last_instance_handle = timer_owner_;
listener_->on_requested_deadline_missed(user_datareader_, deadline_missed_status_);
subscriber_->subscriber_listener_.on_requested_deadline_missed(user_datareader_, deadline_missed_status_);
deadline_missed_status_.total_count_change = 0;
if (!history_.set_next_deadline(
timer_owner_,
steady_clock::now() + duration_cast<system_clock::duration>(deadline_duration_us_)))
{
logError(SUBSCRIBER, "Could not set next deadline in the history");
return false;
}
return deadline_timer_reschedule();
}
| 0 |
[
"CWE-284"
] |
Fast-DDS
|
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
| 177,961,411,581,524,340,000,000,000,000,000,000,000 | 22 |
check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]>
|
static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
/*
* take a temporary reference on the node so it
* survives and isn't removed from the list
* while we print it.
*/
node->tmp_refs++;
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
binder_node_lock(node);
print_binder_node_nilocked(m, node);
binder_node_unlock(node);
last_node = node;
spin_lock(&binder_dead_nodes_lock);
}
spin_unlock(&binder_dead_nodes_lock);
if (last_node)
binder_put_node(last_node);
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
mutex_unlock(&binder_procs_lock);
return 0;
}
| 0 |
[
"CWE-416"
] |
linux
|
7bada55ab50697861eee6bb7d60b41e68a961a9c
| 38,586,114,764,900,220,000,000,000,000,000,000,000 | 38 |
binder: fix race that allows malicious free of live buffer
Malicious code can attempt to free buffers using the BC_FREE_BUFFER
ioctl to binder. There are protections against a user freeing a buffer
while in use by the kernel, however there was a window where
BC_FREE_BUFFER could be used to free a recently allocated buffer that
was not completely initialized. This resulted in a use-after-free
detected by KASAN with a malicious test program.
This window is closed by setting the buffer's allow_user_free attribute
to 0 when the buffer is allocated or when the user has previously freed
it instead of waiting for the caller to set it. The problem was that
when the struct buffer was recycled, allow_user_free was stale and set
to 1 allowing a free to go through.
Signed-off-by: Todd Kjos <[email protected]>
Acked-by: Arve Hjønnevåg <[email protected]>
Cc: stable <[email protected]> # 4.14
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
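A minimal sketch of the lifecycle the fix enforces — allow_user_free starts at 0 on every (re)allocation and only becomes 1 once the kernel has actually handed the buffer to userspace; the surrounding driver structure is elided:

/* at allocation time: the buffer may be recycled and must not keep a
 * stale allow_user_free == 1 from its previous owner */
buffer->allow_user_free = 0;

/* ... initialization, copy-in, binder object fixups ... */

/* only now may a BC_FREE_BUFFER from userspace legitimately free it */
buffer->allow_user_free = 1;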
static int selinux_kernel_module_request(void)
{
return task_has_system(current, SYSTEM__MODULE_REQUEST);
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 319,029,731,990,338,830,000,000,000,000,000,000,000 | 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
sdap_initgr_store_user_memberships(struct sdap_initgr_nested_state *state)
{
errno_t ret;
int tret;
const char *orig_dn;
char **sysdb_parent_name_list = NULL;
char **ldap_parent_name_list = NULL;
int nparents;
struct sysdb_attrs **ldap_parentlist;
struct ldb_message_element *el;
int i, mi;
char **add_groups;
char **del_groups;
TALLOC_CTX *tmp_ctx;
bool in_transaction = false;
tmp_ctx = talloc_new(NULL);
if (!tmp_ctx) {
ret = ENOMEM;
goto done;
}
/* Get direct LDAP parents */
ret = sysdb_attrs_get_string(state->user, SYSDB_ORIG_DN, &orig_dn);
if (ret != EOK) {
DEBUG(SSSDBG_OP_FAILURE, "The user has no original DN\n");
goto done;
}
ldap_parentlist = talloc_zero_array(tmp_ctx, struct sysdb_attrs *,
state->groups_cur + 1);
if (!ldap_parentlist) {
ret = ENOMEM;
goto done;
}
nparents = 0;
for (i=0; i < state->groups_cur ; i++) {
ret = sysdb_attrs_get_el(state->groups[i], SYSDB_MEMBER, &el);
if (ret) {
DEBUG(SSSDBG_MINOR_FAILURE,
"A group with no members during initgroups?\n");
goto done;
}
for (mi = 0; mi < el->num_values; mi++) {
if (strcasecmp((const char *) el->values[mi].data, orig_dn) != 0) {
continue;
}
ldap_parentlist[nparents] = state->groups[i];
nparents++;
}
}
DEBUG(SSSDBG_TRACE_LIBS,
"The user %s is a direct member of %d LDAP groups\n",
state->username, nparents);
if (nparents == 0) {
ldap_parent_name_list = NULL;
} else {
ret = sysdb_attrs_primary_name_list(state->sysdb, tmp_ctx,
ldap_parentlist,
nparents,
state->opts->group_map[SDAP_AT_GROUP_NAME].name,
&ldap_parent_name_list);
if (ret != EOK) {
DEBUG(SSSDBG_CRIT_FAILURE,
"sysdb_attrs_primary_name_list failed [%d]: %s\n",
ret, strerror(ret));
goto done;
}
}
ret = sysdb_get_direct_parents(tmp_ctx, state->sysdb, state->dom,
SYSDB_MEMBER_USER,
state->username, &sysdb_parent_name_list);
if (ret) {
DEBUG(SSSDBG_CRIT_FAILURE,
"Could not get direct sysdb parents for %s: %d [%s]\n",
state->username, ret, strerror(ret));
goto done;
}
ret = diff_string_lists(tmp_ctx,
ldap_parent_name_list, sysdb_parent_name_list,
&add_groups, &del_groups, NULL);
if (ret != EOK) {
goto done;
}
ret = sysdb_transaction_start(state->sysdb);
if (ret != EOK) {
DEBUG(SSSDBG_CRIT_FAILURE, "Failed to start transaction\n");
goto done;
}
in_transaction = true;
DEBUG(SSSDBG_TRACE_INTERNAL,
"Updating memberships for %s\n", state->username);
ret = sysdb_update_members(state->sysdb, state->dom,
state->username, SYSDB_MEMBER_USER,
(const char *const *) add_groups,
(const char *const *) del_groups);
if (ret != EOK) {
DEBUG(SSSDBG_CRIT_FAILURE,
"Could not update sysdb memberships for %s: %d [%s]\n",
state->username, ret, strerror(ret));
goto done;
}
ret = sysdb_transaction_commit(state->sysdb);
if (ret != EOK) {
goto done;
}
in_transaction = false;
ret = EOK;
done:
if (in_transaction) {
tret = sysdb_transaction_cancel(state->sysdb);
if (tret != EOK) {
DEBUG(SSSDBG_CRIT_FAILURE, "Failed to cancel transaction\n");
}
}
talloc_zfree(tmp_ctx);
return ret;
}
| 0 |
[
"CWE-264"
] |
sssd
|
0b6b4b7669b46d3d0b0ebefbc0e1621965444717
| 241,835,883,299,960,650,000,000,000,000,000,000,000 | 131 |
IPA: process non-posix nested groups
Do not expect objectClass to be posixGroup but rather more general
groupofnames.
Resolves:
https://fedorahosted.org/sssd/ticket/2343
Reviewed-by: Michal Židek <[email protected]>
(cherry picked from commit bc8c93ffe881271043492c938c626a9be948000e)
|
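A one-line illustration of the filter widening the message describes; the real SSSD code routes this through its attribute maps rather than a literal string:

/* accept non-POSIX nested groups, not only posixGroup entries */
const char *group_filter =
    "(|(objectClass=groupOfNames)(objectClass=posixGroup))";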
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
struct sock *other;
unsigned int hash;
int err;
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
goto out;
alen = err;
if (test_bit(SOCK_PASSCRED, &sock->flags) &&
!unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
goto out;
restart:
other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
if (!other)
goto out;
unix_state_double_lock(sk, other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
unix_state_double_unlock(sk, other);
sock_put(other);
goto restart;
}
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
*/
other = NULL;
unix_state_double_lock(sk, other);
}
/*
* If it was connected, reconnect.
*/
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
if (other != old_peer)
unix_dgram_disconnected(sk, old_peer);
sock_put(old_peer);
} else {
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
return err;
}
| 1 |
[] |
net
|
7d267278a9ece963d77eefec61630223fce08c6c
| 47,718,229,280,569,040,000,000,000,000,000,000,000 | 73 |
unix: avoid use-after-free in ep_remove_wait_queue
Rainer Weikusat <[email protected]> writes:
An AF_UNIX datagram socket being the client in an n:1 association with
some server socket is only allowed to send messages to the server if the
receive queue of this socket contains at most sk_max_ack_backlog
datagrams. This implies that prospective writers might be forced to go
to sleep despite none of the message presently enqueued on the server
receive queue were sent by them. In order to ensure that these will be
woken up once space becomes again available, the present unix_dgram_poll
routine does a second sock_poll_wait call with the peer_wait wait queue
of the server socket as queue argument (unix_dgram_recvmsg does a wake
up on this queue after a datagram was received). This is inherently
problematic because the server socket is only guaranteed to remain alive
for as long as the client still holds a reference to it. In case the
connection is dissolved via connect or by the dead peer detection logic
in unix_dgram_sendmsg, the server socket may be freed even though "the
polling mechanism" (in particular, epoll) still has a pointer to the
corresponding peer_wait queue. There's no way to forcibly deregister a
wait queue with epoll.
Based on an idea by Jason Baron, the patch below changes the code such
that a wait_queue_t belonging to the client socket is enqueued on the
peer_wait queue of the server whenever the peer receive queue full
condition is detected by either a sendmsg or a poll. A wake up on the
peer queue is then relayed to the ordinary wait queue of the client
socket via wake function. The connection to the peer wait queue is again
dissolved if either a wake up is about to be relayed or the client
socket reconnects or a dead peer is detected or the client socket is
itself closed. This enables removing the second sock_poll_wait from
unix_dgram_poll, thus avoiding the use-after-free, while still ensuring
that no blocked writer sleeps forever.
Signed-off-by: Rainer Weikusat <[email protected]>
Fixes: ec0d215f9420 ("af_unix: fix 'poll for write'/connected DGRAM sockets")
Reviewed-by: Jason Baron <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
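A minimal sketch of the relay wake function the message describes, modeled on the approach in the patch; locking and the enqueue/dequeue bookkeeping are omitted:

static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode,
                                      int flags, void *key)
{
    struct unix_sock *u = container_of(q, struct unix_sock, peer_wake);

    /* the peer's receive queue drained: forward the wakeup to the
     * waiters on our own socket, so nobody ever sleeps directly on
     * the peer's (possibly soon-to-be-freed) peer_wait queue */
    wake_up_interruptible_poll(sk_sleep(&u->sk),
                               POLLOUT | POLLWRNORM | POLLWRBAND);
    return 0;
}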
static void winbindd_lookupsids_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct winbindd_lookupsids_state *state = tevent_req_data(
req, struct winbindd_lookupsids_state);
NTSTATUS status;
status = wb_lookupsids_recv(subreq, state, &state->domains,
&state->names);
TALLOC_FREE(subreq);
if (tevent_req_nterror(req, status)) {
return;
}
tevent_req_done(req);
}
| 0 |
[
"CWE-476"
] |
samba
|
595dd9fc4162dd70ad937db8669a0fddbbba9584
| 240,903,487,272,679,040,000,000,000,000,000,000,000 | 16 |
CVE-2020-14323 winbind: Fix invalid lookupsids DoS
A lookupsids request without extra_data will lead to "state->domain==NULL",
which makes winbindd_lookupsids_recv try to dereference it.
Reported by Bas Alberts of the GitHub Security Lab Team as GHSL-2020-134
Bug: https://bugzilla.samba.org/show_bug.cgi?id=14436
Signed-off-by: Volker Lendecke <[email protected]>
|
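A minimal sketch of the guard the fix implies — rejecting a lookupsids request that carries no extra_data before anything can leave state->domain NULL; names follow winbindd conventions but the placement is illustrative:

if (request->extra_data.data == NULL) {
    /* no SID list supplied: fail cleanly instead of crashing later */
    tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
    return tevent_req_post(req, ev);
}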
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *mac_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
int type = skb_shinfo(skb)->gso_type;
if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
!(type & (SKB_GSO_UDP))))
goto out;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
segs = NULL;
goto out;
}
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
offset = skb->csum_start - skb_headroom(skb);
csum = skb_checksum(skb, offset, skb->len - offset, 0);
offset += skb->csum_offset;
*(__sum16 *)(skb->data + offset) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
if ((skb_headroom(skb) < frag_hdr_sz) &&
pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
goto out;
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
unfrag_ip6hlen;
mac_start = skb_mac_header(skb);
memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
ipv6_select_ident(fptr);
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
out:
return segs;
}
| 1 |
[
"CWE-399"
] |
linux
|
a9cf73ea7ff78f52662c8658d93c226effbbedde
| 144,836,037,496,943,110,000,000,000,000,000,000,000 | 71 |
ipv6: udp: fix the wrong headroom check
At this point, skb->data points to skb_transport_header.
So, headroom check is wrong.
For some cases, e.g. a bridge (UFO on) + an eth device (UFO off),
there is not enough headroom for the IPv6 fragment header,
but the headroom check is always false.
As a result, data is moved to before skb->head
when the IPv6 fragment header is added to the skb.
Signed-off-by: Shan Wei <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
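A minimal sketch of the corrected test, modeled on the fix: measure the real headroom from skb->head to the MAC header, since skb->data points at the transport header at this point:

/* skb_headroom() would measure head..data, but data is the transport
 * header here, so that test is effectively always false */
if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
    goto out;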
const boost::optional<int>& TopologyDescription::getLogicalSessionTimeoutMinutes() const {
return _logicalSessionTimeoutMinutes;
}
| 0 |
[
"CWE-755"
] |
mongo
|
75f7184eafa78006a698cda4c4adfb57f1290047
| 107,412,250,891,536,600,000,000,000,000,000,000,000 | 3 |
SERVER-50170 fix max staleness read preference parameter for server selection
|
int stats_check_uri(struct stream_interface *si, struct http_txn *txn, struct proxy *backend)
{
struct uri_auth *uri_auth = backend->uri_auth;
struct http_msg *msg = &txn->req;
const char *uri = msg->chn->buf->p + msg->sl.rq.u;
const char *h;
if (!uri_auth)
return 0;
if (txn->meth != HTTP_METH_GET && txn->meth != HTTP_METH_HEAD && txn->meth != HTTP_METH_POST)
return 0;
memset(&si->applet.ctx.stats, 0, sizeof(si->applet.ctx.stats));
si->applet.ctx.stats.st_code = STAT_STATUS_INIT;
si->applet.ctx.stats.flags |= STAT_FMT_HTML; /* assume HTML mode by default */
/* check URI size */
if (uri_auth->uri_len > msg->sl.rq.u_l)
return 0;
h = uri;
if (memcmp(h, uri_auth->uri_prefix, uri_auth->uri_len) != 0)
return 0;
h += uri_auth->uri_len;
while (h <= uri + msg->sl.rq.u_l - 3) {
if (memcmp(h, ";up", 3) == 0) {
si->applet.ctx.stats.flags |= STAT_HIDE_DOWN;
break;
}
h++;
}
if (uri_auth->refresh) {
h = uri + uri_auth->uri_len;
while (h <= uri + msg->sl.rq.u_l - 10) {
if (memcmp(h, ";norefresh", 10) == 0) {
si->applet.ctx.stats.flags |= STAT_NO_REFRESH;
break;
}
h++;
}
}
h = uri + uri_auth->uri_len;
while (h <= uri + msg->sl.rq.u_l - 4) {
if (memcmp(h, ";csv", 4) == 0) {
si->applet.ctx.stats.flags &= ~STAT_FMT_HTML;
break;
}
h++;
}
h = uri + uri_auth->uri_len;
while (h <= uri + msg->sl.rq.u_l - 8) {
if (memcmp(h, ";st=", 4) == 0) {
int i;
h += 4;
si->applet.ctx.stats.st_code = STAT_STATUS_UNKN;
for (i = STAT_STATUS_INIT + 1; i < STAT_STATUS_SIZE; i++) {
if (strncmp(stat_status_codes[i], h, 4) == 0) {
si->applet.ctx.stats.st_code = i;
break;
}
}
break;
}
h++;
}
return 1;
}
| 0 |
[] |
haproxy
|
aae75e3279c6c9bd136413a72dafdcd4986bb89a
| 115,667,077,757,683,150,000,000,000,000,000,000,000 | 72 |
BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
During normal HTTP request processing, request buffers are realigned if
there are less than global.maxrewrite bytes available after them, in
order to leave enough room for rewriting headers after the request. This
is done in http_wait_for_request().
However, if some HTTP inspection happens during a "tcp-request content"
rule, this realignment is not performed. In theory this is not a problem
because empty buffers are always aligned and TCP inspection happens at
the beginning of a connection. But with HTTP keep-alive, it also happens
at the beginning of each subsequent request. So if a second request was
pipelined by the client before the first one had a chance to be forwarded,
the second request will not be realigned. Then, http_wait_for_request()
will not perform such a realignment either because the request was
already parsed and marked as such. The consequence of this, is that the
rewrite of a sufficient number of such pipelined, unaligned requests may
leave less room past the request been processed than the configured
reserve, which can lead to a buffer overflow if request processing appends
some data past the end of the buffer.
A number of conditions are required for the bug to be triggered :
- HTTP keep-alive must be enabled ;
- HTTP inspection in TCP rules must be used ;
- some request appending rules are needed (reqadd, x-forwarded-for)
- since empty buffers are always realigned, the client must pipeline
enough requests so that the buffer always contains something till
the point where there is no more room for rewriting.
While such a configuration is quite unlikely to be met (which is
confirmed by the bug's lifetime), a few people do use these features
together for very specific usages. And more importantly, writing such
a configuration and the request to attack it is trivial.
A quick workaround consists in forcing keep-alive off by adding
"option httpclose" or "option forceclose" in the frontend. Alternatively,
disabling HTTP-based TCP inspection rules is enough if the application
supports it.
At first glance, this bug does not look like it could lead to remote code
execution, as the overflowing part is controlled by the configuration and
not by the user. But some deeper analysis should be performed to confirm
this. And anyway, corrupting the process' memory and crashing it is quite
trivial.
Special thanks go to Yves Lafon from the W3C who reported this bug and
deployed significant efforts to collect the relevant data needed to
understand it in less than one week.
CVE-2013-1912 was assigned to this issue.
Note that 1.4 is also affected so the fix must be backported.
|
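A minimal sketch of the realignment guard this analysis calls for — before parsing a kept-alive request, realign the buffer whenever the spare room behind the pending input drops below the rewrite reserve; the helper names here are hypothetical:

/* hypothetical helpers: spare_room() = free bytes past the pending
 * input, slow_realign() = move the input back to the buffer start */
if (spare_room(req->buf) < global.tune.maxrewrite)
    slow_realign(req->buf);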
eval7(
char_u **arg,
typval_T *rettv,
evalarg_T *evalarg,
int want_string) // after "." operator
{
int evaluate = evalarg != NULL
&& (evalarg->eval_flags & EVAL_EVALUATE);
int len;
char_u *s;
char_u *start_leader, *end_leader;
int ret = OK;
char_u *alias;
/*
* Initialise variable so that clear_tv() can't mistake this for a
* string and free a string that isn't there.
*/
rettv->v_type = VAR_UNKNOWN;
/*
* Skip '!', '-' and '+' characters. They are handled later.
*/
start_leader = *arg;
if (eval_leader(arg, in_vim9script()) == FAIL)
return FAIL;
end_leader = *arg;
if (**arg == '.' && (!isdigit(*(*arg + 1))
#ifdef FEAT_FLOAT
|| in_old_script(2)
#endif
))
{
semsg(_(e_invalid_expression_str), *arg);
++*arg;
return FAIL;
}
switch (**arg)
{
/*
* Number constant.
*/
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '.': ret = eval_number(arg, rettv, evaluate, want_string);
// Apply prefixed "-" and "+" now. Matters especially when
// "->" follows.
if (ret == OK && evaluate && end_leader > start_leader
&& rettv->v_type != VAR_BLOB)
ret = eval7_leader(rettv, TRUE, start_leader, &end_leader);
break;
/*
* String constant: "string".
*/
case '"': ret = eval_string(arg, rettv, evaluate);
break;
/*
* Literal string constant: 'str''ing'.
*/
case '\'': ret = eval_lit_string(arg, rettv, evaluate);
break;
/*
* List: [expr, expr]
*/
case '[': ret = eval_list(arg, rettv, evalarg, TRUE);
break;
/*
* Dictionary: #{key: val, key: val}
*/
case '#': if (in_vim9script())
{
ret = vim9_bad_comment(*arg) ? FAIL : NOTDONE;
}
else if ((*arg)[1] == '{')
{
++*arg;
ret = eval_dict(arg, rettv, evalarg, TRUE);
}
else
ret = NOTDONE;
break;
/*
* Lambda: {arg, arg -> expr}
* Dictionary: {'key': val, 'key': val}
*/
case '{': if (in_vim9script())
ret = NOTDONE;
else
ret = get_lambda_tv(arg, rettv, in_vim9script(), evalarg);
if (ret == NOTDONE)
ret = eval_dict(arg, rettv, evalarg, FALSE);
break;
/*
* Option value: &name
*/
case '&': ret = eval_option(arg, rettv, evaluate);
break;
/*
* Environment variable: $VAR.
*/
case '$': ret = eval_env_var(arg, rettv, evaluate);
break;
/*
* Register contents: @r.
*/
case '@': ++*arg;
if (evaluate)
{
if (in_vim9script() && IS_WHITE_OR_NUL(**arg))
semsg(_(e_syntax_error_at_str), *arg);
else if (in_vim9script() && !valid_yank_reg(**arg, FALSE))
emsg_invreg(**arg);
else
{
rettv->v_type = VAR_STRING;
rettv->vval.v_string = get_reg_contents(**arg,
GREG_EXPR_SRC);
}
}
if (**arg != NUL)
++*arg;
break;
/*
* nested expression: (expression).
* or lambda: (arg) => expr
*/
case '(': ret = NOTDONE;
if (in_vim9script())
{
ret = get_lambda_tv(arg, rettv, TRUE, evalarg);
if (ret == OK && evaluate)
{
ufunc_T *ufunc = rettv->vval.v_partial->pt_func;
// Compile it here to get the return type. The return
// type is optional, when it's missing use t_unknown.
// This is recognized in compile_return().
if (ufunc->uf_ret_type->tt_type == VAR_VOID)
ufunc->uf_ret_type = &t_unknown;
if (compile_def_function(ufunc,
FALSE, COMPILE_TYPE(ufunc), NULL) == FAIL)
{
clear_tv(rettv);
ret = FAIL;
}
}
}
if (ret == NOTDONE)
{
*arg = skipwhite_and_linebreak(*arg + 1, evalarg);
ret = eval1(arg, rettv, evalarg); // recursive!
*arg = skipwhite_and_linebreak(*arg, evalarg);
if (**arg == ')')
++*arg;
else if (ret == OK)
{
emsg(_(e_missing_closing_paren));
clear_tv(rettv);
ret = FAIL;
}
}
break;
default: ret = NOTDONE;
break;
}
if (ret == NOTDONE)
{
/*
* Must be a variable or function name.
* Can also be a curly-braces kind of name: {expr}.
*/
s = *arg;
len = get_name_len(arg, &alias, evaluate, TRUE);
if (alias != NULL)
s = alias;
if (len <= 0)
ret = FAIL;
else
{
int flags = evalarg == NULL ? 0 : evalarg->eval_flags;
if (evaluate && in_vim9script() && len == 1 && *s == '_')
{
emsg(_(e_cannot_use_underscore_here));
ret = FAIL;
}
else if ((in_vim9script() ? **arg : *skipwhite(*arg)) == '(')
{
// "name(..." recursive!
*arg = skipwhite(*arg);
ret = eval_func(arg, evalarg, s, len, rettv, flags, NULL);
}
else if (flags & EVAL_CONSTANT)
ret = FAIL;
else if (evaluate)
{
// get the value of "true", "false" or a variable
if (len == 4 && in_vim9script() && STRNCMP(s, "true", 4) == 0)
{
rettv->v_type = VAR_BOOL;
rettv->vval.v_number = VVAL_TRUE;
ret = OK;
}
else if (len == 5 && in_vim9script()
&& STRNCMP(s, "false", 5) == 0)
{
rettv->v_type = VAR_BOOL;
rettv->vval.v_number = VVAL_FALSE;
ret = OK;
}
else if (len == 4 && in_vim9script()
&& STRNCMP(s, "null", 4) == 0)
{
rettv->v_type = VAR_SPECIAL;
rettv->vval.v_number = VVAL_NULL;
ret = OK;
}
else
ret = eval_variable(s, len, rettv, NULL,
EVAL_VAR_VERBOSE + EVAL_VAR_IMPORT);
}
else
{
// skip the name
check_vars(s, len);
ret = OK;
}
}
vim_free(alias);
}
// Handle following '[', '(' and '.' for expr[expr], expr.name,
// expr(expr), expr->name(expr)
if (ret == OK)
ret = handle_subscript(arg, rettv, evalarg, TRUE);
/*
* Apply logical NOT and unary '-', from right to left, ignore '+'.
*/
if (ret == OK && evaluate && end_leader > start_leader)
ret = eval7_leader(rettv, FALSE, start_leader, &end_leader);
return ret;
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
vim
|
605ec91e5a7330d61be313637e495fa02a6dc264
| 211,117,325,743,139,960,000,000,000,000,000,000,000 | 267 |
patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string.
|
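The guard the message refers to is the same idiom already visible in eval7() for register names: test for the terminating NUL before advancing, so an error path can never step past the end of the string. A minimal sketch:

/* never move past the string terminator, even on an error path */
if (**arg != NUL)
    ++*arg;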
_handle_carbons(xmpp_stanza_t *const stanza)
{
xmpp_stanza_t *carbons = xmpp_stanza_get_child_by_ns(stanza, STANZA_NS_CARBONS);
if (!carbons) {
return FALSE;
}
const char *name = xmpp_stanza_get_name(carbons);
if (!name) {
log_error("Unable to retrieve stanza name for Carbon");
return TRUE;
}
if (g_strcmp0(name, "private") == 0) {
log_info("Carbon received with private element.");
return FALSE;
}
if ((g_strcmp0(name, "received") != 0) && (g_strcmp0(name, "sent") != 0)) {
log_warning("Carbon received with unrecognised stanza name: %s", name);
return TRUE;
}
xmpp_stanza_t *forwarded = xmpp_stanza_get_child_by_ns(carbons, STANZA_NS_FORWARD);
if (!forwarded) {
log_warning("Carbon received with no forwarded element");
return TRUE;
}
xmpp_stanza_t *message = xmpp_stanza_get_child_by_name(forwarded, STANZA_NAME_MESSAGE);
if (!message) {
log_warning("Carbon received with no message element");
return TRUE;
}
char *message_txt = xmpp_message_get_body(message);
if (!message_txt) {
log_warning("Carbon received with no message.");
return TRUE;
}
const gchar *to = xmpp_stanza_get_to(message);
const gchar *from = xmpp_stanza_get_from(message);
// happens when receive a carbon of a self sent message
if (!to) to = from;
Jid *jid_from = jid_create(from);
Jid *jid_to = jid_create(to);
Jid *my_jid = jid_create(connection_get_fulljid());
// check for pgp encrypted message
char *enc_message = NULL;
xmpp_stanza_t *x = xmpp_stanza_get_child_by_ns(message, STANZA_NS_ENCRYPTED);
if (x) {
enc_message = xmpp_stanza_get_text(x);
}
// if we are the recipient, treat as standard incoming message
if (g_strcmp0(my_jid->barejid, jid_to->barejid) == 0) {
sv_ev_incoming_carbon(jid_from->barejid, jid_from->resourcepart, message_txt, enc_message);
// else treat as a sent message
} else {
sv_ev_outgoing_carbon(jid_to->barejid, message_txt, enc_message);
}
xmpp_ctx_t *ctx = connection_get_ctx();
xmpp_free(ctx, message_txt);
xmpp_free(ctx, enc_message);
jid_destroy(jid_from);
jid_destroy(jid_to);
jid_destroy(my_jid);
return TRUE;
}
| 1 |
[
"CWE-20",
"CWE-346"
] |
profanity
|
8e75437a7e43d4c55e861691f74892e666e29b0b
| 148,906,872,247,604,120,000,000,000,000,000,000,000 | 77 |
Add carbons from check
|
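A minimal sketch of the sender check the commit title refers to: a carbon is only trustworthy when the outer stanza's 'from' is our own bare JID, otherwise any contact could forge carbons of conversations that never happened. Placement inside _handle_carbons() is illustrative:

const char *outer_from = xmpp_stanza_get_from(stanza);
Jid *my = jid_create(connection_get_fulljid());

if (outer_from == NULL || g_strcmp0(outer_from, my->barejid) != 0) {
    log_warning("Carbon received from invalid sender, ignoring");
    jid_destroy(my);
    return TRUE;    /* swallow the forged carbon */
}
jid_destroy(my);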