| func (stringlengths 0-484k) | target (int64 0-1) | cwe (sequencelengths 0-4) | project (stringclasses, 799 values) | commit_id (stringlengths 40) | hash (float64, ~1.22e24-3.40e29) | size (int64 1-24k) | message (stringlengths 0-13.3k) |
|---|---|---|---|---|---|---|---|
void addReplyString(redisClient *c, char *s, size_t len) {
if (_installWriteEvent(c) != REDIS_OK) return;
if (_addReplyToBuffer(c,s,len) != REDIS_OK)
_addReplyStringToList(c,s,len);
} | 0 | [
"CWE-20"
] | redis | 697af434fbeb2e3ba2ba9687cd283ed1a2734fa5 | 307,220,260,436,128,430,000,000,000,000,000,000,000 | 5 | initial changes needed to turn the current VM code into a cache system. Tons of work to do still. |
/* Set an attribute */
static PHP_METHOD(PDOStatement, setAttribute)
{
long attr;
zval *value = NULL;
PHP_STMT_GET_OBJ;
if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "lz!", &attr, &value)) {
RETURN_FALSE;
}
if (!stmt->methods->set_attribute) {
goto fail;
}
PDO_STMT_CLEAR_ERR();
if (stmt->methods->set_attribute(stmt, attr, value TSRMLS_CC)) {
RETURN_TRUE;
}
fail:
if (!stmt->methods->set_attribute) {
pdo_raise_impl_error(stmt->dbh, stmt, "IM001", "This driver doesn't support setting attributes" TSRMLS_CC);
} else {
PDO_HANDLE_STMT_ERR();
}
RETURN_FALSE; | 0 | [
"CWE-476"
] | php-src | 6045de69c7dedcba3eadf7c4bba424b19c81d00d | 60,336,257,274,270,505,000,000,000,000,000,000,000 | 27 | Fix bug #73331 - do not try to serialize/unserialize objects wddx can not handle
Proper solution would be to call serialize/unserialize and deal with the result,
but this requires more work that should be done by the wddx maintainer (not me). |
static void vp7_get_quants(VP8Context *s)
{
VP56RangeCoder *c = &s->c;
int yac_qi = vp8_rac_get_uint(c, 7);
int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
} | 0 | [
"CWE-119",
"CWE-787"
] | FFmpeg | 6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef | 256,103,049,124,308,970,000,000,000,000,000,000,000 | 18 | avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
static int ZEND_FASTCALL ZEND_FETCH_DIM_RW_SPEC_CV_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_free_op free_op1;
zval *dim = &opline->op2.u.constant;
zval **container = _get_zval_ptr_ptr_cv(&opline->op1, EX(Ts), BP_VAR_RW TSRMLS_CC);
if (IS_CV == IS_VAR && !container) {
zend_error_noreturn(E_ERROR, "Cannot use string offset as an array");
}
zend_fetch_dimension_address(&EX_T(opline->result.u.var), container, dim, 0, BP_VAR_RW TSRMLS_CC);
if (IS_CV == IS_VAR && 0 &&
READY_TO_DESTROY(free_op1.var)) {
AI_USE_PTR(EX_T(opline->result.u.var).var);
if (!PZVAL_IS_REF(*EX_T(opline->result.u.var).var.ptr_ptr) &&
Z_REFCOUNT_PP(EX_T(opline->result.u.var).var.ptr_ptr) > 2) {
SEPARATE_ZVAL(EX_T(opline->result.u.var).var.ptr_ptr);
}
}
ZEND_VM_NEXT_OPCODE();
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 4,322,879,169,008,839,000,000,000,000,000,000,000 | 23 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
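The check this message describes is easy to sketch. A minimal, self-contained illustration (hypothetical helper name, not PHP's actual code): a path whose declared length exceeds its C-string length embeds a NUL byte and should be rejected.

```c
#include <string.h>
#include <stddef.h>

/* Reject a path that embeds a NUL byte: if any '\0' appears before the
 * declared end, C string functions downstream would silently truncate it.
 * Hypothetical helper, illustrating the check the message describes. */
static int path_has_no_embedded_nul(const char *path, size_t declared_len)
{
    return memchr(path, '\0', declared_len) == NULL;
}
```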
add8x2_8x2 (uint32_t a, uint32_t b)
{
uint32_t t = a + b;
t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK);
return t & RB_MASK;
} | 0 | [
"CWE-787"
] | cairo | c986a7310bb06582b7d8a566d5f007ba4e5e75bf | 123,670,353,201,325,540,000,000,000,000,000,000,000 | 6 | image: Enable inplace compositing with opacities for general routines
On a SNB i5-2500:
Speedups
========
firefox-chalkboard 34284.16 -> 19637.40: 1.74x speedup
swfdec-giant-steps 778.35 -> 665.37: 1.17x speedup
ocitysmap 485.64 -> 431.94: 1.12x speedup
Slowdowns
=========
firefox-fishbowl 46878.98 -> 54407.14: 1.16x slowdown
That slowdown is due to the overhead of the increased number of calls to
pixman_image_composite32() (pixman_transform_point for analyzing the
source extents in particular) outweighing any advantage gained by
performing the rasterisation in a single pass and eliding gaps. The
solution that has been floated in the past is for an interface into
pixman to only perform the analysis once and then to return a kernel to
use for all spans.
Signed-off-by: Chris Wilson <[email protected]> |
SWFInput_getChar(SWFInput input)
{
return input->getChar(input);
} | 0 | [
"CWE-190",
"CWE-703"
] | libming | a009a38dce1d9316cad1ab522b813b1d5ba4c62a | 295,382,666,213,845,800,000,000,000,000,000,000,000 | 4 | Fix left shift of a negative value in SWFInput_readSBits. Check for number before left-shifting by (number-1). |
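The guarded shift is simple to show. A hedged sketch of the idea (not libming's literal patch): validate the bit count, and do the sign-extension arithmetic on an unsigned value so no negative number is ever left-shifted.

```c
#include <stdint.h>

/* Sign-extend the low `number` bits of value without UB: the bit tests
 * and masks run on unsigned values, and `number` is range-checked before
 * the (number - 1) shift. Illustrative, not the literal libming fix. */
static int32_t sign_extend_sbits(uint32_t value, int number)
{
    if (number <= 0 || number > 32)
        return 0;                               /* the added guard */
    if (number < 32 && (value & (1u << (number - 1))))
        value |= ~((1u << number) - 1);         /* unsigned ops only */
    return (int32_t)value;
}
```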
nv_suspend(cmdarg_T *cap)
{
clearop(cap->oap);
if (VIsual_active)
end_visual_mode(); // stop Visual mode
do_cmdline_cmd((char_u *)"stop");
} | 0 | [
"CWE-416"
] | vim | 35a9a00afcb20897d462a766793ff45534810dc3 | 70,054,763,933,963,460,000,000,000,000,000,000,000 | 7 | patch 8.2.3428: using freed memory when replacing
Problem: Using freed memory when replacing. (Dhiraj Mishra)
Solution: Get the line pointer after calling ins_copychar(). |
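The message captures a general rule: any call that may reallocate a buffer invalidates pointers taken before it. A self-contained sketch of the pattern (names hypothetical, not vim's code):

```c
#include <stdlib.h>

/* Hypothetical growable line buffer; grow() may call realloc(),
 * which can move the allocation and invalidate older pointers. */
struct line { char *data; size_t len, cap; };

static int grow(struct line *l, size_t need) {
    if (l->len + need <= l->cap) return 0;
    size_t ncap = (l->cap ? l->cap * 2 : 16) + need;
    char *p = realloc(l->data, ncap);
    if (!p) return -1;
    l->data = p;          /* the old l->data may now be dangling */
    l->cap = ncap;
    return 0;
}

static int append_char(struct line *l, char c) {
    /* WRONG: taking char *pos = l->data + l->len before grow() risks a
     * use-after-free. RIGHT: grow first, then take the pointer. */
    if (grow(l, 1) != 0) return -1;
    char *pos = l->data + l->len;   /* fetched after the possible realloc */
    *pos = c;
    l->len++;
    return 0;
}
```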
static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
{
int i, num;
struct cr_regs *cr;
num = obj->nr_tlb_entries;
cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
if (!cr)
return 0;
num = __dump_tlb_entries(obj, cr, num);
for (i = 0; i < num; i++)
iotlb_dump_cr(obj, cr + i, s);
kfree(cr);
return 0;
} | 0 | [] | linux | e203db293863fa15b4b1917d4398fb5bd63c4e88 | 174,226,196,366,797,750,000,000,000,000,000,000,000 | 18 | iommu/omap: Fix debug_read_tlb() to use seq_printf()
The debug_read_tlb() uses the sprintf() functions directly on the buffer
allocated by buf = kmalloc(count), without taking into account the size
of the buffer, with the consequence of corrupting the heap, depending on
the count requested by the user.
The patch fixes the issue replacing sprintf() by seq_printf().
Signed-off-by: Salva Peiró <[email protected]>
Signed-off-by: Joerg Roedel <[email protected]> |
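For reference, the fix shape is that formatting goes through the seq_file, which bounds-checks against its own buffer, instead of sprintf() into a fixed kmalloc() allocation. A hedged, kernel-style sketch (field names assumed from context, not the literal patch):

```c
/* Format one TLB entry through the seq_file. seq_printf() tracks the
 * seq_file's buffer size internally, so a user-controlled entry count
 * cannot overrun a fixed buffer the way sprintf() did. Sketch only. */
static void iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
                          struct seq_file *s)
{
    seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
               (cr->cam & MMU_CAM_P) ? 1 : 0);
}
```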
void cil_destroy_roleallow(struct cil_roleallow *roleallow)
{
if (roleallow == NULL) {
return;
}
free(roleallow);
} | 0 | [
"CWE-125"
] | selinux | 340f0eb7f3673e8aacaf0a96cbfcd4d12a405521 | 243,429,157,132,785,500,000,000,000,000,000,000,000 | 8 | libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found,
an error message is produced and, in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]> |
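The statement kinds the message lists map naturally to a small predicate. A hedged sketch of the build-time check (flavor names from libsepol/cil, control flow illustrative):

```c
/* Statements that are resolved before macro calls can never be
 * re-resolved after an optional is disabled, so they may not appear
 * inside an optional block. Sketch of the check, not the literal patch. */
static int cil_allowed_in_optional(int flavor)
{
    switch (flavor) {
    case CIL_TUNABLE:
    case CIL_IN:
    case CIL_BLOCK:
    case CIL_BLOCKABSTRACT:
    case CIL_MACRO:
        return 0;
    default:
        return 1;   /* e.g. blockinherit copies *into* the optional: fine */
    }
}
```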
Header::setView(const string& view)
{
insert ("view", StringAttribute (view));
} | 0 | [
"CWE-125"
] | openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 104,819,741,889,274,110,000,000,000,000,000,000,000 | 4 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
int ext4_mark_iloc_dirty(handle_t *handle,
struct inode *inode, struct ext4_iloc *iloc)
{
int err = 0;
if (test_opt(inode->i_sb, I_VERSION))
inode_inc_iversion(inode);
/* the do_update_inode consumes one bh->b_count */
get_bh(iloc->bh);
/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
err = ext4_do_update_inode(handle, inode, iloc);
put_bh(iloc->bh);
return err;
} | 0 | [
"CWE-703"
] | linux | 744692dc059845b2a3022119871846e74d4f6e11 | 215,269,125,498,011,250,000,000,000,000,000,000,000 | 16 | ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]> |
void VP8ComponentDecoder::SendToVirtualThread::init(GenericWorker * all_workers) {
this->all_workers = all_workers;
} | 0 | [
"CWE-1187"
] | lepton | 82167c144a322cc956da45407f6dce8d4303d346 | 29,552,152,430,200,604,000,000,000,000,000,000,000 | 3 | fix #87: always check that threads_required set up the appropriate number of threads---fire off nop functions on unused threads for consistency |
exif_mnote_data_pentax_new (ExifMem *mem)
{
ExifMnoteData *d;
if (!mem) return NULL;
d = exif_mem_alloc (mem, sizeof (ExifMnoteDataPentax));
if (!d) return NULL;
exif_mnote_data_construct (d, mem);
/* Set up function pointers */
d->methods.free = exif_mnote_data_pentax_free;
d->methods.set_byte_order = exif_mnote_data_pentax_set_byte_order;
d->methods.set_offset = exif_mnote_data_pentax_set_offset;
d->methods.load = exif_mnote_data_pentax_load;
d->methods.save = exif_mnote_data_pentax_save;
d->methods.count = exif_mnote_data_pentax_count;
d->methods.get_id = exif_mnote_data_pentax_get_id;
d->methods.get_name = exif_mnote_data_pentax_get_name;
d->methods.get_title = exif_mnote_data_pentax_get_title;
d->methods.get_description = exif_mnote_data_pentax_get_description;
d->methods.get_value = exif_mnote_data_pentax_get_value;
return d;
} | 0 | [
"CWE-125"
] | libexif | 435e21f05001fb03f9f186fa7cbc69454afd00d1 | 7,990,158,385,219,666,000,000,000,000,000,000,000 | 26 | Fix MakerNote tag size overflow issues at read time.
Check for a size overflow while reading tags, which ensures that the
size is always consistent for the given components and type of the
entry, making checking further down superfluous.
This provides an alternate fix for
https://sourceforge.net/p/libexif/bugs/125/ CVE-2016-6328 and for all
the MakerNote types. Likely, this makes both commits 41bd0423 and
89e5b1c1 redundant as it ensures that MakerNote entries are well-formed
when they're populated.
Some improvements on top by Marcus Meissner <[email protected]>
CVE-2020-13112 |
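The overflow check reads naturally as a small helper. A self-contained sketch (hypothetical signature, not libexif's API): an entry is consistent only if components times the per-item size matches the declared size, and the multiplication itself must not overflow.

```c
#include <stdint.h>
#include <stddef.h>

/* Reject an entry whose declared size is inconsistent with
 * components * per-item size, including the case where the
 * multiplication would overflow. Hypothetical helper. */
static int entry_size_ok(uint32_t components, size_t item_size,
                         uint32_t declared_size)
{
    if (item_size == 0)
        return 0;
    if (components > UINT32_MAX / item_size)   /* product would overflow */
        return 0;
    return (uint64_t)components * item_size == declared_size;
}
```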
int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
{
int error,got_error,flag;
uint key,UNINIT_VAR(left_length),b_type,field;
ha_rows records,del_blocks;
my_off_t used,empty,pos,splits,UNINIT_VAR(start_recpos),
del_length,link_used,start_block;
uchar *record= 0, *UNINIT_VAR(to);
char llbuff[22],llbuff2[22],llbuff3[22];
ha_checksum intern_record_checksum;
ha_checksum key_checksum[HA_MAX_POSSIBLE_KEY];
my_bool static_row_size;
MI_KEYDEF *keyinfo;
MI_BLOCK_INFO block_info;
DBUG_ENTER("chk_data_link");
if (!(param->testflag & T_SILENT))
{
if (extend)
puts("- check records and index references");
else
puts("- check record links");
}
if (!mi_alloc_rec_buff(info, -1, &record))
{
mi_check_print_error(param,"Not enough memory for record");
DBUG_RETURN(-1);
}
records=del_blocks=0;
used=link_used=splits=del_length=0;
intern_record_checksum=param->glob_crc=0;
got_error=error=0;
empty=info->s->pack.header_length;
/* Check how to calculate checksum of rows */
static_row_size=1;
if (info->s->data_file_type == COMPRESSED_RECORD)
{
for (field=0 ; field < info->s->base.fields ; field++)
{
if (info->s->rec[field].base_type == FIELD_BLOB ||
info->s->rec[field].base_type == FIELD_VARCHAR)
{
static_row_size=0;
break;
}
}
}
pos=my_b_tell(¶m->read_cache);
bzero((char*) key_checksum, info->s->base.keys * sizeof(key_checksum[0]));
while (pos < info->state->data_file_length)
{
if (*killed_ptr(param))
goto err2;
switch (info->s->data_file_type) {
case STATIC_RECORD:
if (my_b_read(¶m->read_cache,(uchar*) record,
info->s->base.pack_reclength))
goto err;
start_recpos=pos;
pos+=info->s->base.pack_reclength;
splits++;
if (*record == '\0')
{
del_blocks++;
del_length+=info->s->base.pack_reclength;
continue; /* Record removed */
}
param->glob_crc+= mi_static_checksum(info,record);
used+=info->s->base.pack_reclength;
break;
case DYNAMIC_RECORD:
flag=block_info.second_read=0;
block_info.next_filepos=pos;
do
{
if (_mi_read_cache(¶m->read_cache,(uchar*) block_info.header,
(start_block=block_info.next_filepos),
sizeof(block_info.header),
(flag ? 0 : READING_NEXT) | READING_HEADER))
goto err;
if (start_block & (MI_DYN_ALIGN_SIZE-1))
{
mi_check_print_error(param,"Wrong aligned block at %s",
llstr(start_block,llbuff));
goto err2;
}
b_type=_mi_get_block_info(&block_info,-1,start_block);
if (b_type & (BLOCK_DELETED | BLOCK_ERROR | BLOCK_SYNC_ERROR |
BLOCK_FATAL_ERROR))
{
if (b_type & BLOCK_SYNC_ERROR)
{
if (flag)
{
mi_check_print_error(param,"Unexpected byte: %d at link: %s",
(int) block_info.header[0],
llstr(start_block,llbuff));
goto err2;
}
pos=block_info.filepos+block_info.block_len;
goto next;
}
if (b_type & BLOCK_DELETED)
{
if (block_info.block_len < info->s->base.min_block_length)
{
mi_check_print_error(param,
"Deleted block with impossible length %lu at %s",
block_info.block_len,llstr(pos,llbuff));
goto err2;
}
if ((block_info.next_filepos != HA_OFFSET_ERROR &&
block_info.next_filepos >= info->state->data_file_length) ||
(block_info.prev_filepos != HA_OFFSET_ERROR &&
block_info.prev_filepos >= info->state->data_file_length))
{
mi_check_print_error(param,"Delete link points outside datafile at %s",
llstr(pos,llbuff));
goto err2;
}
del_blocks++;
del_length+=block_info.block_len;
pos=block_info.filepos+block_info.block_len;
splits++;
goto next;
}
mi_check_print_error(param,"Wrong bytesec: %d-%d-%d at linkstart: %s",
block_info.header[0],block_info.header[1],
block_info.header[2],
llstr(start_block,llbuff));
goto err2;
}
if (info->state->data_file_length < block_info.filepos+
block_info.block_len)
{
mi_check_print_error(param,
"Recordlink that points outside datafile at %s",
llstr(pos,llbuff));
got_error=1;
break;
}
splits++;
if (!flag++) /* First block */
{
start_recpos=pos;
pos=block_info.filepos+block_info.block_len;
if (block_info.rec_len > (uint) info->s->base.max_pack_length)
{
mi_check_print_error(param,"Found too long record (%lu) at %s",
(ulong) block_info.rec_len,
llstr(start_recpos,llbuff));
got_error=1;
break;
}
if (info->s->base.blobs)
{
if (!(to= mi_alloc_rec_buff(info, block_info.rec_len,
&info->rec_buff)))
{
mi_check_print_error(param,
"Not enough memory (%lu) for blob at %s",
(ulong) block_info.rec_len,
llstr(start_recpos,llbuff));
got_error=1;
break;
}
}
else
to= info->rec_buff;
left_length=block_info.rec_len;
}
if (left_length < block_info.data_len)
{
mi_check_print_error(param,"Found too long record (%lu) at %s",
(ulong) block_info.data_len,
llstr(start_recpos,llbuff));
got_error=1;
break;
}
if (_mi_read_cache(¶m->read_cache,(uchar*) to,block_info.filepos,
(uint) block_info.data_len,
flag == 1 ? READING_NEXT : 0))
goto err;
to+=block_info.data_len;
link_used+= block_info.filepos-start_block;
used+= block_info.filepos - start_block + block_info.data_len;
empty+=block_info.block_len-block_info.data_len;
left_length-=block_info.data_len;
if (left_length)
{
if (b_type & BLOCK_LAST)
{
mi_check_print_error(param,
"Wrong record length %s of %s at %s",
llstr(block_info.rec_len-left_length,llbuff),
llstr(block_info.rec_len, llbuff2),
llstr(start_recpos,llbuff3));
got_error=1;
break;
}
if (info->state->data_file_length < block_info.next_filepos)
{
mi_check_print_error(param,
"Found next-recordlink that points outside datafile at %s",
llstr(block_info.filepos,llbuff));
got_error=1;
break;
}
}
} while (left_length);
if (! got_error)
{
if (_mi_rec_unpack(info,record,info->rec_buff,block_info.rec_len) ==
MY_FILE_ERROR)
{
mi_check_print_error(param,"Found wrong record at %s",
llstr(start_recpos,llbuff));
got_error=1;
}
else
{
info->checksum=mi_checksum(info,record);
if (param->testflag & (T_EXTEND | T_MEDIUM | T_VERBOSE))
{
if (_mi_rec_check(info,record, info->rec_buff,block_info.rec_len,
test(info->s->calc_checksum)))
{
mi_check_print_error(param,"Found wrong packed record at %s",
llstr(start_recpos,llbuff));
got_error=1;
}
}
if (!got_error)
param->glob_crc+= info->checksum;
}
}
else if (!flag)
pos=block_info.filepos+block_info.block_len;
break;
case COMPRESSED_RECORD:
if (_mi_read_cache(¶m->read_cache,(uchar*) block_info.header, pos,
info->s->pack.ref_length, READING_NEXT))
goto err;
start_recpos=pos;
splits++;
(void) _mi_pack_get_block_info(info, &info->bit_buff, &block_info,
&info->rec_buff, -1, start_recpos);
pos=block_info.filepos+block_info.rec_len;
if (block_info.rec_len < (uint) info->s->min_pack_length ||
block_info.rec_len > (uint) info->s->max_pack_length)
{
mi_check_print_error(param,
"Found block with wrong recordlength: %d at %s",
block_info.rec_len, llstr(start_recpos,llbuff));
got_error=1;
break;
}
if (_mi_read_cache(¶m->read_cache,(uchar*) info->rec_buff,
block_info.filepos, block_info.rec_len, READING_NEXT))
goto err;
if (_mi_pack_rec_unpack(info, &info->bit_buff, record,
info->rec_buff, block_info.rec_len))
{
mi_check_print_error(param,"Found wrong record at %s",
llstr(start_recpos,llbuff));
got_error=1;
}
if (static_row_size)
param->glob_crc+= mi_static_checksum(info,record);
else
param->glob_crc+= mi_checksum(info,record);
link_used+= (block_info.filepos - start_recpos);
used+= (pos-start_recpos);
break;
case BLOCK_RECORD:
assert(0); /* Impossible */
} /* switch */
if (! got_error)
{
intern_record_checksum+=(ha_checksum) start_recpos;
records++;
if (param->testflag & T_WRITE_LOOP && records % WRITE_COUNT == 0)
{
printf("%s\r", llstr(records,llbuff)); (void) fflush(stdout);
}
/* Check if keys match the record */
for (key=0,keyinfo= info->s->keyinfo; key < info->s->base.keys;
key++,keyinfo++)
{
if (mi_is_key_active(info->s->state.key_map, key))
{
if(!(keyinfo->flag & HA_FULLTEXT))
{
uint key_length=_mi_make_key(info,key,info->lastkey,record,
start_recpos);
if (extend)
{
/* We don't need to lock the key tree here as we don't allow
concurrent threads when running myisamchk
*/
int search_result=
#ifdef HAVE_RTREE_KEYS
(keyinfo->flag & HA_SPATIAL) ?
rtree_find_first(info, key, info->lastkey, key_length,
MBR_EQUAL | MBR_DATA) :
#endif
_mi_search(info,keyinfo,info->lastkey,key_length,
SEARCH_SAME, info->s->state.key_root[key]);
if (search_result)
{
mi_check_print_error(param,"Record at: %10s "
"Can't find key for index: %2d",
llstr(start_recpos,llbuff),key+1);
if (error++ > MAXERR || !(param->testflag & T_VERBOSE))
goto err2;
}
}
else
key_checksum[key]+=mi_byte_checksum((uchar*) info->lastkey,
key_length);
}
}
}
}
else
{
got_error=0;
if (error++ > MAXERR || !(param->testflag & T_VERBOSE))
goto err2;
}
next:; /* Next record */
}
if (param->testflag & T_WRITE_LOOP)
{
(void) fputs(" \r",stdout); (void) fflush(stdout);
}
if (records != info->state->records)
{
mi_check_print_error(param,"Record-count is not ok; is %-10s Should be: %s",
llstr(records,llbuff), llstr(info->state->records,llbuff2));
error=1;
}
else if (param->record_checksum &&
param->record_checksum != intern_record_checksum)
{
mi_check_print_error(param,
"Keypointers and record positions doesn't match");
error=1;
}
else if (param->glob_crc != info->state->checksum &&
(info->s->options &
(HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)))
{
mi_check_print_warning(param,
"Record checksum is not the same as checksum stored in the index file\n");
error=1;
}
else if (!extend)
{
for (key=0 ; key < info->s->base.keys; key++)
{
if (key_checksum[key] != param->key_crc[key] &&
!(info->s->keyinfo[key].flag & (HA_FULLTEXT | HA_SPATIAL)))
{
mi_check_print_error(param,"Checksum for key: %2d doesn't match checksum for records",
key+1);
error=1;
}
}
}
if (del_length != info->state->empty)
{
mi_check_print_warning(param,
"Found %s deleted space. Should be %s",
llstr(del_length,llbuff2),
llstr(info->state->empty,llbuff));
}
if (used+empty+del_length != info->state->data_file_length)
{
mi_check_print_warning(param,
"Found %s record-data and %s unused data and %s deleted-data",
llstr(used,llbuff),llstr(empty,llbuff2),
llstr(del_length,llbuff3));
mi_check_print_warning(param,
"Total %s, Should be: %s",
llstr((used+empty+del_length),llbuff),
llstr(info->state->data_file_length,llbuff2));
}
if (del_blocks != info->state->del)
{
mi_check_print_warning(param,
"Found %10s deleted blocks Should be: %s",
llstr(del_blocks,llbuff),
llstr(info->state->del,llbuff2));
}
if (splits != info->s->state.split)
{
mi_check_print_warning(param,
"Found %10s key parts. Should be: %s",
llstr(splits,llbuff),
llstr(info->s->state.split,llbuff2));
}
if (param->testflag & T_INFO)
{
if (param->warning_printed || param->error_printed)
puts("");
if (used != 0 && ! param->error_printed)
{
printf("Records:%18s M.recordlength:%9lu Packed:%14.0f%%\n",
llstr(records,llbuff), (long)((used-link_used)/records),
(info->s->base.blobs ? 0.0 :
(ulonglong2double((ulonglong) info->s->base.reclength*records)-
my_off_t2double(used))/
ulonglong2double((ulonglong) info->s->base.reclength*records)*100.0));
printf("Recordspace used:%9.0f%% Empty space:%12d%% Blocks/Record: %6.2f\n",
(ulonglong2double(used-link_used)/ulonglong2double(used-link_used+empty)*100.0),
(!records ? 100 : (int) (ulonglong2double(del_length+empty)/
my_off_t2double(used)*100.0)),
ulonglong2double(splits - del_blocks) / records);
}
printf("Record blocks:%12s Delete blocks:%10s\n",
llstr(splits-del_blocks,llbuff),llstr(del_blocks,llbuff2));
printf("Record data: %12s Deleted data: %10s\n",
llstr(used-link_used,llbuff),llstr(del_length,llbuff2));
printf("Lost space: %12s Linkdata: %10s\n",
llstr(empty,llbuff),llstr(link_used,llbuff2));
}
my_free(mi_get_rec_buff_ptr(info, record));
DBUG_RETURN (error);
err:
mi_check_print_error(param,"got error: %d when reading datafile at record: %s",my_errno, llstr(records,llbuff));
err2:
my_free(mi_get_rec_buff_ptr(info, record));
param->testflag|=T_RETRY_WITHOUT_QUICK;
DBUG_RETURN(1);
} /* chk_data_link */ | 0 | [
"CWE-362"
] | mysql-server | 4e5473862e6852b0f3802b0cd0c6fa10b5253291 | 50,370,116,541,828,390,000,000,000,000,000,000,000 | 442 | Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations. |
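The underlying race is the classic chmod/chown-on-a-path TOCTOU. A hedged sketch of the safer shape (illustrative; the actual MySQL fix simply stopped copying the stats): operate on the open file descriptor, which cannot be redirected by replacing the path.

```c
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* chmod() on a *path* after rename() can be pointed at an
 * attacker-substituted file; fchmod() on the descriptor binds the
 * operation to the file we actually created. */
int create_with_mode(const char *path, mode_t mode)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, mode);
    if (fd < 0)
        return -1;
    if (fchmod(fd, mode) < 0) {     /* fd-based: no path race */
        close(fd);
        unlink(path);
        return -1;
    }
    return fd;
}
```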
gerbv_gdk_draw_oval(GdkPixmap *pixmap, GdkGC *gc,
int filled, gint x, gint y, gint x_axis, gint y_axis,
double angle_deg)
{
gint width;
GdkPoint points[2];
GdkGC *local_gc = gdk_gc_new(pixmap);
gdk_gc_copy(local_gc, gc);
if (x_axis > y_axis) {
/* Draw in x axis */
width = y_axis;
points[0].x = -(x_axis >> 1) + (y_axis >> 1);
points[0].y = 0;
points[1].x = (x_axis >> 1) - (y_axis >> 1);
points[1].y = 0;
} else {
/* Draw in y axis */
width = x_axis;
points[0].x = 0;
points[0].y = -(y_axis >> 1) + (x_axis >> 1);
points[1].x = 0;
points[1].y = (y_axis >> 1) - (x_axis >> 1);
}
points[0] = rotate_point(points[0], angle_deg);
points[0].x += x;
points[0].y += y;
points[1] = rotate_point(points[1], angle_deg);
points[1].x += x;
points[1].y += y;
gdk_gc_set_line_attributes(local_gc, width,
GDK_LINE_SOLID, GDK_CAP_ROUND, GDK_JOIN_MITER);
gdk_draw_line(pixmap, local_gc,
points[0].x, points[0].y,
points[1].x, points[1].y);
gdk_gc_unref(local_gc);
return;
} /* gerbv_gdk_draw_oval */ | 0 | [
"CWE-703"
] | gerbv | b2c2f8da851f2ac8079a91ce9d498d87ff96abcf | 185,464,541,094,378,850,000,000,000,000,000,000,000 | 45 | Avoid direct access on array of unknown size
By requiring a `gerbv_simplified_amacro_t`, the `dgk_draw_amacro_funcs` can be sure of the length of the parameter array. |
static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
unsigned long key = 0UL;
struct cgroup_subsys *ss;
int i;
for_each_subsys(ss, i)
key += (unsigned long)css[i];
key = (key >> 16) ^ key;
return key;
} | 0 | [
"CWE-416"
] | linux | a06247c6804f1a7c86a2e5398a4c1f1db1471848 | 325,892,034,961,060,900,000,000,000,000,000,000,000 | 12 | psi: Fix uaf issue when psi trigger is destroyed while being polled
With write operation on psi files replacing old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected] |
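The new policy is easy to state in code. A minimal sketch (types hypothetical, not the kernel's): a per-file trigger may be installed once, and a second write fails with EBUSY instead of freeing a waitqueue a concurrent poll() may be sleeping on.

```c
#include <errno.h>
#include <stddef.h>

struct trigger { int dummy; };
struct file_ctx { struct trigger *trigger; };

/* A write may install a trigger only once per file descriptor;
 * replacing a live trigger (and freeing its waitqueue under a
 * concurrent poll()) is refused. Illustrative sketch. */
static int install_trigger(struct file_ctx *ctx, struct trigger *t)
{
    if (ctx->trigger)        /* already defined: don't free under poll() */
        return -EBUSY;
    ctx->trigger = t;
    return 0;
}
```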
static INLINE void IncrementX963KdfCounter(byte* inOutCtr)
{
int i;
/* in network byte order so start at end and work back */
for (i = 3; i >= 0; i--) {
if (++inOutCtr[i]) /* we're done unless we overflow */
return;
}
} | 0 | [
"CWE-200"
] | wolfssl | 9b9568d500f31f964af26ba8d01e542e1f27e5ca | 1,527,522,570,448,588,800,000,000,000,000,000,000 | 10 | Change ECDSA signing to use blinding. |
void ip_mc_destroy_dev(struct in_device *in_dev)
{
struct ip_mc_list *i;
ASSERT_RTNL();
/* Deactivate timers */
ip_mc_down(in_dev);
#ifdef CONFIG_IP_MULTICAST
igmpv3_clear_delrec(in_dev);
#endif
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
ip_mc_clear_src(i);
ip_ma_put(i);
}
} | 0 | [
"CWE-362"
] | linux | 23d2b94043ca8835bd1e67749020e839f396a1c2 | 288,025,756,325,275,830,000,000,000,000,000,000,000 | 19 | igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got below panic when doing fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is one use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
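The fix shape, reduced to its essentials: the source-filter list is freed under the same per-group lock, so any traversal must hold that lock. Kernel-style sketch (simplified, not the literal patch):

```c
/* ip_mc_del_src() frees im->sources under im->lock, so the reader must
 * hold im->lock across its walk; the RCU read lock alone does not keep
 * the sf_list alive. Simplified sketch of the added locking. */
spin_lock(&im->lock);
for (psf = im->sources; psf; psf = psf->sf_next) {
    if (psf->sf_inaddr == src_addr)
        break;
}
/* ... use psf while still holding the lock ... */
spin_unlock(&im->lock);
```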
TIFFWriteDirectoryTagCheckedSlong8(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, int64 value)
{
int64 m;
assert(sizeof(int64)==8);
if( !(tif->tif_flags&TIFF_BIGTIFF) ) {
TIFFErrorExt(tif->tif_clientdata,"TIFFWriteDirectoryTagCheckedSlong8","SLONG8 not allowed for ClassicTIFF");
return(0);
}
m=value;
if (tif->tif_flags&TIFF_SWAB)
TIFFSwabLong8((uint64*)(&m));
return(TIFFWriteDirectoryTagData(tif,ndir,dir,tag,TIFF_SLONG8,1,8,&m));
} | 0 | [
"CWE-617"
] | libtiff | de144fd228e4be8aa484c3caf3d814b6fa88c6d9 | 135,613,534,168,365,940,000,000,000,000,000,000,000 | 13 | TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963 |
static void virtio_net_tx_bh(void *opaque)
{
VirtIONetQueue *q = opaque;
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
int32_t ret;
/* This happens when device was stopped but BH wasn't. */
if (!vdev->vm_running) {
/* Make sure tx waiting is set, so we'll run when restarted. */
assert(q->tx_waiting);
return;
}
q->tx_waiting = 0;
/* Just in case the driver is not ready on more */
if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
return;
}
ret = virtio_net_flush_tx(q);
if (ret == -EBUSY || ret == -EINVAL) {
return; /* Notification re-enable handled by tx_complete or device
* broken */
}
/* If we flush a full burst of packets, assume there are
* more coming and immediately reschedule */
if (ret >= n->tx_burst) {
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
return;
}
/* If less than a full burst, re-enable notification and flush
* anything that may have come in while we weren't looking. If
* we find something, assume the guest is still active and reschedule */
virtio_queue_set_notification(q->tx_vq, 1);
ret = virtio_net_flush_tx(q);
if (ret == -EINVAL) {
return;
} else if (ret > 0) {
virtio_queue_set_notification(q->tx_vq, 0);
qemu_bh_schedule(q->tx_bh);
q->tx_waiting = 1;
}
} | 0 | [
"CWE-703"
] | qemu | abe300d9d894f7138e1af7c8e9c88c04bfe98b37 | 97,599,299,969,842,840,000,000,000,000,000,000,000 | 48 | virtio-net: fix map leaking on error during receive
Commit bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg")
tries to fix the use after free of the sg by caching the virtqueue
elements in an array and unmap them at once after receiving the
packets, But it forgot to unmap the cached elements on error which
will lead to leaking of mapping and other unexpected results.
Fixing this by detaching the cached elements on error. This addresses
CVE-2022-26353.
Reported-by: Victor Tom <[email protected]>
Cc: [email protected]
Fixes: CVE-2022-26353
Fixes: bedd7e93d0196 ("virtio-net: fix use after unmap/free for sg")
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
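The bug class generalizes well beyond virtio. A self-contained C sketch of the rule the message states: resources accumulated into a cache must be released on the error path too, not only on success.

```c
#include <stdlib.h>

/* Generic shape of the bug class: a mid-loop failure must unwind
 * everything cached so far instead of leaking it. */
int process_batch(size_t n)
{
    void *cached[16];
    size_t i;
    for (i = 0; i < n && i < 16; i++) {
        cached[i] = malloc(64);
        if (!cached[i])
            goto err;               /* mid-loop failure */
        /* ... fill/use cached[i] ... */
    }
    /* success path: consume and free */
    for (i = 0; i < n && i < 16; i++)
        free(cached[i]);
    return 0;
err:
    while (i-- > 0)                 /* the fix: unwind what was cached */
        free(cached[i]);
    return -1;
}
```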
static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
struct net *net = sock_net(sk);
struct xfrm_state *x;
if (hdr->sadb_msg_len != sizeof(struct sadb_msg)/8)
return -EOPNOTSUPP;
if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
return 0;
x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
if (x == NULL)
return 0;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_ACQ)
x->km.state = XFRM_STATE_ERROR;
spin_unlock_bh(&x->lock);
xfrm_state_put(x);
return 0;
} | 0 | [] | linux | 096f41d3a8fcbb8dde7f71379b1ca85fe213eded | 169,987,999,497,411,830,000,000,000,000,000,000,000 | 23 | af_key: Fix sadb_x_ipsecrequest parsing
The parsing of sadb_x_ipsecrequest is broken in a number of ways.
First of all we're not verifying sadb_x_ipsecrequest_len. This
is needed when the structure carries addresses at the end. Worse
we don't even look at the length when we parse those optional
addresses.
The migration code had similar parsing code that's better but
it also has some deficiencies. The length is overcounted first
of all as it includes the header itself. It also fails to check
the length before dereferencing the sa_family field.
This patch fixes those problems in parse_sockaddr_pair and then
uses it in parse_ipsecrequest.
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
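A hedged sketch of the validation order the patch describes (struct layout simplified and hypothetical): prove the fixed header fits before reading it, then prove the declared length is sane before touching the optional trailing addresses.

```c
#include <stddef.h>
#include <stdint.h>

/* SADB lengths are declared in 8-byte units; the header fields must fit
 * in the remaining buffer before they are read, and the declared length
 * must cover any trailing addresses. Simplified illustration. */
struct ipsecrequest_hdr { uint16_t len8; uint16_t proto; /* ... */ };

static int parse_req(const uint8_t *p, size_t remaining)
{
    if (remaining < sizeof(struct ipsecrequest_hdr))
        return -1;                           /* can't even read the header */
    const struct ipsecrequest_hdr *h = (const void *)p;
    size_t declared = (size_t)h->len8 * 8;
    if (declared < sizeof(*h) || declared > remaining)
        return -1;                           /* declared length lies */
    /* optional addresses live in p[sizeof(*h) .. declared), bounded above */
    return 0;
}
```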
static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
return 0;
switch (usage->hid & HID_USAGE) {
case 0x301: ch_map_key_clear(KEY_PROG1); break;
case 0x302: ch_map_key_clear(KEY_PROG2); break;
case 0x303: ch_map_key_clear(KEY_PROG3); break;
default:
return 0;
}
return 1;
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 4ab25786c87eb20857bbb715c3ae34ec8fd6a214 | 90,652,779,454,795,710,000,000,000,000,000,000,000 | 17 | HID: fix a couple of off-by-ones
There are a few very theoretical off-by-one bugs in report descriptor size
checking when performing a pre-parsing fixup. Fix those.
Cc: [email protected]
Reported-by: Ben Hawkes <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]> |
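The off-by-one is in the bounds test guarding a descriptor fixup. An illustrative sketch (byte values hypothetical, not real HID descriptors): reading rdesc[i] and rdesc[i+1] requires *rsize >= i + 2, not i + 1.

```c
/* A fixup that inspects two bytes must prove both are in bounds before
 * dereferencing; the original checks were short by one. Illustrative
 * values, not a real HID descriptor pattern. */
static void report_fixup(unsigned char *rdesc, unsigned int *rsize,
                         unsigned int i)
{
    if (*rsize >= i + 2 && rdesc[i] == 0x0a && rdesc[i + 1] == 0x2f)
        rdesc[i + 1] = 0x31;
}
```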
UnicodeStringTest::TestSizeofUnicodeString() {
// See the comments in unistr.h near the declaration of UnicodeString's fields.
// See the API comments for UNISTR_OBJECT_SIZE.
size_t sizeofUniStr=sizeof(UnicodeString);
size_t expected=UNISTR_OBJECT_SIZE;
if(expected!=sizeofUniStr) {
// Possible cause: UNISTR_OBJECT_SIZE may not be a multiple of sizeof(pointer),
// or the compiler might add more internal padding than expected.
errln("sizeof(UnicodeString)=%d, expected UNISTR_OBJECT_SIZE=%d",
(int)sizeofUniStr, (int)expected);
}
if(sizeofUniStr<32) {
errln("sizeof(UnicodeString)=%d < 32, probably too small", (int)sizeofUniStr);
}
// We assume that the entire UnicodeString object,
// minus the vtable pointer and 2 bytes for flags and short length,
// is available for internal storage of UChars.
int32_t expectedStackBufferLength=((int32_t)UNISTR_OBJECT_SIZE-sizeof(void *)-2)/U_SIZEOF_UCHAR;
UnicodeString s;
const UChar *emptyBuffer=s.getBuffer();
for(int32_t i=0; i<expectedStackBufferLength; ++i) {
s.append((UChar)0x2e);
}
const UChar *fullBuffer=s.getBuffer();
if(fullBuffer!=emptyBuffer) {
errln("unexpected reallocation when filling with assumed stack buffer size of %d",
expectedStackBufferLength);
}
const UChar *terminatedBuffer=s.getTerminatedBuffer();
if(terminatedBuffer==emptyBuffer) {
errln("unexpected keeping stack buffer when overfilling assumed stack buffer size of %d",
expectedStackBufferLength);
}
} | 0 | [
"CWE-190",
"CWE-787"
] | icu | b7d08bc04a4296982fcef8b6b8a354a9e4e7afca | 102,662,207,929,494,920,000,000,000,000,000,000,000 | 34 | ICU-20958 Prevent SEGV_MAPERR in append
See #971 |
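The append guard amounts to one overflow-safe comparison. A sketch of the idea in C (ICU's actual fix is in C++ UnicodeString::append):

```c
#include <stdint.h>

/* Growing a string by srcLength must not wrap the signed 32-bit length;
 * compare against INT32_MAX - srcLength instead of adding first. */
static int can_append(int32_t length, int32_t srcLength)
{
    return srcLength >= 0 && length >= 0 && length <= INT32_MAX - srcLength;
}
```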
HttpTransact::HandleCacheOpenReadPush(State* s, bool read_successful)
{
if (read_successful) {
s->cache_info.action = CACHE_PREPARE_TO_UPDATE;
} else {
s->cache_info.action = CACHE_PREPARE_TO_WRITE;
}
TRANSACT_RETURN(SM_ACTION_READ_PUSH_HDR, HandlePushResponseHdr);
} | 0 | [
"CWE-119"
] | trafficserver | 8b5f0345dade6b2822d9b52c8ad12e63011a5c12 | 138,131,860,498,127,300,000,000,000,000,000,000,000 | 10 | Fix the internal buffer sizing. Thanks to Sudheer for helping isolating this bug |
Value ExpressionFilter::evaluate(const Document& root) const {
// We are guaranteed at parse time that this isn't using our _varId.
const Value inputVal = _input->evaluate(root);
if (inputVal.nullish())
return Value(BSONNULL);
uassert(28651,
str::stream() << "input to $filter must be an array not "
<< typeName(inputVal.getType()),
inputVal.isArray());
const vector<Value>& input = inputVal.getArray();
if (input.empty())
return inputVal;
vector<Value> output;
auto& vars = getExpressionContext()->variables;
for (const auto& elem : input) {
vars.setValue(_varId, elem);
if (_filter->evaluate(root).coerceToBool()) {
output.push_back(std::move(elem));
}
}
return Value(std::move(output));
} | 0 | [
"CWE-835"
] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 299,572,126,869,818,900,000,000,000,000,000,000,000 | 28 | SERVER-38070 fix infinite loop in agg expression |
static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return sprintf(buf, "%d\n", tun->owner);
} | 0 | [
"CWE-119"
] | linux-2.6 | 3c8a9c63d5fd738c261bd0ceece04d9c8357ca13 | 288,173,293,747,621,340,000,000,000,000,000,000,000 | 6 | tun/tap: Fix crashes if open() /dev/net/tun and then poll() it.
Fix NULL pointer dereference in tun_chr_pool() introduced by commit
33dccbb050bbe35b88ca8cf1228dcf3e4d4b3554 ("tun: Limit amount of queued
packets per device") and triggered by this code:
int fd;
struct pollfd pfd;
fd = open("/dev/net/tun", O_RDWR);
pfd.fd = fd;
pfd.events = POLLIN | POLLOUT;
poll(&pfd, 1, 0);
Reported-by: Eugene Kapun <[email protected]>
Signed-off-by: Mariusz Kozlowski <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static uint32_t parse_hex(const char *s, size_t nchars, uint32_t *res)
{
uint32_t v = 0;
size_t n;
int c;
for (n = 0; n < nchars; n++) {
c = hex(s[n]);
if (c == (char)-1) {
*res = TEE_ERROR_BAD_FORMAT;
goto out;
}
v = (v << 4) + c;
}
*res = TEE_SUCCESS;
out:
return v;
} | 0 | [
"CWE-703",
"CWE-189"
] | optee_os | 7e768f8a473409215fe3fff8f6e31f8a3a0103c6 | 6,171,155,945,188,572,000,000,000,000,000,000,000 | 18 | core: clear the entire TA area
Previously we cleared (memset to zero) the size corresponding to code
and data segments; however, the allocation for the TA is made on the
granularity of the memory pool, meaning that we did not clear all memory
and because of that we could potentially leak code and data of a
previous loaded TA.
Fixes: OP-TEE-2018-0006: "Potential disclosure of previously loaded TA
code and data"
Signed-off-by: Joakim Bech <[email protected]>
Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8)
Suggested-by: Jens Wiklander <[email protected]>
Reviewed-by: Jens Wiklander <[email protected]>
Reported-by: Riscure <[email protected]>
Reported-by: Alyssa Milburn <[email protected]>
Acked-by: Etienne Carriere <[email protected]> |
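A sketch of the fix described above (names hypothetical): clear the whole rounded-up allocation the pool actually handed out, not just the bytes the segments occupy.

```c
#include <string.h>
#include <stdint.h>

/* The pool hands out whole granules, so clear the rounded-up allocation
 * size; otherwise the tail of the last granule can leak code and data
 * from a previously loaded TA. Illustrative helper. */
static void clear_ta_area(uint8_t *alloc, size_t segs_size, size_t granule)
{
    size_t alloc_size = ((segs_size + granule - 1) / granule) * granule;
    memset(alloc, 0, alloc_size);   /* was, in effect: memset(alloc, 0, segs_size) */
}
```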
bool allow_svhandler_flash_range(size_t start, size_t end) {
// Protect from overflow.
if (start > end) return false;
// Disallow non-flash writes.
if (start < FLASH_ORIGIN || end > FLASH_END) return false;
// Disallow writes to any sectors which aren't allowed.
bool startAllowed = false;
bool endAllowed = false;
for (const FlashSector* s = flash_sector_map; s->use != FLASH_INVALID; s++) {
if (allow_svhandler_flash_sector(s)) {
if (!startAllowed &&
start + 1 > start &&
do_memory_ranges_overlap(start, start + 1, s->start, s->start + s->len)) {
startAllowed = true;
}
if (!endAllowed &&
end - 1 < end &&
do_memory_ranges_overlap(end - 1, end, s->start, s->start + s->len)) {
endAllowed = true;
}
} else {
if (do_memory_ranges_overlap(start, end, s->start, s->start + s->len)) return false;
}
}
// Ensure writes start and end in allowed sectors. As long as flash_sector_map consists of
// contiguous sectors, this will ensure no writes can target flash outside the map.
if (!startAllowed || !endAllowed) return false;
return true;
} | 0 | [
"CWE-668",
"CWE-125"
] | keepkey-firmware | 447c1f038a31378ab9589965c098467d9ea6cccc | 242,558,643,010,665,930,000,000,000,000,000,000,000 | 33 | fix: more robust address range checks in svhandler_flash_* |
int ec_GF2m_have_precompute_mult(const EC_GROUP *group)
{
return ec_wNAF_have_precompute_mult(group);
} | 0 | [
"CWE-310"
] | openssl | f9b6c0ba4c02497782f801e3c45688f3efaac55c | 211,605,566,523,798,960,000,000,000,000,000,000,000 | 4 | Fix for CVE-2014-0076
Fix for the attack described in the paper "Recovering OpenSSL
ECDSA Nonces Using the FLUSH+RELOAD Cache Side-channel Attack"
by Yuval Yarom and Naomi Benger. Details can be obtained from:
http://eprint.iacr.org/2014/140
Thanks to Yuval Yarom and Naomi Benger for discovering this
flaw and to Yuval Yarom for supplying a fix.
(cherry picked from commit 2198be3483259de374f91e57d247d0fc667aef29)
Conflicts:
CHANGES |
inline void OutputLerp32x4x1(const InterpolationCache<int32>& xs,
const int64 x_start, const int32 ys_ilerp,
const float min, const float max,
const qint32* const ys_input_lower_ptr,
const qint32* const ys_input_upper_ptr,
qint32* output_y_ptr) {
#ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON
const int64 xs_lower0 = xs.lower[x_start];
const int64 xs_upper0 = xs.upper[x_start];
const int32* const xs_ilerp0 = &xs.ilerp[x_start];
const int64 xs_lower1 = xs.lower[x_start + 1];
const int64 xs_upper1 = xs.upper[x_start + 1];
const int64 xs_lower2 = xs.lower[x_start + 2];
const int64 xs_upper2 = xs.upper[x_start + 2];
const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2];
const int64 xs_lower3 = xs.lower[x_start + 3];
const int64 xs_upper3 = xs.upper[x_start + 3];
const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp);
const int32x2_t x0x1 = ComputeLerpx2<RESOLUTION, false>(
ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0,
ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0,
ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1,
ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0,
y_lerpsx);
const int32x2_t x1x2 = ComputeLerpx2<RESOLUTION, false>(
ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2,
ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2,
ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3,
ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2,
y_lerpsx);
const int32x4_t x0x1x2x3 = vcombine_s32(x0x1, x1x2);
vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start), x0x1x2x3);
#else
for (int x = x_start; x < x_start + 4; ++x) {
OutputLerpForChannels<RESOLUTION, qint32, int32, int64>(
xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr,
output_y_ptr);
}
#endif
} | 0 | [
"CWE-787"
] | tensorflow | f6c40f0c6cbf00d46c7717a26419f2062f2f8694 | 62,252,091,825,278,620,000,000,000,000,000,000,000 | 46 | Validate min and max arguments to `QuantizedResizeBilinear`.
PiperOrigin-RevId: 369765091
Change-Id: I33be8b78273ab7d08b97541692fe05cb7f94963a |
static int inet_sk_reselect_saddr(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
__be32 old_saddr = inet->inet_saddr;
__be32 daddr = inet->inet_daddr;
struct flowi4 fl4;
struct rtable *rt;
__be32 new_saddr;
if (inet->opt && inet->opt->srr)
daddr = inet->opt->faddr;
/* Query new route. */
rt = ip_route_connect(&fl4, daddr, 0, RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if, sk->sk_protocol,
inet->inet_sport, inet->inet_dport, sk, false);
if (IS_ERR(rt))
return PTR_ERR(rt);
sk_setup_caps(sk, &rt->dst);
new_saddr = rt->rt_src;
if (new_saddr == old_saddr)
return 0;
if (sysctl_ip_dynaddr > 1) {
printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n",
__func__, &old_saddr, &new_saddr);
}
inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
/*
* XXX The only one ugly spot where we need to
* XXX really change the sockets identity after
* XXX it has entered the hashes. -DaveM
*
* Besides that, it does not check for connection
* uniqueness. Wait for troubles.
*/
__sk_prot_rehash(sk);
return 0;
} | 1 | [
"CWE-362"
] | linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 142,302,232,856,291,720,000,000,000,000,000,000,000 | 44 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
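Since this record is the one flagged vulnerable (target = 1), the fixed access pattern from the message is worth sketching. Kernel-style fragment (simplified): readers snapshot the RCU-published options pointer instead of dereferencing inet->opt, which another thread may free.

```c
/* Reader side after the fix: take an RCU read-side critical section and
 * an rcu_dereference() snapshot of the published options. Fragment,
 * simplified from the pattern the commit message describes. */
struct ip_options_rcu *inet_opt;

rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
    daddr = inet_opt->opt.faddr;
rcu_read_unlock();
```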
mono_type_get_object (MonoDomain *domain, MonoType *type)
{
MonoReflectionType *res;
MonoClass *klass = mono_class_from_mono_type (type);
/*we must avoid using @type as it might have come
* from a mono_metadata_type_dup and the caller
* expects that is can be freed.
* Using the right type from
*/
type = klass->byval_arg.byref == type->byref ? &klass->byval_arg : &klass->this_arg;
/* void is very common */
if (type->type == MONO_TYPE_VOID && domain->typeof_void)
return (MonoReflectionType*)domain->typeof_void;
/*
* If the vtable of the given class was already created, we can use
* the MonoType from there and avoid all locking and hash table lookups.
*
* We cannot do this for TypeBuilders as mono_reflection_create_runtime_class expects
* that the resulting object is different.
*/
if (type == &klass->byval_arg && !klass->image->dynamic) {
MonoVTable *vtable = mono_class_try_get_vtable (domain, klass);
if (vtable && vtable->type)
return vtable->type;
}
mono_loader_lock (); /*FIXME mono_class_init and mono_class_vtable acquire it*/
mono_domain_lock (domain);
if (!domain->type_hash)
domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mymono_metadata_type_hash,
(GCompareFunc)mymono_metadata_type_equal, MONO_HASH_VALUE_GC);
if ((res = mono_g_hash_table_lookup (domain->type_hash, type))) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
/* Create a MonoGenericClass object for instantiations of not finished TypeBuilders */
if ((type->type == MONO_TYPE_GENERICINST) && type->data.generic_class->is_dynamic && !type->data.generic_class->container_class->wastypebuilder) {
res = (MonoReflectionType *)mono_generic_class_get_object (domain, type);
mono_g_hash_table_insert (domain->type_hash, type, res);
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
if (!verify_safe_for_managed_space (type)) {
mono_domain_unlock (domain);
mono_loader_unlock ();
mono_raise_exception (mono_get_exception_invalid_operation ("This type cannot be propagated to managed space"));
}
if (klass->reflection_info && !klass->wastypebuilder) {
gboolean is_type_done = TRUE;
/* Generic parameters have reflection_info set but they are not finished together with their enclosing type.
* We must ensure that once a type is finished we don't return a GenericTypeParameterBuilder.
* We can't simply close the types as this will interfere with other parts of the generics machinery.
*/
if (klass->byval_arg.type == MONO_TYPE_MVAR || klass->byval_arg.type == MONO_TYPE_VAR) {
MonoGenericParam *gparam = klass->byval_arg.data.generic_param;
if (gparam->owner && gparam->owner->is_method) {
MonoMethod *method = gparam->owner->owner.method;
if (method && mono_class_get_generic_type_definition (method->klass)->wastypebuilder)
is_type_done = FALSE;
} else if (gparam->owner && !gparam->owner->is_method) {
MonoClass *klass = gparam->owner->owner.klass;
if (klass && mono_class_get_generic_type_definition (klass)->wastypebuilder)
is_type_done = FALSE;
}
}
/* g_assert_not_reached (); */
/* should this be considered an error condition? */
if (is_type_done && !type->byref) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return klass->reflection_info;
}
}
// FIXME: Get rid of this, do it in the icalls for Type
mono_class_init (klass);
#ifdef HAVE_SGEN_GC
res = (MonoReflectionType *)mono_gc_alloc_pinned_obj (mono_class_vtable (domain, mono_defaults.monotype_class), mono_class_instance_size (mono_defaults.monotype_class));
#else
res = (MonoReflectionType *)mono_object_new (domain, mono_defaults.monotype_class);
#endif
res->type = type;
mono_g_hash_table_insert (domain->type_hash, type, res);
if (type->type == MONO_TYPE_VOID)
domain->typeof_void = (MonoObject*)res;
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
} | 0 | [
"CWE-20"
] | mono | 4905ef1130feb26c3150b28b97e4a96752e0d399 | 271,724,186,116,668,240,000,000,000,000,000,000,000 | 99 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
bool OperandsBroadcastToOutputType(Type a, Type b, Type expected_output) {
Type output_element_type =
expected_output.cast<ShapedType>().getElementType();
Type broadcasted_type =
OpTrait::util::getBroadcastedType(a, b, output_element_type);
return broadcasted_type != Type() && broadcasted_type == expected_output;
} | 0 | [
"CWE-476",
"CWE-125"
] | tensorflow | d6b57f461b39fd1aa8c1b870f1b974aac3554955 | 75,826,766,606,927,520,000,000,000,000,000,000,000 | 7 | Prevent nullptr dereference in MLIR TFLite dialect/optimizer.
PiperOrigin-RevId: 387220762
Change-Id: Id136ef04bb3d36123b4685d316ae81a9ec924d6b |
void *dupClientReplyValue(void *o) {
clientReplyBlock *old = o;
clientReplyBlock *buf = zmalloc(sizeof(clientReplyBlock) + old->size);
memcpy(buf, o, sizeof(clientReplyBlock) + old->size);
return buf;
} | 0 | [
"CWE-770"
] | redis | 5674b0057ff2903d43eaff802017eddf37c360f8 | 213,876,687,249,010,670,000,000,000,000,000,000,000 | 6 | Prevent unauthenticated client from easily consuming lots of memory (CVE-2021-32675)
This change sets a low limit for multibulk and bulk length in the
protocol for unauthenticated connections, so that they can't easily
cause redis to allocate massive amounts of memory by sending just a few
characters on the network.
The new limits are 10 arguments of 16kb each (instead of 1m of 512mb) |
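The policy reduces to a per-state limit check. A minimal sketch (constants from the message, surrounding names illustrative, not redis's actual code):

```c
/* Unauthenticated clients get hard caps (10 multibulk args, 16 KiB bulk
 * strings) so a few protocol bytes can't force huge allocations; the
 * normal limits apply only after AUTH. */
#define UNAUTH_MAX_MULTIBULK 10
#define UNAUTH_MAX_BULK      (16 * 1024)

static int multibulk_len_ok(int authenticated, long long declared)
{
    long long limit = authenticated ? 1024 * 1024 : UNAUTH_MAX_MULTIBULK;
    return declared >= 0 && declared <= limit;
}

static int bulk_len_ok(int authenticated, long long declared)
{
    long long limit = authenticated ? 512LL * 1024 * 1024 : UNAUTH_MAX_BULK;
    return declared >= 0 && declared <= limit;
}
```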
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src.val;
return X86EMUL_CONTINUE;
} | 0 | [] | kvm | e28ba7bb020f07193bc000453c8775e9d2c0dda7 | 306,798,817,243,183,640,000,000,000,000,000,000,000 | 5 | KVM: x86: fix missing checks in syscall emulation
On hosts without this patch, 32bit guests will crash (and 64bit guests
may behave in a wrong way) for example by simply executing following
nasm-demo-application:
[bits 32]
global _start
SECTION .text
_start: syscall
(I tested it with winxp and linux - both always crashed)
Disassembly of section .text:
00000000 <_start>:
0: 0f 05 syscall
The reason seems to be a missing "invalid opcode" trap (int6) for the
syscall opcode "0f05", which is not available on Intel CPUs
outside long mode, nor on some AMD CPUs in legacy mode
(depending on CPU vendor, MSR_EFER and cpuid)
Because the previously mentioned OSs may not set up the corresponding
syscall target registers (STAR, LSTAR, CSTAR), they remain
NULL and (non-trapping) syscalls lead to multiple
faults and finally crashes.
Depending on the architecture (AMD or Intel) pretended by
guests, various checks according to vendor's documentation
are implemented to overcome the current issue and behave
like the CPUs physical counterparts.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
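The vendor-dependent rule in the message can be stated as a predicate. A hedged sketch (flags as booleans; the real emulator reads CPUID and MSR_EFER):

```c
/* SYSCALL (0f 05) must raise #UD unless the mode/vendor combination
 * actually implements it: Intel only in 64-bit long mode, some AMD also
 * in legacy mode, and EFER.SCE must be set either way. Simplified. */
static int syscall_is_valid(int vendor_is_intel, int in_long_mode,
                            int efer_sce)
{
    if (!efer_sce)
        return 0;
    if (vendor_is_intel && !in_long_mode)
        return 0;
    return 1;
}
```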
static inline bool IS_INODE(struct page *page)
{
struct f2fs_node *p = F2FS_NODE(page);
return RAW_IS_INODE(p);
} | 0 | [
"CWE-476"
] | linux | 4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6 | 188,397,120,844,808,080,000,000,000,000,000,000,000 | 6 | f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <[email protected]> |
EXPORTED int annotate_state_write(annotate_state_t *state,
const char *entry,
const char *userid,
const struct buf *value)
{
return write_entry(state->mailbox, state->uid,
entry, userid, value, /*ignorequota*/1,
state->silent, NULL, /*maywrite*/1);
} | 0 | [
"CWE-732"
] | cyrus-imapd | 621f9e41465b521399f691c241181300fab55995 | 165,258,834,236,959,000,000,000,000,000,000,000,000 | 9 | annotate: don't allow everyone to write shared server entries |
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_node *n;
bool found = false;
rcu_read_lock();
list_for_each_entry_rcu(n, &tn->node_list, list) {
read_lock_bh(&n->lock);
if (!memcmp(id, n->peer_id, 16) &&
kref_get_unless_zero(&n->kref))
found = true;
read_unlock_bh(&n->lock);
if (found)
break;
}
rcu_read_unlock();
return found ? n : NULL;
} | 0 | [] | linux | 0217ed2848e8538bcf9172d97ed2eeb4a26041bb | 283,995,033,077,007,300,000,000,000,000,000,000,000 | 19 | tipc: better validate user input in tipc_nl_retrieve_key()
Before calling tipc_aead_key_size(ptr), we need to ensure
we have enough data to dereference ptr->keylen.
We probably also want to make sure tipc_aead_key_size()
wont overflow with malicious ptr->keylen values.
Syzbot reported:
BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x21c/0x280 lib/dump_stack.c:120
kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118
__msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197
__tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800
netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494
genl_rcv+0x63/0x80 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
RIP: 0023:0xf7f60549
Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline]
kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104
kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76
slab_alloc_node mm/slub.c:2907 [inline]
__kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527
__kmalloc_reserve net/core/skbuff.c:142 [inline]
__alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210
alloc_skb include/linux/skbuff.h:1099 [inline]
netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline]
netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
Fixes: e1f32190cf7d ("tipc: add support for AEAD key setting via netlink")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Tuong Lien <[email protected]>
Cc: Jon Maloy <[email protected]>
Cc: Ying Xue <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
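A minimal sketch of the validation the message calls for; the attribute and bound names are assumptions, not the actual upstream patch:
	if (!attrs[TIPC_NLA_NODE_KEY])
		return -ENODATA;
	ptr = (struct tipc_aead_key *)nla_data(attrs[TIPC_NLA_NODE_KEY]);
	len = nla_len(attrs[TIPC_NLA_NODE_KEY]);
	if (len < sizeof(*ptr))                    /* enough bytes to read ptr->keylen at all? */
		return -EINVAL;
	if (ptr->keylen > TIPC_AEAD_KEYLEN_MAX)    /* assumed bound: reject absurd lengths before any math */
		return -EINVAL;
	if (len < tipc_aead_key_size(ptr))         /* header plus the declared key material must fit */
		return -EINVAL;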
e1000_autoneg_timer(void *opaque)
{
E1000State *s = opaque;
if (!qemu_get_queue(s->nic)->link_down) {
e1000_autoneg_done(s);
set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
}
} | 0 | [
"CWE-835"
] | qemu | 1caff0340f49c93d535c6558a5138d20d475315c | 220,534,270,814,790,450,000,000,000,000,000,000,000 | 8 | e1000: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
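For context, a sketch of the reentrancy guard the new helper relies on, modeled on QEMU's net queue; treat field and function names as illustrative:
ssize_t qemu_net_queue_receive(NetQueue *queue, const uint8_t *data, size_t size)
{
    if (queue->delivering) {   /* a delivery is already in progress: a loopback loop is forming */
        return 0;              /* return early instead of recursing forever */
    }
    return qemu_net_queue_deliver(queue, NULL, 0, data, size);
}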
continue_command()
{
c_token++;
if (iteration_depth == 0)
return;
/* Skip to end of current clause */
c_token = num_tokens;
/* request that remainder of this iteration be skipped also */
requested_continue = TRUE;
} | 0 | [
"CWE-415"
] | gnuplot | 052cbd17c3cbbc602ee080b2617d32a8417d7563 | 45,229,971,071,378,220,000,000,000,000,000,000,000 | 10 | successive failures of "set print <foo>" could cause double-free
Bug #2312 |
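One generic way to close the double-free class named above (a sketch, not the actual gnuplot patch): clear the pointer as soon as it is freed, so a second failure path frees NULL, which is a no-op.
free(print_out_name);      /* hypothetical pointer owned by "set print" */
print_out_name = NULL;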
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
WARN_ON_ONCE(!rcu_read_lock_held());
return NULL;
} | 0 | [
"CWE-119"
] | linux | 1be7107fbe18eed3e319a6c3e83c78254b693acb | 337,823,502,097,934,500,000,000,000,000,000,000,000 | 5 | mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]> |
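A sketch of the gap-aware helper described above; the real definition lives in the mm headers and may differ in detail:
extern unsigned long stack_guard_gap;

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* guard against underflow */
			vm_start = 0;
	}
	return vm_start;
}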
tensorflow::Device* KernelAndDeviceFunc::OutputResourceDevice(int idx) const {
if (output_dtypes_[idx] == DT_RESOURCE) {
return output_devices_[idx];
}
return nullptr;
} | 0 | [
"CWE-476",
"CWE-369"
] | tensorflow | da8558533d925694483d2c136a9220d6d49d843c | 28,923,049,491,794,522,000,000,000,000,000,000,000 | 6 | Fix undefined behavior in `tf.raw_ops.Switch` in eager mode.
PiperOrigin-RevId: 332578058
Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9 |
static ZEND_RSRC_DTOR_FUNC(php_sqlite_db_dtor)
{
if (rsrc->ptr) {
struct php_sqlite_db *db = (struct php_sqlite_db*)rsrc->ptr;
sqlite_close(db->db);
zend_hash_destroy(&db->callbacks);
pefree(db, db->is_persistent);
rsrc->ptr = NULL;
}
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 280,846,916,061,875,560,000,000,000,000,000,000,000 | 14 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191; trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus |
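The class of check the message calls for, sketched; the eventual macro-based API it mentions is not shown:
/* reject paths with an embedded NUL before handing them to C string APIs */
if (memchr(filename, '\0', filename_len) != NULL) {
	RETURN_FALSE;
}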
version(void)
{
fprintf(stderr, "Version: %s\n\n", VERSION);
fprintf(stderr, "Compiled in settings:\n");
fprintf(stderr, " default config file \"%s\"\n", PATH_RADVD_CONF);
fprintf(stderr, " default pidfile \"%s\"\n", PATH_RADVD_PID);
fprintf(stderr, " default logfile \"%s\"\n", PATH_RADVD_LOG);
fprintf(stderr, " default syslog facility %d\n", LOG_FACILITY);
fprintf(stderr, "Please send bug reports or suggestions to %s.\n",
CONTACT_EMAIL);
exit(1);
} | 0 | [
"CWE-20"
] | radvd | 2c50375043186e133f15135f4c93ca964238ee60 | 134,902,133,445,979,030,000,000,000,000,000,000,000 | 13 | main() must fail on privsep_init() errors; it must not run
without privilege separation as privsep is expected. |
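A sketch of the failure handling the message demands; flog() is radvd's logging helper, but treat the exact call as illustrative:
if (privsep_init() < 0) {
	flog(LOG_ERR, "Failed to initialize privsep.");
	exit(1);	/* never keep running without privilege separation */
}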
dissect_kafka_timestamp_delta(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, guint64 first_timestamp)
{
nstime_t nstime;
guint64 milliseconds;
guint64 val;
guint len;
proto_item *pi;
len = tvb_get_varint(tvb, offset, FT_VARINT_MAX_LEN, &val, ENC_VARINT_ZIGZAG);
milliseconds = first_timestamp + val;
nstime.secs = (time_t) (milliseconds / 1000);
nstime.nsecs = (int) ((milliseconds % 1000) * 1000000);
pi = proto_tree_add_time(tree, hf_item, tvb, offset, len, &nstime);
if (len == 0) {
//This will probably lead to a malformed packet, but it's better than not incrementing the offset
len = FT_VARINT_MAX_LEN;
expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
}
return offset+len;
} | 0 | [
"CWE-401"
] | wireshark | f4374967bbf9c12746b8ec3cd54dddada9dd353e | 183,463,795,973,387,660,000,000,000,000,000,000,000 | 23 | Kafka: Limit our decompression size.
Don't assume that the Internet has our best interests at heart when it
gives us the size of our decompression buffer. Assign an arbitrary limit
of 50 MB.
This fixes #16739 in that it takes care of
** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start"
which is different from the original error output. It looks like *that*
might have been taken care of in one of the other recent Kafka bug fixes.
The decompression routines return a success or failure status. Use
gbooleans instead of ints for that. |
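A sketch of the cap described above; the constant and expert-info names are assumptions:
#define MAX_DECOMPRESSION_SIZE (50 * 1000 * 1000)	/* arbitrary 50 MB ceiling */

if (uncompressed_size > MAX_DECOMPRESSION_SIZE) {
    expert_add_info(pinfo, ti, &ei_kafka_decompression_too_large);	/* assumed expert item */
    return FALSE;	/* refuse to allocate an attacker-chosen buffer */
}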
static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
return sprintf(buf, "0x%lx\n", port->start);
} | 0 | [
"CWE-119",
"CWE-189",
"CWE-703"
] | linux | 7314e613d5ff9f0934f7a0f74ed7973b903315d1 | 315,069,041,804,448,080,000,000,000,000,000,000,000 | 4 | Fix a few incorrectly checked [io_]remap_pfn_range() calls
Nico Golde reports a few straggling uses of [io_]remap_pfn_range() that
really should use the vm_iomap_memory() helper. This trivially converts
two of them to the helper, and comments about why the third one really
needs to continue to use remap_pfn_range(), and adds the missing size
check.
Reported-by: Nico Golde <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
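The conversion the message describes, sketched; vm_iomap_memory() performs the size and offset validation that an open-coded remap_pfn_range() call can miss (uio_mem field names assumed):
/* in a uio mmap handler: let the helper range-check the request */
return vm_iomap_memory(vma, mem->addr, mem->size);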
static void mon_text_complete(void *data, struct urb *urb, int status)
{
struct mon_reader_text *rp = data;
mon_text_event(rp, urb, 'C', status);
} | 0 | [
"CWE-787"
] | linux | a5f596830e27e15f7a0ecd6be55e433d776986d8 | 276,632,524,379,789,700,000,000,000,000,000,000,000 | 5 | usb: usbmon: Read text within supplied buffer size
This change fixes buffer overflows and silent data corruption with the
usbmon device driver text file read operations.
Signed-off-by: Fredrik Noring <[email protected]>
Signed-off-by: Pete Zaitcev <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
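The general shape of the fix, sketched with assumed field names: format against the caller-supplied limit so nothing is written past the user's buffer.
/* scnprintf() stores at most 'limit - cnt' bytes and returns what it
 * actually wrote, unlike raw sprintf() into a fixed-size scratch area */
cnt += scnprintf(pbuf + cnt, limit - cnt, "%u %c %d",
		 ep->tstamp, ep->type, ep->status);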
geterrposition(void)
{
ErrorData *edata = &errordata[errordata_stack_depth];
/* we don't bother incrementing recursion_depth */
CHECK_STACK_DEPTH();
return edata->cursorpos;
} | 0 | [
"CWE-89"
] | postgres | 2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b | 109,355,290,890,845,090,000,000,000,000,000,000,000 | 9 | Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244 |
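A sketch of the guard variable the message describes; illustrative, though it follows the shape of the pqcomm.c change:
static bool PqCommReadingMsg = false;

void
pq_startmsgread(void)
{
	/* a nested message read means protocol sync is already lost */
	if (PqCommReadingMsg)
		ereport(FATAL,
				(errcode(ERRCODE_PROTOCOL_VIOLATION),
				 errmsg("terminating connection because protocol synchronization was lost")));
	PqCommReadingMsg = true;
}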
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
{
out->key = in->key;
out->uid = from_kuid_munged(current_user_ns(), in->uid);
out->gid = from_kgid_munged(current_user_ns(), in->gid);
out->cuid = from_kuid_munged(current_user_ns(), in->cuid);
out->cgid = from_kgid_munged(current_user_ns(), in->cgid);
out->mode = in->mode;
out->seq = in->seq;
} | 0 | [
"CWE-362",
"CWE-401"
] | linux | b9a532277938798b53178d5a66af6e2915cb27cf | 96,247,307,640,036,400,000,000,000,000,000,000,000 | 10 | Initialize msg/shm IPC objects before doing ipc_addid()
As reported by Dmitry Vyukov, we really shouldn't do ipc_addid() before
having initialized the IPC object state. Yes, we initialize the IPC
object in a locked state, but with all the lockless RCU lookup work,
that IPC object lock no longer means that the state cannot be seen.
We already did this for the IPC semaphore code (see commit e8577d1f0329:
"ipc/sem.c: fully initialize sem_array before making it visible") but we
clearly forgot about msg and shm.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Manfred Spraul <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
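The ordering rule the message states, sketched with shm field names for illustration:
/* initialize every field of the new object first ... */
shp->shm_cprid = task_tgid_vnr(current);
shp->shm_file  = file;
/* ... and only then publish it where lockless RCU lookups can see it */
error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);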
static bool msr_mtrr_valid(unsigned msr)
{
switch (msr) {
case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
case MSR_MTRRfix64K_00000:
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
case MSR_MTRRdefType:
case MSR_IA32_CR_PAT:
return true;
case 0x2f8:
return true;
}
return false;
} | 1 | [
"CWE-284"
] | linux | 9842df62004f366b9fed2423e24df10542ee0dc5 | 311,442,551,262,007,960,000,000,000,000,000,000,000 | 23 | KVM: MTRR: remove MSR 0x2f8
MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support
was introduced by 9ba075a664df ("KVM: MTRR support").
0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the
size of variable MTRRs") shrinked the array of VR MTRRs from 256 to 8,
which made access to index 124 out of bounds. The surrounding code only
WARNs in this situation, thus the guest gained a limited read/write
access to struct kvm_arch_vcpu.
0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR
MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8
was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was
not implemented in KVM, therefore 0x2f8 could never do anything useful
and getting rid of it is safe.
This fixes CVE-2016-3713.
Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs")
Cc: [email protected]
Reported-by: David Matlack <[email protected]>
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
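Per the message, the fix is pure deletion; dropping the stray case leaves only the 16 architecturally defined VR MTRR MSRs, 0x200-0x20f:
	/* removed by the fix:
	 *	case 0x2f8:
	 *		return true;
	 */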
bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
bool res= m_value.to_TIME(thd, ltime, fuzzydate);
DBUG_ASSERT(!res);
return res;
} | 0 | [
"CWE-617"
] | server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 129,291,774,359,142,300,000,000,000,000,000,000,000 | 6 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
TransactionHolder() {
ServerDB::db->transaction();
qsqQuery = new QSqlQuery();
} | 0 | [
"CWE-20"
] | mumble | 6b33dda344f89e5a039b7d79eb43925040654242 | 200,554,543,852,548,470,000,000,000,000,000,000,000 | 4 | Don't crash on long usernames |
PgDatabase *find_database(const char *name)
{
struct List *item, *tmp;
PgDatabase *db;
statlist_for_each(item, &database_list) {
db = container_of(item, PgDatabase, head);
if (strcmp(db->name, name) == 0)
return db;
}
/* also trying to find in idle autodatabases list */
statlist_for_each_safe(item, &autodatabase_idle_list, tmp) {
db = container_of(item, PgDatabase, head);
if (strcmp(db->name, name) == 0) {
db->inactive_time = 0;
statlist_remove(&autodatabase_idle_list, &db->head);
put_in_order(&db->head, &database_list, cmp_database);
return db;
}
}
return NULL;
} | 0 | [] | pgbouncer | 4b92112b820830b30cd7bc91bef3dd8f35305525 | 332,449,668,979,649,500,000,000,000,000,000,000,000 | 21 | add_database: fail gracefully if too long db name
Truncating & adding can lead to fatal() later.
It was not an issue before, but with autodb (* in [databases] section)
the database name can come from the network, thus allowing remote shutdown. |
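A sketch of the graceful failure in add_database(); the limit macro is an assumption:
if (strlen(name) >= MAX_DBNAME) {
	log_error("add_database: database name too long: %s", name);
	return NULL;	/* refuse instead of truncating and hitting fatal() later */
}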
xmlParseContent(xmlParserCtxtPtr ctxt) {
GROW;
while ((RAW != 0) &&
((RAW != '<') || (NXT(1) != '/')) &&
(ctxt->instate != XML_PARSER_EOF)) {
const xmlChar *test = CUR_PTR;
unsigned int cons = ctxt->input->consumed;
const xmlChar *cur = ctxt->input->cur;
/*
* First case : a Processing Instruction.
*/
if ((*cur == '<') && (cur[1] == '?')) {
xmlParsePI(ctxt);
}
/*
* Second case : a CDSection
*/
/* 2.6.0 test was *cur not RAW */
else if (CMP9(CUR_PTR, '<', '!', '[', 'C', 'D', 'A', 'T', 'A', '[')) {
xmlParseCDSect(ctxt);
}
/*
* Third case : a comment
*/
else if ((*cur == '<') && (NXT(1) == '!') &&
(NXT(2) == '-') && (NXT(3) == '-')) {
xmlParseComment(ctxt);
ctxt->instate = XML_PARSER_CONTENT;
}
/*
* Fourth case : a sub-element.
*/
else if (*cur == '<') {
xmlParseElement(ctxt);
}
/*
* Fifth case : a reference. If it has not been resolved,
* parsing returns its Name; create the node
*/
else if (*cur == '&') {
xmlParseReference(ctxt);
}
/*
* Last case, text. Note that References are handled directly.
*/
else {
xmlParseCharData(ctxt, 0);
}
GROW;
/*
* Pop-up of finished entities.
*/
while ((RAW == 0) && (ctxt->inputNr > 1))
xmlPopInput(ctxt);
SHRINK;
if ((cons == ctxt->input->consumed) && (test == CUR_PTR)) {
xmlFatalErr(ctxt, XML_ERR_INTERNAL_ERROR,
"detected an error in element content\n");
xmlHaltParser(ctxt);
break;
}
}
} | 0 | [
"CWE-119"
] | libxml2 | 35bcb1d758ed70aa7b257c9c3b3ff55e54e3d0da | 242,635,796,801,064,440,000,000,000,000,000,000,000 | 72 | Detect incoherency on GROW
The current pointer into the input has to be between the base and the end;
if not, stop everything: we have an internal state error. |
static double nan() {
#ifdef NAN
return (double)NAN;
#else
const double val_nan = -std::sqrt(-1.); return val_nan;
#endif
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 201,936,161,297,186,150,000,000,000,000,000,000,000 | 7 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in the file do not exceed the file size. |
find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
uint16_t qclass, time_t now, uint16_t searchtype, int stripfront,
int noexpiredabove, uint8_t* expiretop, size_t expiretoplen)
{
struct ub_packed_rrset_key *rrset;
uint8_t lablen;
if(stripfront) {
/* strip off so that DNAMEs have strict subdomain match */
lablen = *qname;
qname += lablen + 1;
qnamelen -= lablen + 1;
}
/* snip off front part of qname until the type is found */
while(qnamelen > 0) {
if((rrset = rrset_cache_lookup(env->rrset_cache, qname,
qnamelen, searchtype, qclass, 0, now, 0))) {
uint8_t* origqname = qname;
size_t origqnamelen = qnamelen;
if(!noexpiredabove)
return rrset;
/* if expiretop set, do not look above it, but
* qname is equal, so the just found result is also
* the nonexpired above part. */
if(expiretop && qnamelen == expiretoplen &&
query_dname_compare(qname, expiretop)==0)
return rrset;
/* check for expiry, but we have to let go of the rrset
* for the lock ordering */
lock_rw_unlock(&rrset->entry.lock);
/* the expired_above function always takes off one
* label (if qnamelen>0) and returns the final qname
* where it searched, so we can continue from there
* turning the O N*N search into O N. */
if(!rrset_expired_above(env, &qname, &qnamelen,
searchtype, qclass, now, expiretop,
expiretoplen)) {
/* we want to return rrset, but it may be
* gone from cache, if so, just loop like
* it was not in the cache in the first place.
*/
if((rrset = rrset_cache_lookup(env->
rrset_cache, origqname, origqnamelen,
searchtype, qclass, 0, now, 0))) {
return rrset;
}
}
log_nametypeclass(VERB_ALGO, "ignoring rrset because expired rrsets exist above it", origqname, searchtype, qclass);
continue;
}
/* snip off front label */
lablen = *qname;
qname += lablen + 1;
qnamelen -= lablen + 1;
}
return NULL;
} | 0 | [
"CWE-613",
"CWE-703"
] | unbound | f6753a0f1018133df552347a199e0362fc1dac68 | 174,599,950,550,841,900,000,000,000,000,000,000,000 | 59 | - Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699. |
register_update_monitor (PortalFlatpakUpdateMonitor *monitor,
const char *obj_path)
{
G_LOCK (update_monitors);
g_hash_table_insert (update_monitors, g_strdup (obj_path), g_object_ref (monitor));
/* Trigger update timeout if needed */
if (update_monitors_timeout == 0 && !update_monitors_timeout_running_thread)
update_monitors_timeout = g_timeout_add_seconds (opt_poll_timeout, check_all_for_updates_cb, NULL);
G_UNLOCK (update_monitors);
} | 0 | [
"CWE-94",
"CWE-74"
] | flatpak | aeb6a7ab0abaac4a8f4ad98b3df476d9de6b8bd4 | 137,726,271,539,670,350,000,000,000,000,000,000,000 | 13 | portal: Convert --env in extra-args into --env-fd
This hides overridden variables from the command-line, which means
processes running under other uids can't see them in /proc/*/cmdline,
which might be important if they contain secrets.
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2 |
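A sketch of the --env-fd idea: the variables travel over an inherited file descriptor, so only the fd number ever appears in /proc/<pid>/cmdline (the pipe-writing helper is an assumption):
int fd = write_env_to_pipe ("SECRET=value");	/* assumed helper emitting KEY=VALUE\0 pairs */
g_ptr_array_add (argv, g_strdup_printf ("--env-fd=%d", fd));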
static void sdhci_reset(SDHCIState *s)
{
DeviceState *dev = DEVICE(s);
timer_del(s->insert_timer);
timer_del(s->transfer_timer);
/* Set all registers to 0. Capabilities/Version registers are not cleared
* and assumed to always preserve their value, given to them during
* initialization */
memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad);
/* Reset other state based on current card insertion/readonly status */
sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus));
sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus));
s->data_count = 0;
s->stopped_state = sdhc_not_stopped;
s->pending_insert_state = false;
} | 0 | [
"CWE-119"
] | qemu | dfba99f17feb6d4a129da19d38df1bcd8579d1c3 | 13,149,480,386,913,570,000,000,000,000,000,000,000 | 20 | hw/sd/sdhci: Fix DMA Transfer Block Size field
The 'Transfer Block Size' field is 12-bit wide.
See section '2.2.2. Block Size Register (Offset 004h)' in datasheet.
Two different bug reproducer available:
- https://bugs.launchpad.net/qemu/+bug/1892960
- https://ruhr-uni-bochum.sciebo.de/s/NNWP2GfwzYKeKwE?path=%2Fsdhci_oob_write1
Cc: [email protected]
Buglink: https://bugs.launchpad.net/qemu/+bug/1892960
Fixes: d7dfca0807a ("hw/sdhci: introduce standard SD host controller")
Reported-by: Alexander Bulekov <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Prasad J Pandit <[email protected]>
Tested-by: Alexander Bulekov <[email protected]>
Message-Id: <[email protected]> |
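A sketch of the width fix: keep only the architected 12 bits when the guest writes the Block Size register; the exact upstream expression may differ.
s->blksize = deposit32(s->blksize, 0, 12, extract32(value, 0, 12));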
int nf_ct_frag6_init(void)
{
int ret = 0;
nf_frags.hashfn = nf_hashfn;
nf_frags.constructor = ip6_frag_init;
nf_frags.destructor = NULL;
nf_frags.skb_free = nf_skb_free;
nf_frags.qsize = sizeof(struct frag_queue);
nf_frags.match = ip6_frag_match;
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.secret_interval = 10 * 60 * HZ;
inet_frags_init(&nf_frags);
ret = register_pernet_subsys(&nf_ct_net_ops);
if (ret)
inet_frags_fini(&nf_frags);
return ret;
} | 0 | [] | linux | 3ef0eb0db4bf92c6d2510fe5c4dc51852746f206 | 61,709,244,321,490,670,000,000,000,000,000,000,000 | 20 | net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queues' LRU (Least-Recently-Used) list
required taking the hash writer lock. However, the LRU list isn't
tied to the hash at all, so we can use a separate lock for it.
Original-idea-by: Florian Westphal <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
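A sketch of the split, in the shape the series introduced: the LRU list gets its own spinlock, so moving an entry no longer takes the hash writer lock.
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}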
innodb_store(
/*=========*/
ENGINE_HANDLE* handle, /*!< in: Engine Handle */
const void* cookie, /*!< in: connection cookie */
item* item, /*!< out: result to fill */
uint64_t* cas, /*!< in: cas value */
ENGINE_STORE_OPERATION op, /*!< in: type of operation */
uint16_t vbucket __attribute__((unused)))
/*!< in: bucket, used by default
engine only */
{
struct innodb_engine* innodb_eng = innodb_handle(handle);
uint16_t len = hash_item_get_key_len(item);
char* value = hash_item_get_key(item);
uint64_t exptime = hash_item_get_exp(item);
uint64_t flags = hash_item_get_flag(item);
ENGINE_ERROR_CODE result;
uint64_t input_cas;
innodb_conn_data_t* conn_data;
meta_cfg_info_t* meta_info = innodb_eng->meta_info;
uint32_t val_len = ((hash_item*)item)->nbytes;
size_t key_len = len;
ENGINE_ERROR_CODE err_ret = ENGINE_SUCCESS;
if (meta_info->set_option == META_CACHE_OPT_DISABLE) {
return(ENGINE_SUCCESS);
}
if (meta_info->set_option == META_CACHE_OPT_DEFAULT
|| meta_info->set_option == META_CACHE_OPT_MIX) {
result = store_item(default_handle(innodb_eng), item, cas,
op, cookie);
if (meta_info->set_option == META_CACHE_OPT_DEFAULT) {
return(result);
}
}
err_ret = check_key_name_for_map_switch(handle, cookie,
value, &key_len);
if (err_ret != ENGINE_SUCCESS) {
return(err_ret);
}
/* If no key is provided, return here */
if (key_len <= 0) {
return(ENGINE_NOT_STORED);
}
conn_data = innodb_conn_init(innodb_eng, cookie, CONN_MODE_WRITE,
IB_LOCK_X, false, NULL);
if (!conn_data) {
return(ENGINE_NOT_STORED);
}
input_cas = hash_item_get_cas(item);
result = innodb_api_store(innodb_eng, conn_data, value + len - key_len,
key_len, val_len, exptime, cas, input_cas,
flags, op);
innodb_api_cursor_reset(innodb_eng, conn_data, CONN_OP_WRITE,
result == ENGINE_SUCCESS);
return(result);
} | 0 | [] | mysql-server | 659514dc83299a7d8c7defeb543be4339fbe1ee1 | 75,580,833,110,670,770,000,000,000,000,000,000,000 | 67 | Bug #25147515 SET DAEMON_MEMCACHED_R_BATCH_SIZE GREATER THAN 1 MAKE MYSQLD CRASHED
PROBLEM
-------
User starts a "get" the the connection with SET DAEMON_MEMCACHED_R_BATCH_SIZE= 5
and with binlog on. Since "get" is a read transaction this connection is not
allocated any conn_data->thd (which is used for bin log commit).The connection
is kept open. Innodb background thread tries to commit transactions which have
exceed CONN_IDLE_TIME_TO_BK_COMMIT and whose read batch size > 0, when it finds
this connection it tries to attach conn_data->thd to bin log thread.Since
conn_data->thd is NULL it crashes.
FIX
---
Check conn_data->thd value before attaching it to binlog thread. |
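The guard the fix describes, sketched; the attach helper's name is an assumption:
/* only attach a THD to the binlog thread when one exists; read-only
 * connections were never allocated one */
if (conn_data->thd) {
	handler_thd_attach(conn_data->thd, NULL);
}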
PHP_METHOD(Phar, addEmptyDir)
{
char *dirname;
size_t dirname_len;
PHAR_ARCHIVE_OBJECT();
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &dirname, &dirname_len) == FAILURE) {
return;
}
if (dirname_len >= sizeof(".phar")-1 && !memcmp(dirname, ".phar", sizeof(".phar")-1)) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Cannot create a directory in magic \".phar\" directory");
return;
}
phar_mkdir(&phar_obj->archive, dirname, dirname_len);
} | 1 | [
"CWE-20"
] | php-src | 1e9b175204e3286d64dfd6c9f09151c31b5e099a | 112,124,004,199,892,240,000,000,000,000,000,000,000 | 18 | Fix bug #71860: Require valid paths for phar filenames |
TIFFWriteDirectoryTagSampleformatArray(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, uint32 count, double* value)
{
static const char module[] = "TIFFWriteDirectoryTagSampleformatArray";
void* conv;
uint32 i;
int ok;
conv = _TIFFmalloc(count*sizeof(double));
if (conv == NULL)
{
TIFFErrorExt(tif->tif_clientdata, module, "Out of memory");
return (0);
}
switch (tif->tif_dir.td_sampleformat)
{
case SAMPLEFORMAT_IEEEFP:
if (tif->tif_dir.td_bitspersample<=32)
{
for (i = 0; i < count; ++i)
((float*)conv)[i] = TIFFClampDoubleToFloat(value[i]);
ok = TIFFWriteDirectoryTagFloatArray(tif,ndir,dir,tag,count,(float*)conv);
}
else
{
ok = TIFFWriteDirectoryTagDoubleArray(tif,ndir,dir,tag,count,value);
}
break;
case SAMPLEFORMAT_INT:
if (tif->tif_dir.td_bitspersample<=8)
{
for (i = 0; i < count; ++i)
((int8*)conv)[i] = TIFFClampDoubleToInt8(value[i]);
ok = TIFFWriteDirectoryTagSbyteArray(tif,ndir,dir,tag,count,(int8*)conv);
}
else if (tif->tif_dir.td_bitspersample<=16)
{
for (i = 0; i < count; ++i)
((int16*)conv)[i] = TIFFClampDoubleToInt16(value[i]);
ok = TIFFWriteDirectoryTagSshortArray(tif,ndir,dir,tag,count,(int16*)conv);
}
else
{
for (i = 0; i < count; ++i)
((int32*)conv)[i] = TIFFClampDoubleToInt32(value[i]);
ok = TIFFWriteDirectoryTagSlongArray(tif,ndir,dir,tag,count,(int32*)conv);
}
break;
case SAMPLEFORMAT_UINT:
if (tif->tif_dir.td_bitspersample<=8)
{
for (i = 0; i < count; ++i)
((uint8*)conv)[i] = TIFFClampDoubleToUInt8(value[i]);
ok = TIFFWriteDirectoryTagByteArray(tif,ndir,dir,tag,count,(uint8*)conv);
}
else if (tif->tif_dir.td_bitspersample<=16)
{
for (i = 0; i < count; ++i)
((uint16*)conv)[i] = TIFFClampDoubleToUInt16(value[i]);
ok = TIFFWriteDirectoryTagShortArray(tif,ndir,dir,tag,count,(uint16*)conv);
}
else
{
for (i = 0; i < count; ++i)
((uint32*)conv)[i] = TIFFClampDoubleToUInt32(value[i]);
ok = TIFFWriteDirectoryTagLongArray(tif,ndir,dir,tag,count,(uint32*)conv);
}
break;
default:
ok = 0;
}
_TIFFfree(conv);
return (ok);
} | 0 | [
"CWE-617"
] | libtiff | de144fd228e4be8aa484c3caf3d814b6fa88c6d9 | 162,506,996,761,894,620,000,000,000,000,000,000,000 | 74 | TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963 |
static void rng_egd_class_init(ObjectClass *klass, void *data)
{
RngBackendClass *rbc = RNG_BACKEND_CLASS(klass);
rbc->request_entropy = rng_egd_request_entropy;
rbc->opened = rng_egd_opened;
} | 0 | [
"CWE-119"
] | qemu | 60253ed1e6ec6d8e5ef2efe7bf755f475dce9956 | 201,202,626,614,674,160,000,000,000,000,000,000,000 | 7 | rng: add request queue support to rng-random
Requests are now created in the RngBackend parent class and the
code path is shared by both rng-egd and rng-random.
This commit fixes the rng-random implementation which processed
only one request at a time and simply discarded all but the most
recent one. In the guest this manifested as delayed completion
of reads from virtio-rng, i.e. a read was completed only after
another read was issued.
By switching rng-random to use the same request queue as rng-egd,
the unsafe stack-based allocation of the entropy buffer is
eliminated and replaced with g_malloc.
Signed-off-by: Ladi Prosek <[email protected]>
Reviewed-by: Amit Shah <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Amit Shah <[email protected]> |
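The allocation change the message mentions, sketched: size the entropy buffer on the heap from the request instead of a stack array.
req->data = g_malloc(req->size);	/* heap, not a guest-size stack buffer */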
gdk_pixbuf_new_subpixbuf (GdkPixbuf *src_pixbuf,
int src_x,
int src_y,
int width,
int height)
{
guchar *pixels;
GdkPixbuf *sub;
g_return_val_if_fail (GDK_IS_PIXBUF (src_pixbuf), NULL);
g_return_val_if_fail (src_x >= 0 && src_x + width <= src_pixbuf->width, NULL);
g_return_val_if_fail (src_y >= 0 && src_y + height <= src_pixbuf->height, NULL);
pixels = (gdk_pixbuf_get_pixels (src_pixbuf)
+ src_y * src_pixbuf->rowstride
+ src_x * src_pixbuf->n_channels);
sub = gdk_pixbuf_new_from_data (pixels,
src_pixbuf->colorspace,
src_pixbuf->has_alpha,
src_pixbuf->bits_per_sample,
width, height,
src_pixbuf->rowstride,
NULL, NULL);
/* Keep a reference to src_pixbuf */
g_object_ref (src_pixbuf);
g_object_set_qdata_full (G_OBJECT (sub),
g_quark_from_static_string ("gdk-pixbuf-subpixbuf-src"),
src_pixbuf,
(GDestroyNotify) g_object_unref);
return sub;
} | 0 | [] | gdk-pixbuf | deb78d971c4bcb9e3ccbb71e7925bc6baa707188 | 329,761,908,941,371,900,000,000,000,000,000,000,000 | 35 | Use g_try_malloc_n where it makes sense
This lets us avoid some manual overflow checks. |
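The pattern the message describes, sketched: g_try_malloc_n() performs the n * size overflow check internally, so the manual guard disappears.
guchar *pixels = g_try_malloc_n (height, rowstride);	/* NULL on overflow or OOM */
if (pixels == NULL)
        return NULL;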
static GF_ProtectionSchemeInfoBox *isom_get_sinf_entry(GF_TrackBox *trak, u32 sampleDescriptionIndex, u32 scheme_type, GF_SampleEntryBox **out_sea)
{
u32 i=0;
GF_SampleEntryBox *sea=NULL;
GF_ProtectionSchemeInfoBox *sinf;
Media_GetSampleDesc(trak->Media, sampleDescriptionIndex, &sea, NULL);
if (!sea) return NULL;
i = 0;
while ((sinf = (GF_ProtectionSchemeInfoBox*)gf_list_enum(sea->child_boxes, &i))) {
if (sinf->type != GF_ISOM_BOX_TYPE_SINF) continue;
if (sinf->original_format && sinf->scheme_type && sinf->info) {
if (!scheme_type || (sinf->scheme_type->scheme_type == scheme_type)) {
if (out_sea)
*out_sea = sea;
return sinf;
}
}
}
return NULL;
} | 0 | [
"CWE-476"
] | gpac | 3b84ffcbacf144ce35650df958432f472b6483f8 | 100,988,039,882,580,680,000,000,000,000,000,000,000 | 23 | fixed #1735 |
get_max_sample_size_absolute(void)
{
return (int) networkstatus_get_param(NULL, "guard-max-sample-size",
DFLT_MAX_SAMPLE_SIZE,
1, INT32_MAX);
} | 0 | [
"CWE-200"
] | tor | 665baf5ed5c6186d973c46cdea165c0548027350 | 36,499,354,537,593,900,000,000,000,000,000,000,000 | 6 | Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377. |
void* OwnedImpl::linearize(uint32_t size) {
RELEASE_ASSERT(size <= length(), "Linearize size exceeds buffer size");
if (slices_.empty()) {
return nullptr;
}
uint64_t linearized_size = 0;
uint64_t num_slices_to_linearize = 0;
for (const auto& slice : slices_) {
num_slices_to_linearize++;
linearized_size += slice->dataSize();
if (linearized_size >= size) {
break;
}
}
if (num_slices_to_linearize > 1) {
auto new_slice = OwnedSlice::create(linearized_size);
uint64_t bytes_copied = 0;
Slice::Reservation reservation = new_slice->reserve(linearized_size);
ASSERT(reservation.mem_ != nullptr);
ASSERT(reservation.len_ == linearized_size);
auto dest = static_cast<uint8_t*>(reservation.mem_);
do {
uint64_t data_size = slices_.front()->dataSize();
memcpy(dest, slices_.front()->data(), data_size);
bytes_copied += data_size;
dest += data_size;
slices_.pop_front();
} while (bytes_copied < linearized_size);
ASSERT(dest == static_cast<const uint8_t*>(reservation.mem_) + linearized_size);
new_slice->commit(reservation);
slices_.emplace_front(std::move(new_slice));
}
return slices_.front()->data();
} | 1 | [
"CWE-401"
] | envoy | 5eba69a1f375413fb93fab4173f9c393ac8c2818 | 171,690,173,566,722,950,000,000,000,000,000,000,000 | 34 | [buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]> |
cmd_pipe_pane_write_callback(__unused struct bufferevent *bufev, void *data)
{
struct window_pane *wp = data;
log_debug("%%%u pipe empty", wp->id);
if (window_pane_destroy_ready(wp))
server_destroy_pane(wp, 1);
} | 0 | [] | src | b32e1d34e10a0da806823f57f02a4ae6e93d756e | 144,252,540,149,079,370,000,000,000,000,000,000,000 | 9 | evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547. |
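The check implied by the message, sketched in the surrounding function's style:
wp->pipe_event = bufferevent_new(wp->pipe_fd, NULL,
    cmd_pipe_pane_write_callback, cmd_pipe_pane_error_callback, wp);
if (wp->pipe_event == NULL)
	fatalx("out of memory");	/* don't dereference a NULL bufferevent */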
static void cirrus_write_hidden_dac(CirrusVGAState * s, int reg_value)
{
if (s->cirrus_hidden_dac_lockindex == 4) {
s->cirrus_hidden_dac_data = reg_value;
#if defined(DEBUG_CIRRUS)
printf("cirrus: outport hidden DAC, value %02x\n", reg_value);
#endif
}
s->cirrus_hidden_dac_lockindex = 0;
} | 0 | [
"CWE-787"
] | qemu | b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef | 75,705,275,470,431,860,000,000,000,000,000,000,000 | 10 | CVE-2007-1320 - Cirrus LGD-54XX "bitblt" heap overflow
I have just noticed that the patch for CVE-2007-1320 has never been applied
to the QEMU CVS. Please find it below.
| Multiple heap-based buffer overflows in the cirrus_invalidate_region
| function in the Cirrus VGA extension in QEMU 0.8.2, as used in Xen and
| possibly other products, might allow local users to execute arbitrary
| code via unspecified vectors related to "attempting to mark
| non-existent regions as dirty," aka the "bitblt" heap overflow.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4340 c046a42c-6fe2-441c-8c8c-71466251a162 |
static void tq_freezethaw(struct thread_q *tq, bool frozen)
{
mutex_lock(&tq->mutex);
tq->frozen = frozen;
pthread_cond_signal(&tq->cond);
mutex_unlock(&tq->mutex);
} | 0 | [
"CWE-20",
"CWE-703"
] | sgminer | 910c36089940e81fb85c65b8e63dcd2fac71470c | 248,913,955,051,531,280,000,000,000,000,000,000,000 | 7 | stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime.
Might have introduced a memory leak, don't have time to check. :(
Should the other hex2bin()'s be checked?
Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this. |
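The validation pattern the message implies, sketched; the error label is an assumption:
if (unlikely(!hex2bin(pool->bbversion, bbversion, 4))) {
	applog(LOG_ERR, "Failed to convert bbversion to binary in parse_notify");
	goto out_unlock;	/* reject the notify instead of crashing */
}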
static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer *head;
int ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
io_ring_submit_lock(ctx, !force_nonblock);
lockdep_assert_held(&ctx->uring_lock);
ret = -ENOENT;
head = xa_load(&ctx->io_buffers, p->bgid);
if (head)
ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
if (ret < 0)
req_set_fail(req);
/* complete before unlock, IOPOLL may need the lock */
__io_req_complete(req, issue_flags, ret, 0);
io_ring_submit_unlock(ctx, !force_nonblock);
return 0;
} | 0 | [
"CWE-125"
] | linux | 89c2b3b74918200e46699338d7bcc19b1ea12110 | 68,480,571,562,033,700,000,000,000,000,000,000,000 | 24 | io_uring: reexpand under-reexpanded iters
[ 74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900
[ 74.212778] Read of size 8 at addr ffff888025dc78b8 by task
syz-executor.0/828
[ 74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted
5.14.0-rc3-next-20210730 #1
[ 74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[ 74.219033] Call Trace:
[ 74.219683] dump_stack_lvl+0x8b/0xb3
[ 74.220706] print_address_description.constprop.0+0x1f/0x140
[ 74.224226] kasan_report.cold+0x7f/0x11b
[ 74.226085] iov_iter_revert+0x809/0x900
[ 74.227960] io_write+0x57d/0xe40
[ 74.232647] io_issue_sqe+0x4da/0x6a80
[ 74.242578] __io_queue_sqe+0x1ac/0xe60
[ 74.245358] io_submit_sqes+0x3f6e/0x76a0
[ 74.248207] __do_sys_io_uring_enter+0x90c/0x1a20
[ 74.257167] do_syscall_64+0x3b/0x90
[ 74.257984] entry_SYSCALL_64_after_hwframe+0x44/0xae
old_size = iov_iter_count();
...
iov_iter_revert(old_size - iov_iter_count());
If iov_iter_revert() is done based on the initial size as above, and the
iter is truncated and not reexpanded in the middle, it miscalculates
borders causing problems. This trace is due to no one reexpanding after
generic_write_checks().
Now iters store how many bytes have been truncated, so reexpand them to
the initial state right before reverting.
Cc: [email protected]
Reported-by: Palash Oswal <[email protected]>
Reported-by: Sudip Mukherjee <[email protected]>
Reported-and-tested-by: [email protected]
Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Al Viro <[email protected]> |
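The rule from the message, sketched; the truncated-byte counter on the iterator is taken from the patch description, so treat the field name as illustrative:
/* undo any mid-flight truncation before computing the revert distance */
iov_iter_reexpand(iter, iter->count + iter->truncated);
iov_iter_revert(iter, old_size - iov_iter_count(iter));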
static inline s16 fixp_new16(s16 a)
{
return ((s32)a) >> (16 - FRAC_N);
} | 0 | [
"CWE-416"
] | linux | fa3a5a1880c91bb92594ad42dfe9eedad7996b86 | 220,738,562,585,515,670,000,000,000,000,000,000,000 | 4 | Input: ff-memless - kill timer in destroy()
No timer must be left running when the device goes away.
Signed-off-by: Oliver Neukum <[email protected]>
Reported-and-tested-by: [email protected]
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Dmitry Torokhov <[email protected]> |
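A sketch of the fix described above:
static void ml_ff_destroy(struct ff_device *ff)
{
	struct ml_device *ml = ff->private;

	/* no timer callback may still be running when the memory goes away */
	del_timer_sync(&ml->timer);
	kfree(ml->private);
}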
void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps)
{
return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
			       SCM_TSTAMP_SND);
} | 0 | [
"CWE-703",
"CWE-125"
] | linux | 8605330aac5a5785630aec8f64378a54891937cc | 190,325,502,105,739,270,000,000,000,000,000,000,000 | 6 | tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs
__sock_recv_timestamp can be called for both normal skbs (for
receive timestamps) and for skbs on the error queue (for transmit
timestamps).
Commit 1c885808e456
(tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING)
assumes any skb passed to __sock_recv_timestamp is from
the error queue, containing OPT_STATS in the content of the skb.
This results in accessing invalid memory or generating junk
data.
To fix this, set skb->pkt_type to PACKET_OUTGOING for packets
on the error queue. This is safe because on the receive path
on local sockets skb->pkt_type is never set to PACKET_OUTGOING.
With that, copy OPT_STATS from a packet only if its pkt_type
is PACKET_OUTGOING.
Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING")
Reported-by: JongHwan Kim <[email protected]>
Signed-off-by: Soheil Hassas Yeganeh <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
attr_writer(mrb_state *mrb, mrb_value obj)
{
mrb_value name = mrb_proc_cfunc_env_get(mrb, 0);
mrb_value val;
mrb_get_args(mrb, "o", &val);
mrb_iv_set(mrb, obj, to_sym(mrb, name), val);
return val;
} | 0 | [
"CWE-476",
"CWE-415"
] | mruby | faa4eaf6803bd11669bc324b4c34e7162286bfa3 | 291,345,124,765,866,230,000,000,000,000,000,000,000 | 9 | `mrb_class_real()` did not work for `BasicObject`; fix #4037 |
spnego_gss_export_sec_context(
OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t interprocess_token)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = *(spnego_gss_ctx_id_t *)context_handle;
/* We don't currently support exporting partially established
* contexts. */
if (!sc->opened)
return GSS_S_UNAVAILABLE;
ret = gss_export_sec_context(minor_status,
&sc->ctx_handle,
interprocess_token);
if (sc->ctx_handle == GSS_C_NO_CONTEXT) {
release_spnego_ctx(&sc);
*context_handle = GSS_C_NO_CONTEXT;
}
return (ret);
} | 0 | [
"CWE-18",
"CWE-763"
] | krb5 | b51b33f2bc5d1497ddf5bd107f791c101695000d | 296,891,526,999,273,350,000,000,000,000,000,000,000 | 22 | Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup |
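A sketch of the state the message adds to the SPNEGO context; the real structure carries much more negotiation state:
typedef struct {
	gss_ctx_id_t	ctx_handle;	/* underlying mech context, kept after establishment */
	int		initiate;	/* were we the initiator? */
	int		opened;		/* is the context fully established yet? */
} spnego_gss_ctx_id_rec;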
rpc_init_mempool(void)
{
/*
* The following is not strictly a mempool initialisation,
* but there is no harm in doing it here
*/
rpc_init_wait_queue(&delay_queue, "delayq");
if (!rpciod_start())
goto err_nomem;
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
return -ENOMEM;
} | 0 | [
"CWE-400",
"CWE-399",
"CWE-703"
] | linux | 0b760113a3a155269a3fba93a409c640031dd68f | 40,017,319,184,471,020,000,000,000,000,000,000,000 | 35 | NLM: Don't hang forever on NLM unlock requests
If the NLM daemon is killed on the NFS server, we can currently end up
hanging forever on an 'unlock' request, instead of aborting. Basically,
if the rpcbind request fails, or the server keeps returning garbage, we
really want to quit instead of retrying.
Tested-by: Vasily Averin <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
Cc: [email protected] |
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
int err;
shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
PAGE_ALIGN(eb->batch_len));
if (IS_ERR(shadow_batch_obj))
return ERR_CAST(shadow_batch_obj);
err = intel_engine_cmd_parser(eb->engine,
eb->batch->obj,
shadow_batch_obj,
eb->batch_start_offset,
eb->batch_len,
is_master);
if (err) {
if (err == -EACCES) /* unhandled chained batch */
vma = NULL;
else
vma = ERR_PTR(err);
goto out;
}
vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
goto out;
eb->vma[eb->buffer_count] = i915_vma_get(vma);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
vma->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
out:
i915_gem_object_unpin_pages(shadow_batch_obj);
return vma;
} | 0 | [
"CWE-20"
] | linux | 594cc251fdd0d231d342d88b2fdff4bc42fb0690 | 143,642,378,135,960,650,000,000,000,000,000,000,000 | 39 | make 'user_access_begin()' do 'access_ok()'
Originally, the rule used to be that you'd have to do access_ok()
separately, and then user_access_begin() before actually doing the
direct (optimized) user access.
But experience has shown that people then decide not to do access_ok()
at all, and instead rely on it being implied by other operations or
similar. Which makes it very hard to verify that the access has
actually been range-checked.
If you use the unsafe direct user accesses, hardware features (either
SMAP - Supervisor Mode Access Protection - on x86, or PAN - Privileged
Access Never - on ARM) do force you to use user_access_begin(). But
nothing really forces the range check.
By putting the range check into user_access_begin(), we actually force
people to do the right thing (tm), and the range check vill be visible
near the actual accesses. We have way too long a history of people
trying to avoid them.
Signed-off-by: Linus Torvalds <[email protected]> |
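The combined helper the message describes, sketched close to its x86 form (it pairs with user_access_end(), not shown):
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin();	/* e.g. stac on x86 */
	return 1;
}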
uint8_t winsdb_delete(struct winsdb_handle *h, struct winsdb_record *rec)
{
struct ldb_context *wins_db = h->ldb;
TALLOC_CTX *tmp_ctx = talloc_new(wins_db);
struct ldb_dn *dn;
int trans;
int ret;
trans = ldb_transaction_start(wins_db);
if (trans != LDB_SUCCESS) goto failed;
dn = winsdb_dn(tmp_ctx, wins_db, rec->name);
if (dn == NULL) goto failed;
ret = ldb_delete(wins_db, dn);
if (ret != LDB_SUCCESS) goto failed;
trans = ldb_transaction_commit(wins_db);
if (trans != LDB_SUCCESS) goto failed;
wins_hook(h, rec, WINS_HOOK_DELETE, h->hook_script);
talloc_free(tmp_ctx);
return NBT_RCODE_OK;
failed:
if (trans == LDB_SUCCESS) ldb_transaction_cancel(wins_db);
talloc_free(tmp_ctx);
return NBT_RCODE_SVR;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 317,040,827,388,252,980,000,000,000,000,000,000,000 | 30 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
static bool is_orphaned_child(struct perf_event *event)
{
return is_orphaned_event(event->parent);
} | 0 | [
"CWE-284",
"CWE-264"
] | linux | f63a8daa5812afef4f06c962351687e1ff9ccb2b | 177,891,736,163,378,100,000,000,000,000,000,000,000 | 4 | perf: Fix event->ctx locking
There have been a few reported issues wrt. the lack of locking around
changing event->ctx. This patch tries to address those.
It avoids the whole rwsem thing; and while it appears to work, please
give it some thought in review.
What I did fail at is sensible runtime checks on the use of
event->ctx; the RCU use makes it very hard.
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
apply_wqattrs_prepare(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
{
struct apply_wqattrs_ctx *ctx;
struct workqueue_attrs *new_attrs, *tmp_attrs;
int node;
lockdep_assert_held(&wq_pool_mutex);
ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
GFP_KERNEL);
new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
if (!ctx || !new_attrs || !tmp_attrs)
goto out_free;
/*
* Calculate the attrs of the default pwq.
* If the user configured cpumask doesn't overlap with the
* wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
*/
copy_workqueue_attrs(new_attrs, attrs);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
if (unlikely(cpumask_empty(new_attrs->cpumask)))
cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
/*
* We may create multiple pwqs with differing cpumasks. Make a
* copy of @new_attrs which will be modified and used to obtain
* pools.
*/
copy_workqueue_attrs(tmp_attrs, new_attrs);
/*
* If something goes wrong during CPU up/down, we'll fall back to
* the default pwq covering whole @attrs->cpumask. Always create
* it even if we don't use it immediately.
*/
ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
if (!ctx->dfl_pwq)
goto out_free;
for_each_node(node) {
if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
if (!ctx->pwq_tbl[node])
goto out_free;
} else {
ctx->dfl_pwq->refcnt++;
ctx->pwq_tbl[node] = ctx->dfl_pwq;
}
}
/* save the user configured attrs and sanitize it. */
copy_workqueue_attrs(new_attrs, attrs);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
ctx->attrs = new_attrs;
ctx->wq = wq;
free_workqueue_attrs(tmp_attrs);
return ctx;
out_free:
free_workqueue_attrs(tmp_attrs);
free_workqueue_attrs(new_attrs);
apply_wqattrs_cleanup(ctx);
return NULL;
} | 0 | [
"CWE-200"
] | tip | dfb4357da6ddbdf57d583ba64361c9d792b0e0b1 | 272,687,994,964,582,000,000,000,000,000,000,000,000 | 69 | time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]> |
irc_server_set_prefix_modes_chars (struct t_irc_server *server,
const char *prefix)
{
char *pos;
int i, length_modes, length_chars;
if (!server || !prefix)
return;
/* free previous values */
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
/* assign new values */
pos = strchr (prefix, ')');
if (pos)
{
server->prefix_modes = weechat_strndup (prefix + 1,
pos - prefix - 1);
if (server->prefix_modes)
{
pos++;
length_modes = strlen (server->prefix_modes);
length_chars = strlen (pos);
server->prefix_chars = malloc (length_modes + 1);
if (server->prefix_chars)
{
for (i = 0; i < length_modes; i++)
{
server->prefix_chars[i] = (i < length_chars) ? pos[i] : ' ';
}
server->prefix_chars[length_modes] = '\0';
}
else
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
}
}
} | 1 | [
"CWE-120",
"CWE-787"
] | weechat | 40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f | 165,349,132,893,930,520,000,000,000,000,000,000,000 | 49 | irc: fix crash when a new message 005 is received with longer nick prefixes
Thanks to Stuart Nevans Locke for reporting the issue. |
void DocumentSourceUnionWith::recordPlanSummaryStats(const Pipeline& pipeline) {
for (auto&& source : pipeline.getSources()) {
if (auto specificStats = source->getSpecificStats()) {
specificStats->accumulate(_stats.planSummaryStats);
}
}
} | 0 | [] | mongo | 6518b22420c5bbd92c42caf907671c3a2b140bb6 | 236,738,503,768,345,120,000,000,000,000,000,000,000 | 7 | SERVER-58203 factor out logging statements into helper functions |
static void kvm_init_msr_list(void)
{
struct x86_pmu_capability x86_pmu;
u32 dummy[2];
unsigned i;
BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
"Please update the fixed PMCs in msrs_to_saved_all[]");
perf_get_x86_pmu_capability(&x86_pmu);
num_msrs_to_save = 0;
num_emulated_msrs = 0;
num_msr_based_features = 0;
for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
continue;
/*
* Even MSRs that are valid in the host may not be exposed
* to the guests in some cases.
*/
switch (msrs_to_save_all[i]) {
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported())
continue;
break;
case MSR_TSC_AUX:
if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
!kvm_cpu_cap_has(X86_FEATURE_RDPID))
continue;
break;
case MSR_IA32_UMWAIT_CONTROL:
if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
continue;
break;
case MSR_IA32_RTIT_CTL:
case MSR_IA32_RTIT_STATUS:
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
continue;
break;
case MSR_IA32_RTIT_CR3_MATCH:
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
!intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
continue;
break;
case MSR_IA32_RTIT_OUTPUT_BASE:
case MSR_IA32_RTIT_OUTPUT_MASK:
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
(!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
!intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
continue;
break;
case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
continue;
break;
case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
break;
case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
break;
default:
break;
}
msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
}
for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
continue;
emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
}
for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
struct kvm_msr_entry msr;
msr.index = msr_based_features_all[i];
if (kvm_get_msr_feature(&msr))
continue;
msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
}
} | 0 | [
"CWE-476"
] | linux | 55749769fe608fa3f4a075e42e89d237c8e37637 | 56,096,596,397,551,970,000,000,000,000,000,000,000 | 94 | KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
sdap_ad_tokengroups_initgroups_send(TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
struct sdap_id_ctx *id_ctx,
struct sdap_id_conn_ctx *conn,
struct sdap_options *opts,
struct sysdb_ctx *sysdb,
struct sss_domain_info *domain,
struct sdap_handle *sh,
const char *name,
const char *orig_dn,
int timeout,
bool use_id_mapping)
{
struct sdap_ad_tokengroups_initgroups_state *state = NULL;
struct tevent_req *req = NULL;
struct tevent_req *subreq = NULL;
errno_t ret;
req = tevent_req_create(mem_ctx, &state,
struct sdap_ad_tokengroups_initgroups_state);
if (req == NULL) {
DEBUG(SSSDBG_CRIT_FAILURE, "tevent_req_create() failed\n");
return NULL;
}
state->use_id_mapping = use_id_mapping;
state->domain = domain;
if (state->use_id_mapping && !IS_SUBDOMAIN(state->domain)) {
subreq = sdap_ad_tokengroups_initgr_mapping_send(state, ev, opts,
sysdb, domain, sh,
name, orig_dn,
timeout);
} else {
subreq = sdap_ad_tokengroups_initgr_posix_send(state, ev, id_ctx, conn,
opts, sysdb, domain, sh,
name, orig_dn,
timeout);
}
if (subreq == NULL) {
ret = ENOMEM;
goto immediately;
}
tevent_req_set_callback(subreq, sdap_ad_tokengroups_initgroups_done, req);
return req;
immediately:
if (ret == EOK) {
tevent_req_done(req);
} else {
tevent_req_error(req, ret);
}
tevent_req_post(req, ev);
return req;
} | 0 | [
"CWE-264"
] | sssd | 191d7f7ce3de10d9e19eaa0a6ab3319bcd4ca95d | 243,026,377,898,783,340,000,000,000,000,000,000,000 | 58 | AD: process non-posix nested groups using tokenGroups
When initgr is performed for AD supporting tokenGroups, do not skip
non-posix groups.
Resolves:
https://fedorahosted.org/sssd/ticket/2343
Reviewed-by: Michal Židek <[email protected]>
(cherry picked from commit 4932db6258ccfb612a3a28eb6a618c2f042b9d58) |
int bcf_add_id(const bcf_hdr_t *hdr, bcf1_t *line, const char *id)
{
if ( !id ) return 0;
if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR);
kstring_t tmp;
tmp.l = 0; tmp.s = line->d.id; tmp.m = line->d.m_id;
int len = strlen(id);
char *dst = line->d.id;
while ( *dst && (dst=strstr(dst,id)) )
{
if ( dst[len]!=0 && dst[len]!=';' ) dst++; // a prefix, not a match
else if ( dst==line->d.id || dst[-1]==';' ) return 0; // already present
else dst++; // a suffix, not a match
}
if ( line->d.id && (line->d.id[0]!='.' || line->d.id[1]) )
{
tmp.l = strlen(line->d.id);
kputc(';',&tmp);
}
kputs(id,&tmp);
line->d.id = tmp.s; line->d.m_id = tmp.m;
line->d.shared_dirty |= BCF1_DIRTY_ID;
return 0;
} | 0 | [
"CWE-787"
] | htslib | dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c | 296,055,771,817,309,850,000,000,000,000,000,000,000 | 28 | Fix check for VCF record size
The check for excessive record size in vcf_parse_format() only
looked at individual fields. It was therefore possible to
exceed the limit and overflow fmt_aux_t::offset by having
multiple fields with a combined size that went over INT_MAX.
Fix by including the amount of memory used so far in the check.
Credit to OSS-Fuzz
Fixes oss-fuzz 24097 |
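A minimal sketch of the cumulative check the message describes — reject the record once the combined size of all fields so far would exceed the limit, instead of checking each field in isolation (mem_used and MAX_REC are illustrative names, not the htslib internals):

#include <limits.h>
#include <stddef.h>

/* Cumulative-size check: field_len[i] alone may pass a per-field
 * test, but the running total must also stay under the limit,
 * because fmt_aux_t::offset is an int. */
static int check_fields(const size_t *field_len, size_t nfields)
{
    const size_t MAX_REC = INT_MAX;
    size_t mem_used = 0;
    size_t i;

    for (i = 0; i < nfields; i++) {
        if (field_len[i] > MAX_REC - mem_used)
            return -1;   /* combined size would overflow the offset */
        mem_used += field_len[i];
    }
    return 0;
}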
int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t punch_start, punch_stop;
handle_t *handle;
unsigned int credits;
loff_t new_size, ioffset;
int ret;
/*
* We need to test this early because xfstests assumes that a
* collapse range of (0, 1) will return EOPNOTSUPP if the file
* system does not support collapse range.
*/
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
return -EOPNOTSUPP;
/* Collapse range works only on fs block size aligned offsets. */
if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
len & (EXT4_CLUSTER_SIZE(sb) - 1))
return -EINVAL;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
trace_ext4_collapse_range(inode, offset, len);
punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
*/
ioffset = round_down(offset, PAGE_SIZE);
/* Write out all dirty pages */
ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
LLONG_MAX);
if (ret)
return ret;
/* Take mutex lock */
mutex_lock(&inode->i_mutex);
/*
* There is no need to overlap collapse range with EOF, in which case
* it is effectively a truncate operation
*/
if (offset + len >= i_size_read(inode)) {
ret = -EINVAL;
goto out_mutex;
}
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
truncate_pagecache(inode, ioffset);
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_dio;
}
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, punch_start,
EXT_MAX_BLOCKS - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ext4_discard_preallocations(inode);
ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start, SHIFT_LEFT);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
new_size = i_size_read(inode) - len;
i_size_write(inode, new_size);
EXT4_I(inode)->i_disksize = new_size;
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
} | 1 | [
"CWE-362"
] | linux | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 333,318,933,843,042,230,000,000,000,000,000,000,000 | 122 | ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in a page fault faulting a page into a range that we
are punching after truncate_pagecache_range() has been called, and thus
we can end up with a page mapped to disk blocks that will shortly be
freed. Filesystem corruption will follow. Note that the same
race is avoided for truncate by checking the page fault offset against
i_size, but there isn't a similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
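The locking described in the message can be sketched as below — a simplified illustration of the ordering only (journalling, DIO waiting, and error paths omitted); i_mmap_sem is the rw semaphore the patch adds to ext4_inode_info:

/* hole punch / collapse range: writer side */
down_write(&EXT4_I(inode)->i_mmap_sem);
truncate_pagecache(inode, ioffset);
/* ... remove blocks from the extent tree ... */
up_write(&EXT4_I(inode)->i_mmap_sem);

/* page fault path: reader side, held over the whole fault so no page
 * can be instantiated inside a range that is being punched */
down_read(&EXT4_I(inode)->i_mmap_sem);
/* ... map blocks and fill the page ... */
up_read(&EXT4_I(inode)->i_mmap_sem);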
static void __perf_event_exit_context(void *__info)
{
struct remove_event re = { .detach_group = true };
struct perf_event_context *ctx = __info;
rcu_read_lock();
list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
__perf_remove_from_context(&re);
rcu_read_unlock();
} | 0 | [
"CWE-416",
"CWE-362"
] | linux | 12ca6ad2e3a896256f086497a7c7406a547ee373 | 124,610,015,799,457,150,000,000,000,000,000,000,000 | 10 | perf: Fix race in swevent hash
There's a race on CPU unplug where we free the swevent hash array
while it can still have events on. This will result in a
use-after-free which is BAD.
Simply do not free the hash array on unplug. This leaves the thing
around and no use-after-free takes place.
When the last swevent dies, we do a for_each_possible_cpu() iteration
anyway to clean these up, at which time we'll free it, so no leakage
will occur.
Reported-by: Sasha Levin <[email protected]>
Tested-by: Sasha Levin <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephane Eranian <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vince Weaver <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
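A simplified sketch of the fix (the helper names here are hypothetical, not the exact kernel symbols):

/* CPU hotplug path: before the fix the per-CPU swevent hash was freed
 * here, while events could still be using it -> use-after-free. */
static void swevent_cpu_dying(int cpu)
{
    /* after the fix: deliberately do nothing; keep the hash around */
}

/* Teardown of the last software event: the existing cleanup pass
 * already walks every possible CPU, so the hash is freed here, when
 * nothing can touch it anymore. */
static void release_swevent_hashes(void)
{
    int cpu;

    for_each_possible_cpu(cpu)
        free_swevent_hash(cpu);   /* hypothetical helper */
}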
static int setcos_activate_file(sc_card_t *card)
{
int r;
u8 sbuf[2];
sc_apdu_t apdu;
sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0x44, 0x00, 0x00);
apdu.data = sbuf;
r = sc_transmit_apdu(card, &apdu);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "APDU transmit failed");
r = sc_check_sw(card, apdu.sw1, apdu.sw2);
SC_TEST_RET(card->ctx, SC_LOG_DEBUG_NORMAL, r, "ACTIVATE_FILE returned error");
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r);
} | 0 | [
"CWE-125"
] | OpenSC | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 86,720,180,337,256,520,000,000,000,000,000,000,000 | 17 | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. |
/* Try to send if link is sender and has credit */
static void link_send(broker_t *b, pn_link_t *s) {
if (pn_link_is_sender(s) && pn_link_credit(s) > 0) {
const char *qname = pn_terminus_get_address(pn_link_source(s));
queue_t *q = queues_get(&b->queues, qname);
queue_send(q, s);
} | 0 | [] | qpid-proton | 159fac1f90d9b1ace1138d510176e7a5da54e9e9 | 329,284,906,369,405,800,000,000,000,000,000,000,000 | 7 | PROTON-2014: [c] Fix example broker to warn when it fails to set up ssl
- Also make send-ssl report the remote peer |
static void hda_codec_dev_unrealize(DeviceState *qdev)
{
HDACodecDevice *dev = HDA_CODEC_DEVICE(qdev);
HDACodecDeviceClass *cdc = HDA_CODEC_DEVICE_GET_CLASS(dev);
if (cdc->exit) {
cdc->exit(dev);
}
} | 0 | [
"CWE-787"
] | qemu | 79fa99831debc9782087e834382c577215f2f511 | 130,267,220,636,876,700,000,000,000,000,000,000,000 | 9 | hw/audio/intel-hda: Restrict DMA engine to memories (not MMIO devices)
Issue #542 reports a reentrancy problem when the DMA engine accesses
the HDA controller I/O registers. Fix by restricting the DMA engine
to memory regions (forbidding MMIO devices such as the HDA controller).
Reported-by: OSS-Fuzz (Issue 28435)
Reported-by: Alexander Bulekov <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Thomas Huth <[email protected]>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/542
CVE: CVE-2021-3611
Message-Id: <[email protected]>
Signed-off-by: Thomas Huth <[email protected]> |
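The idea behind the fix can be sketched as a check that the DMA target resolves to plain RAM before the engine touches it. This is an illustration of the approach only, assuming current QEMU memory-API signatures, not the exact patch:

/* Sketch: refuse DMA to anything that is not RAM, so a buffer
 * descriptor pointing back at the HDA controller's own MMIO
 * registers can no longer cause reentrancy. */
static bool dma_target_is_ram(AddressSpace *as, hwaddr addr, hwaddr len)
{
    MemoryRegion *mr;
    hwaddr xlat, plen = len;
    bool ok;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &plen, true,
                                 MEMTXATTRS_UNSPECIFIED);
    ok = memory_region_is_ram(mr) && plen >= len;
    rcu_read_unlock();
    return ok;
}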
bm_search_notrev(regex_t* reg, const UChar* target, const UChar* target_end,
const UChar* text, const UChar* text_end,
const UChar* text_range)
{
const UChar *s, *se, *t, *p, *end;
const UChar *tail;
int skip, tlen1;
#ifdef ONIG_DEBUG_SEARCH
fprintf(stderr, "bm_search_notrev: text: %d, text_end: %d, text_range: %d\n",
(int )text, (int )text_end, (int )text_range);
#endif
tail = target_end - 1;
tlen1 = tail - target;
end = text_range;
if (end + tlen1 > text_end)
end = text_end - tlen1;
s = text;
if (IS_NULL(reg->int_map)) {
while (s < end) {
p = se = s + tlen1;
t = tail;
while (*p == *t) {
if (t == target) return (UChar* )s;
p--; t--;
}
skip = reg->map[*se];
t = s;
do {
s += enclen(reg->enc, s);
} while ((s - t) < skip && s < end);
}
}
else {
while (s < end) {
p = se = s + tlen1;
t = tail;
while (*p == *t) {
if (t == target) return (UChar* )s;
p--; t--;
}
skip = reg->int_map[*se];
t = s;
do {
s += enclen(reg->enc, s);
} while ((s - t) < skip && s < end);
}
}
return (UChar* )NULL;
} | 0 | [
"CWE-125"
] | oniguruma | 690313a061f7a4fa614ec5cc8368b4f2284e059b | 106,266,520,355,629,360,000,000,000,000,000,000,000 | 54 | fix #57 : DATA_ENSURE() check must be before data access |
eog_no_images_error_message_area_new (GFile *file)
{
GtkWidget *message_area;
gchar *error_message = NULL;
if (file != NULL) {
gchar *uri_str, *unescaped_str, *pango_escaped_str;
uri_str = g_file_get_uri (file);
/* Unescape URI with respect to rules defined in RFC 3986. */
unescaped_str = g_uri_unescape_string (uri_str, NULL);
/* Escape the URI string with respect to pango markup.
This is necessary because the URI string can contain
for example "&" which will otherwise be interpreted
as a pango markup entity when inserted into a GtkLabel. */
pango_escaped_str = g_markup_escape_text (unescaped_str, -1);
error_message = g_strdup_printf (_("No images found in '%s'."),
pango_escaped_str);
g_free (pango_escaped_str);
g_free (uri_str);
g_free (unescaped_str);
} else {
error_message = g_strdup (_("The given locations contain no images."));
}
message_area = create_error_message_area (error_message,
NULL,
EOG_ERROR_MESSAGE_AREA_NO_BUTTONS);
g_free (error_message);
return message_area;
} | 0 | [
"CWE-787"
] | eog | e99a8c00f959652fe7c10e2fa5a3a7a5c25e6af4 | 302,573,211,408,500,130,000,000,000,000,000,000,000 | 35 | EogErrorMessageArea: Make sure error messages are valid UTF8
GMarkup requires valid UTF-8 input strings and would produce odd-looking
messages if given invalid input. This could also trigger an
out-of-bounds write in glib before 2.44.1. Reported by kaslovdmitri.
https://bugzilla.gnome.org/show_bug.cgi?id=770143 |
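A sketch of the sanitizing step the message implies — validate the string before handing it to GMarkup (the fallback text is illustrative; the exact handling in eog may differ):

#include <glib.h>

/* Never feed invalid UTF-8 into g_markup_escape_text(); glib before
 * 2.44.1 could write out of bounds on such input. */
static gchar *
escape_for_markup (const gchar *str)
{
    if (!g_utf8_validate (str, -1, NULL))
        return g_strdup ("(invalid encoding)");
    return g_markup_escape_text (str, -1);
}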
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
infop, int, options, struct rusage __user *, ru)
{
struct rusage r;
struct waitid_info info = {.status = 0};
long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
int signo = 0;
if (err > 0) {
signo = SIGCHLD;
err = 0;
if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
return -EFAULT;
}
if (!infop)
return err;
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
unsafe_put_user(0, &infop->si_errno, Efault);
unsafe_put_user(info.cause, &infop->si_code, Efault);
unsafe_put_user(info.pid, &infop->si_pid, Efault);
unsafe_put_user(info.uid, &infop->si_uid, Efault);
unsafe_put_user(info.status, &infop->si_status, Efault);
user_access_end();
return err;
Efault:
user_access_end();
return -EFAULT;
} | 1 | [
"CWE-20"
] | linux | 96ca579a1ecc943b75beba58bebb0356f6cc4b51 | 203,669,932,404,829,300,000,000,000,000,000,000,000 | 30 | waitid(): Add missing access_ok() checks
Adds missing access_ok() checks.
CVE-2017-5123
Reported-by: Chris Salls <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: Al Viro <[email protected]>
Fixes: 4c48abe91be0 ("waitid(): switch copyout of siginfo to unsafe_put_user()")
Cc: [email protected] # 4.13
Signed-off-by: Linus Torvalds <[email protected]> |
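The missing check belongs immediately before the unsafe_put_user() sequence; with the access_ok() signature of that kernel generation, the fix looks like this (sketch):

	if (!infop)
		return err;

	/* validate the whole user buffer once, up front, because the
	 * unsafe_put_user() calls below skip the per-access checks */
	if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
		return -EFAULT;

	user_access_begin();
	unsafe_put_user(signo, &infop->si_signo, Efault);
	/* ... */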
static void WAddLineToHist(Window *wp, struct mline *ml)
{
uint32_t *q, *o;
struct mline *hml;
if (wp->w_histheight == 0)
return;
hml = &wp->w_hlines[wp->w_histidx];
q = ml->image;
ml->image = hml->image;
hml->image = q;
q = ml->attr;
o = hml->attr;
hml->attr = q;
ml->attr = null;
if (o != null)
free(o);
q = ml->font;
o = hml->font;
hml->font = q;
ml->font = null;
if (o != null)
free(o);
q = ml->fontx;
o = hml->fontx;
hml->fontx = q;
ml->fontx = null;
if (o != null)
free(o);
q = ml->colorbg;
o = hml->colorbg;
hml->colorbg = q;
ml->colorbg = null;
if (o != null)
free(o);
q = ml->colorfg;
o = hml->colorfg;
hml->colorfg = q;
ml->colorfg = null;
if (o != null)
free(o);
if (++wp->w_histidx >= wp->w_histheight)
wp->w_histidx = 0;
} | 0 | [
"CWE-119"
] | screen | c336a32a1dcd445e6b83827f83531d4c6414e2cd | 73,975,007,069,533,790,000,000,000,000,000,000,000 | 46 | Fix stack overflow due to too deep recursion
Bug: 45713
How to reproduce:
Run this command inside screen
$ printf '\x1b[10000000T'
screen will recursively call MScrollV to a depth of n/256. This is time-consuming and will overflow the stack if n is huge. |
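A standalone sketch of the defense: clamp the scroll count to the region height, since scrolling farther than the region is equivalent to clearing it, so neither deep recursion nor a long loop is needed (names are illustrative, not screen's internals):

/* Clamp n so a request like CSI 10000000 T cannot drive the scroll
 * routine to a recursion depth (or iteration count) of n/256. */
static void scroll_v(int n, int region_height)
{
    int i, count;

    if (n > region_height)
        n = region_height;
    else if (n < -region_height)
        n = -region_height;

    count = (n < 0) ? -n : n;
    for (i = 0; i < count; i++) {
        /* move one line in the direction given by the sign of n */
    }
}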
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
u16 len;
u8 event;
__unpack_control(chan, skb);
len = skb->len;
/*
* We can just drop the corrupted I-frame here.
* Receiver will miss it and start proper recovery
* procedures and ask for retransmission.
*/
if (l2cap_check_fcs(chan, skb))
goto drop;
if (!control->sframe && control->sar == L2CAP_SAR_START)
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
len -= L2CAP_FCS_SIZE;
if (len > chan->mps) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto drop;
}
if ((chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
goto drop;
if (!control->sframe) {
int err;
BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
control->sar, control->reqseq, control->final,
control->txseq);
/* Validate F-bit - F=0 always valid, F=1 only
* valid in TX WAIT_F
*/
if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
goto drop;
if (chan->mode != L2CAP_MODE_STREAMING) {
event = L2CAP_EV_RECV_IFRAME;
err = l2cap_rx(chan, control, skb, event);
} else {
err = l2cap_stream_rx(chan, control, skb);
}
if (err)
l2cap_send_disconn_req(chan, ECONNRESET);
} else {
const u8 rx_func_to_event[4] = {
L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
};
/* Only I-frames are expected in streaming mode */
if (chan->mode == L2CAP_MODE_STREAMING)
goto drop;
BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
control->reqseq, control->final, control->poll,
control->super);
if (len != 0) {
BT_ERR("Trailing bytes: %d in sframe", len);
l2cap_send_disconn_req(chan, ECONNRESET);
goto drop;
}
/* Validate F and P bits */
if (control->final && (control->poll ||
chan->tx_state != L2CAP_TX_STATE_WAIT_F))
goto drop;
event = rx_func_to_event[control->super];
if (l2cap_rx(chan, control, skb, event))
l2cap_send_disconn_req(chan, ECONNRESET);
}
return 0;
drop:
kfree_skb(skb);
return 0;
} | 0 | [
"CWE-787"
] | linux | e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 | 17,566,953,344,136,978,000,000,000,000,000,000,000 | 91 | Bluetooth: Properly check L2CAP config option output buffer length
Validate the output buffer length for L2CAP config requests and responses
to avoid overflowing the stack buffer used for building the option blocks.
Cc: [email protected]
Signed-off-by: Ben Seri <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
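A simplified sketch of the bounds check the message describes — each config option is a type byte, a length byte, then the value, and the whole triple must fit in the space left in the output buffer (generic code, not the exact kernel patch):

#include <stddef.h>
#include <string.h>

/* Refuse to append an option unless type + len + value fits in the
 * remaining output space; this is what keeps the stack buffer used
 * for building option blocks from overflowing. */
static int add_conf_opt(unsigned char **ptr, size_t *space,
                        unsigned char type, unsigned char len,
                        const void *val)
{
    if (*space < (size_t)len + 2)   /* 2 = type byte + length byte */
        return -1;

    (*ptr)[0] = type;
    (*ptr)[1] = len;
    memcpy(*ptr + 2, val, len);
    *ptr += (size_t)len + 2;
    *space -= (size_t)len + 2;
    return 0;
}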
check_access(
char * filename,
int mode)
{
char *noun, *adjective;
char *quoted = quote_string(filename);
gboolean result;
if(mode == F_OK)
noun = "find", adjective = "exists";
else if((mode & X_OK) == X_OK)
noun = "execute", adjective = "executable";
else if((mode & (W_OK|R_OK)) == (W_OK|R_OK))
noun = "read/write", adjective = "read/writable";
else
noun = "access", adjective = "accessible";
if(EUIDACCESS(filename, mode) == -1) {
g_printf(_("ERROR [can not %s %s: %s (ruid:%d euid:%d)\n"), noun, quoted, strerror(errno),
(int)getuid(), (int)geteuid());
result = FALSE;
} else {
g_printf(_("OK %s %s (ruid:%d euid:%d)\n"), quoted, adjective,
(int)getuid(), (int)geteuid());
result = TRUE;
}
amfree(quoted);
return result;
} | 0 | [
"CWE-264"
] | amanda | 4bf5b9b356848da98560ffbb3a07a9cb5c4ea6d7 | 335,897,896,455,401,760,000,000,000,000,000,000,000 | 29 | * Add a /etc/amanda-security.conf file
git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/branches/3_3@6486 a8d146d6-cc15-0410-8900-af154a0219e0 |
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
struct ring_buffer_event *event;
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
again:
event = rb_iter_peek(iter, ts);
if (!event)
goto out;
if (event->type_len == RINGBUF_TYPE_PADDING)
goto again;
rb_advance_iter(iter);
out:
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return event;
} | 0 | [
"CWE-190"
] | linux-stable | 59643d1535eb220668692a5359de22545af579f6 | 267,199,876,131,713,330,000,000,000,000,000,000,000 | 21 | ring-buffer: Prevent overflow of size in ring_buffer_resize()
If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE
then DIV_ROUND_UP() will return zero. Here are the details:
Here's the details:
# echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb
tracing_entries_write() processes this and converts kb to bytes.
18014398509481980 << 10 = 18446744073709547520
and this is passed to ring_buffer_resize() as unsigned long size.
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
Where DIV_ROUND_UP(a, b) is (a + b - 1)/b
BUF_PAGE_SIZE is 4080 and here
18446744073709547520 + 4080 - 1 = 18446744073709551599
where 18446744073709551599 is still smaller than 2^64
2^64 - 18446744073709551599 = 17
But now 18446744073709551599 / 4080 = 4521260802379792
and size = size * 4080 = 18446744073709551360
This is checked to make sure its still greater than 2 * 4080,
which it is.
Then we convert to the number of buffer pages needed.
nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE)
but this time size is 18446744073709551360 and
2^64 - (18446744073709551360 + 4080 - 1) = -3823
Thus it overflows and the resulting number is less than 4080, which makes
3823 / 4080 = 0
an nr_pages is set to this. As we already checked against the minimum that
nr_pages may be, this causes the logic to fail as well, and we crash the
kernel.
There's no reason to have the two DIV_ROUND_UP() calls (that's just a result
of historical code changes); clean up the code and fix this bug.
Cc: [email protected] # 3.5+
Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic")
Signed-off-by: Steven Rostedt <[email protected]> |
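The arithmetic in the message is easy to reproduce; this standalone program (assuming a 64-bit unsigned long) walks the same numbers through DIV_ROUND_UP and shows the wrap to zero pages:

#include <stdio.h>

#define BUF_PAGE_SIZE 4080UL
#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
    /* 18014398509481980 kb -> bytes, as tracing_entries_write() does */
    unsigned long size = 18014398509481980UL << 10;

    printf("bytes     = %lu\n", size);           /* 18446744073709547520 */
    size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);    /* first rounding: fits */
    size *= BUF_PAGE_SIZE;
    printf("rounded   = %lu\n", size);           /* 18446744073709551360 */

    /* size + 4080 - 1 wraps past 2^64, leaving 3823, and 3823/4080 = 0 */
    printf("nr_pages  = %lu\n", DIV_ROUND_UP(size, BUF_PAGE_SIZE));
    return 0;
}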
template<typename tz,typename tc>
CImg<T>& draw_line(CImg<tz>& zbuffer,
const int x0, const int y0, const float z0,
const int x1, const int y1, const float z1,
const tc *const color, const float opacity=1,
const unsigned int pattern=~0U, const bool init_hatch=true) {
typedef typename cimg::superset<tz,float>::type tzfloat;
if (is_empty() || z0<=0 || z1<=0) return *this;
if (!color)
throw CImgArgumentException(_cimg_instance
"draw_line(): Specified color is (null).",
cimg_instance);
if (!is_sameXY(zbuffer))
throw CImgArgumentException(_cimg_instance
"draw_line(): Instance and specified Z-buffer (%u,%u,%u,%u,%p) have "
"different dimensions.",
cimg_instance,
zbuffer._width,zbuffer._height,zbuffer._depth,zbuffer._spectrum,zbuffer._data);
static unsigned int hatch = ~0U - (~0U>>1);
if (init_hatch) hatch = ~0U - (~0U>>1);
const bool xdir = x0<x1, ydir = y0<y1;
int
nx0 = x0, nx1 = x1, ny0 = y0, ny1 = y1,
&xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
&xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
&xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
&xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
tzfloat
Z0 = 1/(tzfloat)z0, Z1 = 1/(tzfloat)z1, nz0 = Z0, nz1 = Z1, dz = Z1 - Z0,
&zleft = xdir?nz0:nz1,
&zright = xdir?nz1:nz0,
&zup = ydir?nz0:nz1,
&zdown = ydir?nz1:nz0;
if (xright<0 || xleft>=width()) return *this;
if (xleft<0) {
const float D = (float)xright - xleft;
yleft-=(int)((float)xleft*((float)yright - yleft)/D);
zleft-=(tzfloat)xleft*(zright - zleft)/D;
xleft = 0;
}
if (xright>=width()) {
const float d = (float)xright - width(), D = (float)xright - xleft;
yright-=(int)(d*((float)yright - yleft)/D);
zright-=(tzfloat)d*(zright - zleft)/D;
xright = width() - 1;
}
if (ydown<0 || yup>=height()) return *this;
if (yup<0) {
const float D = (float)ydown - yup;
xup-=(int)((float)yup*((float)xdown - xup)/D);
zup-=(tzfloat)yup*(zdown - zup)/D;
yup = 0;
}
if (ydown>=height()) {
const float d = (float)ydown - height(), D = (float)ydown - yup;
xdown-=(int)(d*((float)xdown - xup)/D);
zdown-=(tzfloat)d*(zdown - zup)/D;
ydown = height() - 1;
}
T *ptrd0 = data(nx0,ny0);
tz *ptrz = zbuffer.data(nx0,ny0);
int dx = xright - xleft, dy = ydown - yup;
const bool steep = dy>dx;
if (steep) cimg::swap(nx0,ny0,nx1,ny1,dx,dy);
const longT
offx = (longT)(nx0<nx1?1:-1)*(steep?width():1),
offy = (longT)(ny0<ny1?1:-1)*(steep?1:width());
const ulongT
wh = (ulongT)_width*_height,
ndx = (ulongT)(dx>0?dx:1);
if (opacity>=1) {
if (~pattern) for (int error = dx>>1, x = 0; x<=dx; ++x) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz && pattern&hatch) {
*ptrz = (tz)z;
T *ptrd = ptrd0; const tc *col = color;
cimg_forC(*this,c) { *ptrd = (T)*(col++); ptrd+=wh; }
}
hatch>>=1; if (!hatch) hatch = ~0U - (~0U>>1);
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
} else for (int error = dx>>1, x = 0; x<=dx; ++x) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz) {
*ptrz = (tz)z;
T *ptrd = ptrd0; const tc *col = color;
cimg_forC(*this,c) { *ptrd = (T)*(col++); ptrd+=wh; }
}
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
}
} else {
const float nopacity = cimg::abs(opacity), copacity = 1 - std::max(opacity,0.0f);
if (~pattern) for (int error = dx>>1, x = 0; x<=dx; ++x) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz && pattern&hatch) {
*ptrz = (tz)z;
T *ptrd = ptrd0; const tc *col = color;
cimg_forC(*this,c) { *ptrd = (T)(nopacity**(col++) + *ptrd*copacity); ptrd+=wh; }
}
hatch>>=1; if (!hatch) hatch = ~0U - (~0U>>1);
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
} else for (int error = dx>>1, x = 0; x<=dx; ++x) {
const tzfloat z = Z0 + x*dz/ndx;
if (z>=(tzfloat)*ptrz) {
*ptrz = (tz)z;
T *ptrd = ptrd0; const tc *col = color;
cimg_forC(*this,c) { *ptrd = (T)(nopacity**(col++) + *ptrd*copacity); ptrd+=wh; }
}
ptrd0+=offx; ptrz+=offx;
if ((error-=dy)<0) { ptrd0+=offy; ptrz+=offy; error+=dx; }
}
}
return *this;
} | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 76,388,357,045,253,050,000,000,000,000,000,000,000 | 115 | Fix other issues in 'CImg<T>::load_bmp()'. |
int test_gf2m_mod_inv(BIO *bp,BN_CTX *ctx)
{
BIGNUM *a,*b[2],*c,*d;
int i, j, ret = 0;
int p0[] = {163,7,6,3,0,-1};
int p1[] = {193,15,0,-1};
a=BN_new();
b[0]=BN_new();
b[1]=BN_new();
c=BN_new();
d=BN_new();
BN_GF2m_arr2poly(p0, b[0]);
BN_GF2m_arr2poly(p1, b[1]);
for (i=0; i<num0; i++)
{
BN_bntest_rand(a, 512, 0, 0);
for (j=0; j < 2; j++)
{
BN_GF2m_mod_inv(c, a, b[j], ctx);
BN_GF2m_mod_mul(d, a, c, b[j], ctx);
#if 0 /* make test uses output in bc but bc can't handle GF(2^m) arithmetic */
if (bp != NULL)
{
if (!results)
{
BN_print(bp,a);
BIO_puts(bp, " * ");
BN_print(bp,c);
BIO_puts(bp," - 1 % ");
BN_print(bp,b[j]);
BIO_puts(bp,"\n");
}
}
#endif
/* Test that ((1/a)*a) = 1. */
if(!BN_is_one(d))
{
fprintf(stderr,"GF(2^m) modular inversion test failed!\n");
goto err;
}
}
}
ret = 1;
err:
BN_free(a);
BN_free(b[0]);
BN_free(b[1]);
BN_free(c);
BN_free(d);
return ret;
} | 0 | [
"CWE-310"
] | openssl | a7a44ba55cb4f884c6bc9ceac90072dea38e66d0 | 250,091,733,529,340,150,000,000,000,000,000,000,000 | 54 | Fix for CVE-2014-3570 (with minor bn_asm.c revamp).
Reviewed-by: Emilia Kasper <[email protected]> |
static inline void php_rshutdown_session_globals(TSRMLS_D) /* {{{ */
{
if (PS(http_session_vars)) {
zval_ptr_dtor(&PS(http_session_vars));
PS(http_session_vars) = NULL;
}
/* Do NOT destroy PS(mod_user_names) here! */
if (PS(mod_data) || PS(mod_user_implemented)) {
zend_try {
PS(mod)->s_close(&PS(mod_data) TSRMLS_CC);
} zend_end_try();
}
if (PS(id)) {
efree(PS(id));
PS(id) = NULL;
}
} | 0 | [
"CWE-416"
] | php-src | 3798eb6fd5dddb211b01d41495072fd9858d4e32 | 125,166,098,189,937,270,000,000,000,000,000,000,000 | 17 | Fix bug #72562 - destroy var_hash properly |