func
stringlengths
0
484k
target
int64
0
1
cwe
listlengths
0
4
project
stringclasses
799 values
commit_id
stringlengths
40
40
hash
float64
1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
size
int64
1
24k
message
stringlengths
0
13.3k
parserInit(XML_Parser parser, const XML_Char *encodingName) { processor = prologInitProcessor; XmlPrologStateInit(&prologState); protocolEncodingName = (encodingName != NULL ? poolCopyString(&tempPool, encodingName) : NULL); curBase = NULL; XmlInitEncoding(&initEncoding, &encoding, 0); userData = NULL; handlerArg = NULL; startElementHandler = NULL; endElementHandler = NULL; characterDataHandler = NULL; processingInstructionHandler = NULL; commentHandler = NULL; startCdataSectionHandler = NULL; endCdataSectionHandler = NULL; defaultHandler = NULL; startDoctypeDeclHandler = NULL; endDoctypeDeclHandler = NULL; unparsedEntityDeclHandler = NULL; notationDeclHandler = NULL; startNamespaceDeclHandler = NULL; endNamespaceDeclHandler = NULL; notStandaloneHandler = NULL; externalEntityRefHandler = NULL; externalEntityRefHandlerArg = parser; skippedEntityHandler = NULL; elementDeclHandler = NULL; attlistDeclHandler = NULL; entityDeclHandler = NULL; xmlDeclHandler = NULL; bufferPtr = buffer; bufferEnd = buffer; parseEndByteIndex = 0; parseEndPtr = NULL; declElementType = NULL; declAttributeId = NULL; declEntity = NULL; doctypeName = NULL; doctypeSysid = NULL; doctypePubid = NULL; declAttributeType = NULL; declNotationName = NULL; declNotationPublicId = NULL; declAttributeIsCdata = XML_FALSE; declAttributeIsId = XML_FALSE; memset(&position, 0, sizeof(POSITION)); errorCode = XML_ERROR_NONE; eventPtr = NULL; eventEndPtr = NULL; positionPtr = NULL; openInternalEntities = NULL; defaultExpandInternalEntities = XML_TRUE; tagLevel = 0; tagStack = NULL; inheritedBindings = NULL; nSpecifiedAtts = 0; unknownEncodingMem = NULL; unknownEncodingRelease = NULL; unknownEncodingData = NULL; parentParser = NULL; ps_parsing = XML_INITIALIZED; #ifdef XML_DTD isParamEntity = XML_FALSE; useForeignDTD = XML_FALSE; paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER; #endif hash_secret_salt = 0; }
0
[ "CWE-119" ]
libexpat
ba0f9c3b40c264b8dd392e02a7a060a8fa54f032
289,578,243,552,329,420,000,000,000,000,000,000,000
71
CVE-2015-1283 Sanity check size calculations. r=peterv, a=abillings https://sourceforge.net/p/expat/bugs/528/
static void php_mcrypt_module_dtor(zend_rsrc_list_entry *rsrc TSRMLS_DC) /* {{{ */ { php_mcrypt *pm = (php_mcrypt *) rsrc->ptr; if (pm) { mcrypt_generic_deinit(pm->td); mcrypt_module_close(pm->td); efree(pm); pm = NULL; }
0
[ "CWE-190" ]
php-src
6c5211a0cef0cc2854eaa387e0eb036e012904d0
289,228,263,847,761,950,000,000,000,000,000,000,000
10
Fix bug #72455: Heap Overflow due to integer overflows
void Statement::GetRow(Row* row, sqlite3_stmt* stmt) { int rows = sqlite3_column_count(stmt); for (int i = 0; i < rows; i++) { int type = sqlite3_column_type(stmt, i); const char* name = sqlite3_column_name(stmt, i); switch (type) { case SQLITE_INTEGER: { row->push_back(new Values::Integer(name, sqlite3_column_int64(stmt, i))); } break; case SQLITE_FLOAT: { row->push_back(new Values::Float(name, sqlite3_column_double(stmt, i))); } break; case SQLITE_TEXT: { const char* text = (const char*)sqlite3_column_text(stmt, i); int length = sqlite3_column_bytes(stmt, i); row->push_back(new Values::Text(name, length, text)); } break; case SQLITE_BLOB: { const void* blob = sqlite3_column_blob(stmt, i); int length = sqlite3_column_bytes(stmt, i); row->push_back(new Values::Blob(name, length, blob)); } break; case SQLITE_NULL: { row->push_back(new Values::Null(name)); } break; default: assert(false); } } }
0
[]
node-sqlite3
593c9d498be2510d286349134537e3bf89401c4a
310,806,750,283,073,020,000,000,000,000,000,000,000
31
bug: fix segfault of invalid toString() object (#1450) * bug: verify toString() returns valid data * test: faulty toString test
gs_setdevice_no_init(gs_gstate * pgs, gx_device * dev) { /* * Just set the device, possibly changing color space but no other * device parameters. * * Make sure we don't close the device if dev == pgs->device * This could be done by allowing the rc_assign to close the * old 'dev' if the rc goes to 0 (via the device structure's * finalization procedure), but then the 'code' from the dev * closedevice would not be propagated up. We want to allow * the code to be handled, particularly for the pdfwrite * device. */ if (pgs->device != NULL && pgs->device->rc.ref_count == 1 && pgs->device != dev) { int code = gs_closedevice(pgs->device); if (code < 0) return code; } rc_assign(pgs->device, dev, "gs_setdevice_no_init"); gs_gstate_update_device(pgs, dev); return pgs->overprint ? gs_do_set_overprint(pgs) : 0; }
0
[]
ghostpdl
79cccf641486a6595c43f1de1cd7ade696020a31
287,183,420,720,207,040,000,000,000,000,000,000,000
25
Bug 699654(2): preserve LockSafetyParams in the nulldevice The nulldevice does not necessarily use the normal setpagedevice machinery, but can be set using the nulldevice operator. In which case, we don't preserve the settings from the original device (in the way setpagedevice does). Since nulldevice does nothing, this is not generally a problem, but in the case of LockSafetyParams it *is* important when we restore back to the original device, when LockSafetyParams not being set is "preserved" into the post- restore configuration. We have to initialise the value to false because the nulldevice is used during initialisation (before any other device exists), and *must* be writable for that.
xmlFACompareAtomTypes(xmlRegAtomType type1, xmlRegAtomType type2) { if ((type1 == XML_REGEXP_EPSILON) || (type1 == XML_REGEXP_CHARVAL) || (type1 == XML_REGEXP_RANGES) || (type1 == XML_REGEXP_SUBREG) || (type1 == XML_REGEXP_STRING) || (type1 == XML_REGEXP_ANYCHAR)) return(1); if ((type2 == XML_REGEXP_EPSILON) || (type2 == XML_REGEXP_CHARVAL) || (type2 == XML_REGEXP_RANGES) || (type2 == XML_REGEXP_SUBREG) || (type2 == XML_REGEXP_STRING) || (type2 == XML_REGEXP_ANYCHAR)) return(1); if (type1 == type2) return(1); /* simplify subsequent compares by making sure type1 < type2 */ if (type1 > type2) { xmlRegAtomType tmp = type1; type1 = type2; type2 = tmp; } switch (type1) { case XML_REGEXP_ANYSPACE: /* \s */ /* can't be a letter, number, mark, pontuation, symbol */ if ((type2 == XML_REGEXP_NOTSPACE) || ((type2 >= XML_REGEXP_LETTER) && (type2 <= XML_REGEXP_LETTER_OTHERS)) || ((type2 >= XML_REGEXP_NUMBER) && (type2 <= XML_REGEXP_NUMBER_OTHERS)) || ((type2 >= XML_REGEXP_MARK) && (type2 <= XML_REGEXP_MARK_ENCLOSING)) || ((type2 >= XML_REGEXP_PUNCT) && (type2 <= XML_REGEXP_PUNCT_OTHERS)) || ((type2 >= XML_REGEXP_SYMBOL) && (type2 <= XML_REGEXP_SYMBOL_OTHERS)) ) return(0); break; case XML_REGEXP_NOTSPACE: /* \S */ break; case XML_REGEXP_INITNAME: /* \l */ /* can't be a number, mark, separator, pontuation, symbol or other */ if ((type2 == XML_REGEXP_NOTINITNAME) || ((type2 >= XML_REGEXP_NUMBER) && (type2 <= XML_REGEXP_NUMBER_OTHERS)) || ((type2 >= XML_REGEXP_MARK) && (type2 <= XML_REGEXP_MARK_ENCLOSING)) || ((type2 >= XML_REGEXP_SEPAR) && (type2 <= XML_REGEXP_SEPAR_PARA)) || ((type2 >= XML_REGEXP_PUNCT) && (type2 <= XML_REGEXP_PUNCT_OTHERS)) || ((type2 >= XML_REGEXP_SYMBOL) && (type2 <= XML_REGEXP_SYMBOL_OTHERS)) || ((type2 >= XML_REGEXP_OTHER) && (type2 <= XML_REGEXP_OTHER_NA)) ) return(0); break; case XML_REGEXP_NOTINITNAME: /* \L */ break; case XML_REGEXP_NAMECHAR: /* \c */ /* can't be a mark, separator, pontuation, symbol or other */ if ((type2 == XML_REGEXP_NOTNAMECHAR) || 
((type2 >= XML_REGEXP_MARK) && (type2 <= XML_REGEXP_MARK_ENCLOSING)) || ((type2 >= XML_REGEXP_PUNCT) && (type2 <= XML_REGEXP_PUNCT_OTHERS)) || ((type2 >= XML_REGEXP_SEPAR) && (type2 <= XML_REGEXP_SEPAR_PARA)) || ((type2 >= XML_REGEXP_SYMBOL) && (type2 <= XML_REGEXP_SYMBOL_OTHERS)) || ((type2 >= XML_REGEXP_OTHER) && (type2 <= XML_REGEXP_OTHER_NA)) ) return(0); break; case XML_REGEXP_NOTNAMECHAR: /* \C */ break; case XML_REGEXP_DECIMAL: /* \d */ /* can't be a letter, mark, separator, pontuation, symbol or other */ if ((type2 == XML_REGEXP_NOTDECIMAL) || (type2 == XML_REGEXP_REALCHAR) || ((type2 >= XML_REGEXP_LETTER) && (type2 <= XML_REGEXP_LETTER_OTHERS)) || ((type2 >= XML_REGEXP_MARK) && (type2 <= XML_REGEXP_MARK_ENCLOSING)) || ((type2 >= XML_REGEXP_PUNCT) && (type2 <= XML_REGEXP_PUNCT_OTHERS)) || ((type2 >= XML_REGEXP_SEPAR) && (type2 <= XML_REGEXP_SEPAR_PARA)) || ((type2 >= XML_REGEXP_SYMBOL) && (type2 <= XML_REGEXP_SYMBOL_OTHERS)) || ((type2 >= XML_REGEXP_OTHER) && (type2 <= XML_REGEXP_OTHER_NA)) )return(0); break; case XML_REGEXP_NOTDECIMAL: /* \D */ break; case XML_REGEXP_REALCHAR: /* \w */ /* can't be a mark, separator, pontuation, symbol or other */ if ((type2 == XML_REGEXP_NOTDECIMAL) || ((type2 >= XML_REGEXP_MARK) && (type2 <= XML_REGEXP_MARK_ENCLOSING)) || ((type2 >= XML_REGEXP_PUNCT) && (type2 <= XML_REGEXP_PUNCT_OTHERS)) || ((type2 >= XML_REGEXP_SEPAR) && (type2 <= XML_REGEXP_SEPAR_PARA)) || ((type2 >= XML_REGEXP_SYMBOL) && (type2 <= XML_REGEXP_SYMBOL_OTHERS)) || ((type2 >= XML_REGEXP_OTHER) && (type2 <= XML_REGEXP_OTHER_NA)) )return(0); break; case XML_REGEXP_NOTREALCHAR: /* \W */ break; /* * at that point we know both type 1 and type2 are from * character categories are ordered and are different, * it becomes simple because this is a partition */ case XML_REGEXP_LETTER: if (type2 <= XML_REGEXP_LETTER_OTHERS) return(1); return(0); case XML_REGEXP_LETTER_UPPERCASE: case XML_REGEXP_LETTER_LOWERCASE: case XML_REGEXP_LETTER_TITLECASE: case 
XML_REGEXP_LETTER_MODIFIER: case XML_REGEXP_LETTER_OTHERS: return(0); case XML_REGEXP_MARK: if (type2 <= XML_REGEXP_MARK_ENCLOSING) return(1); return(0); case XML_REGEXP_MARK_NONSPACING: case XML_REGEXP_MARK_SPACECOMBINING: case XML_REGEXP_MARK_ENCLOSING: return(0); case XML_REGEXP_NUMBER: if (type2 <= XML_REGEXP_NUMBER_OTHERS) return(1); return(0); case XML_REGEXP_NUMBER_DECIMAL: case XML_REGEXP_NUMBER_LETTER: case XML_REGEXP_NUMBER_OTHERS: return(0); case XML_REGEXP_PUNCT: if (type2 <= XML_REGEXP_PUNCT_OTHERS) return(1); return(0); case XML_REGEXP_PUNCT_CONNECTOR: case XML_REGEXP_PUNCT_DASH: case XML_REGEXP_PUNCT_OPEN: case XML_REGEXP_PUNCT_CLOSE: case XML_REGEXP_PUNCT_INITQUOTE: case XML_REGEXP_PUNCT_FINQUOTE: case XML_REGEXP_PUNCT_OTHERS: return(0); case XML_REGEXP_SEPAR: if (type2 <= XML_REGEXP_SEPAR_PARA) return(1); return(0); case XML_REGEXP_SEPAR_SPACE: case XML_REGEXP_SEPAR_LINE: case XML_REGEXP_SEPAR_PARA: return(0); case XML_REGEXP_SYMBOL: if (type2 <= XML_REGEXP_SYMBOL_OTHERS) return(1); return(0); case XML_REGEXP_SYMBOL_MATH: case XML_REGEXP_SYMBOL_CURRENCY: case XML_REGEXP_SYMBOL_MODIFIER: case XML_REGEXP_SYMBOL_OTHERS: return(0); case XML_REGEXP_OTHER: if (type2 <= XML_REGEXP_OTHER_NA) return(1); return(0); case XML_REGEXP_OTHER_CONTROL: case XML_REGEXP_OTHER_FORMAT: case XML_REGEXP_OTHER_PRIVATE: case XML_REGEXP_OTHER_NA: return(0); default: break; } return(1); }
0
[ "CWE-119" ]
libxml2
cbb271655cadeb8dbb258a64701d9a3a0c4835b4
53,481,561,381,399,790,000,000,000,000,000,000,000
189
Bug 757711: heap-buffer-overflow in xmlFAParsePosCharGroup <https://bugzilla.gnome.org/show_bug.cgi?id=757711> * xmlregexp.c: (xmlFAParseCharRange): Only advance to the next character if there is no error. Advancing to the next character in case of an error while parsing regexp leads to an out of bounds access.
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace) { struct scrub_block *sblock; int index; sblock = kzalloc(sizeof(*sblock), GFP_KERNEL); if (!sblock) { spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); return -ENOMEM; } /* one ref inside this function, plus one for each page added to * a bio later on */ refcount_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; for (index = 0; len > 0; index++) { struct scrub_page *spage; u64 l = min_t(u64, len, PAGE_SIZE); spage = kzalloc(sizeof(*spage), GFP_KERNEL); if (!spage) { leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); scrub_block_put(sblock); return -ENOMEM; } BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); scrub_page_get(spage); sblock->pagev[index] = spage; spage->sblock = sblock; spage->dev = dev; spage->flags = flags; spage->generation = gen; spage->logical = logical; spage->physical = physical; spage->physical_for_dev_replace = physical_for_dev_replace; spage->mirror_num = mirror_num; if (csum) { spage->have_csum = 1; memcpy(spage->csum, csum, sctx->csum_size); } else { spage->have_csum = 0; } sblock->page_count++; spage->page = alloc_page(GFP_KERNEL); if (!spage->page) goto leave_nomem; len -= l; logical += l; physical += l; physical_for_dev_replace += l; } WARN_ON(sblock->page_count == 0); if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) { /* * This case should only be hit for RAID 5/6 device replace. See * the comment in scrub_missing_raid56_pages() for details. 
*/ scrub_missing_raid56_pages(sblock); } else { for (index = 0; index < sblock->page_count; index++) { struct scrub_page *spage = sblock->pagev[index]; int ret; ret = scrub_add_page_to_rd_bio(sctx, spage); if (ret) { scrub_block_put(sblock); return ret; } } if (force) scrub_submit(sctx); } /* last one frees, either here or in bio completion for last page */ scrub_block_put(sblock); return 0; }
0
[ "CWE-476", "CWE-284" ]
linux
09ba3bc9dd150457c506e4661380a6183af651c1
30,975,144,117,832,500,000,000,000,000,000,000,000
89
btrfs: merge btrfs_find_device and find_device Both btrfs_find_device() and find_device() does the same thing except that the latter does not take the seed device onto account in the device scanning context. We can merge them. Signed-off-by: Anand Jain <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
static ssize_t nbd_send_reply(QIOChannel *ioc, NBDReply *reply) { uint8_t buf[NBD_REPLY_SIZE]; reply->error = system_errno_to_nbd_errno(reply->error); TRACE("Sending response to client: { .error = %" PRId32 ", handle = %" PRIu64 " }", reply->error, reply->handle); /* Reply [ 0 .. 3] magic (NBD_REPLY_MAGIC) [ 4 .. 7] error (0 == no error) [ 7 .. 15] handle */ stl_be_p(buf, NBD_REPLY_MAGIC); stl_be_p(buf + 4, reply->error); stq_be_p(buf + 8, reply->handle); return nbd_write(ioc, buf, sizeof(buf), NULL); }
0
[ "CWE-20" ]
qemu
2b0bbc4f8809c972bad134bc1a2570dbb01dea0b
155,843,001,216,382,500,000,000,000,000,000,000,000
21
nbd/server: get rid of nbd_negotiate_read and friends Functions nbd_negotiate_{read,write,drop_sync} were introduced in 1a6245a5b, when nbd_rwv (was nbd_wr_sync) was working through qemu_co_sendv_recvv (the path is nbd_wr_sync -> qemu_co_{recv/send} -> qemu_co_send_recv -> qemu_co_sendv_recvv), which just yields, without setting any handlers. But starting from ff82911cd nbd_rwv (was nbd_wr_syncv) works through qio_channel_yield() which sets handlers, so watchers are redundant in nbd_negotiate_{read,write,drop_sync}, then, let's just use nbd_{read,write,drop} functions. Functions nbd_{read,write,drop} has errp parameter, which is unused in this patch. This will be fixed later. Signed-off-by: Vladimir Sementsov-Ogievskiy <[email protected]> Reviewed-by: Eric Blake <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(env, regs + regno); return; } __mark_reg_known_zero(regs + regno); }
0
[ "CWE-119", "CWE-681", "CWE-787" ]
linux
5b9fbeb75b6a98955f628e205ac26689bcb1383e
116,615,299,804,389,930,000,000,000,000,000,000,000
12
bpf: Fix scalar32_min_max_or bounds tracking Simon reported an issue with the current scalar32_min_max_or() implementation. That is, compared to the other 32 bit subreg tracking functions, the code in scalar32_min_max_or() stands out that it's using the 64 bit registers instead of 32 bit ones. This leads to bounds tracking issues, for example: [...] 8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 8: (79) r1 = *(u64 *)(r0 +0) R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 9: (b7) r0 = 1 10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 10: (18) r2 = 0x600000002 12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 12: (ad) if r1 < r2 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: (95) exit 14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 14: (25) if r1 > 0x0 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: (95) exit 16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 16: (47) r1 |= 0 17: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x1; 0x700000000),s32_max_value=1,u32_max_value=1) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm [...] The bound tests on the map value force the upper unsigned bound to be 25769803777 in 64 bit (0b11000000000000000000000000000000001) and then lower one to be 1. 
By using OR they are truncated and thus result in the range [1,1] for the 32 bit reg tracker. This is incorrect given the only thing we know is that the value must be positive and thus 2147483647 (0b1111111111111111111111111111111) at max for the subregs. Fix it by using the {u,s}32_{min,max}_value vars instead. This also makes sense, for example, for the case where we update dst_reg->s32_{min,max}_value in the else branch we need to use the newly computed dst_reg->u32_{min,max}_value as we know that these are positive. Previously, in the else branch the 64 bit values of umin_value=1 and umax_value=32212254719 were used and latter got truncated to be 1 as upper bound there. After the fix the subreg range is now correct: [...] 8: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 8: (79) r1 = *(u64 *)(r0 +0) R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R10=fp0 fp-8=mmmmmmmm 9: R0=map_value(id=0,off=0,ks=4,vs=48,imm=0) R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 9: (b7) r0 = 1 10: R0_w=inv1 R1_w=inv(id=0) R10=fp0 fp-8=mmmmmmmm 10: (18) r2 = 0x600000002 12: R0_w=inv1 R1_w=inv(id=0) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 12: (ad) if r1 < r2 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: R0_w=inv1 R1_w=inv(id=0,umin_value=25769803778) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 13: (95) exit 14: R0_w=inv1 R1_w=inv(id=0,umax_value=25769803777,var_off=(0x0; 0x7ffffffff)) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 14: (25) if r1 > 0x0 goto pc+1 R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: R0_w=inv1 R1_w=inv(id=0,umax_value=0,var_off=(0x0; 0x7fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 15: (95) exit 16: R0_w=inv1 R1_w=inv(id=0,umin_value=1,umax_value=25769803777,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm 16: (47) r1 |= 0 17: R0_w=inv1 
R1_w=inv(id=0,umin_value=1,umax_value=32212254719,var_off=(0x0; 0x77fffffff),u32_max_value=2147483647) R2_w=inv25769803778 R10=fp0 fp-8=mmmmmmmm [...] Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking") Reported-by: Simon Scannell <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Reviewed-by: John Fastabend <[email protected]> Acked-by: Alexei Starovoitov <[email protected]>
static int handle_pml_full(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; trace_kvm_pml_full(vcpu->vcpu_id); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * PML buffer FULL happened while executing iret from NMI, * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && enable_vnmi && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); /* * PML buffer already flushed at beginning of VMEXIT. Nothing to do * here.., and there's no userspace involvement needed for PML. */ return 1; }
0
[ "CWE-284" ]
linux
727ba748e110b4de50d142edca9d6a9b7e6111d8
109,567,242,196,352,440,000,000,000,000,000,000,000
24
kvm: nVMX: Enforce cpl=0 for VMX instructions VMX instructions executed inside a L1 VM will always trigger a VM exit even when executed with cpl 3. This means we must perform the privilege check in software. Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks") Cc: [email protected] Signed-off-by: Felix Wilhelm <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
get_key(krb5_context context, pkinit_identity_crypto_context id_cryptoctx, char *filename, const char *fsname, EVP_PKEY **retkey, const char *password) { EVP_PKEY *pkey = NULL; BIO *tmp = NULL; struct get_key_cb_data cb_data; int code; krb5_error_code retval; if (filename == NULL || retkey == NULL) return EINVAL; tmp = BIO_new(BIO_s_file()); if (tmp == NULL) return ENOMEM; code = BIO_read_filename(tmp, filename); if (code == 0) { retval = errno; goto cleanup; } cb_data.context = context; cb_data.id_cryptoctx = id_cryptoctx; cb_data.filename = filename; cb_data.fsname = fsname; cb_data.password = password; pkey = PEM_read_bio_PrivateKey(tmp, NULL, get_key_cb, &cb_data); if (pkey == NULL && !id_cryptoctx->defer_id_prompt) { retval = EIO; pkiDebug("failed to read private key from %s\n", filename); goto cleanup; } *retkey = pkey; retval = 0; cleanup: if (tmp != NULL) BIO_free(tmp); return retval; }
0
[ "CWE-119", "CWE-787" ]
krb5
fbb687db1088ddd894d975996e5f6a4252b9a2b4
126,699,562,885,423,570,000,000,000,000,000,000,000
40
Fix PKINIT cert matching data construction Rewrite X509_NAME_oneline_ex() and its call sites to use dynamic allocation and to perform proper error checking. ticket: 8617 target_version: 1.16 target_version: 1.15-next target_version: 1.14-next tags: pullup
int snd_component_add(struct snd_card *card, const char *component) { char *ptr; int len = strlen(component); ptr = strstr(card->components, component); if (ptr != NULL) { if (ptr[len] == '\0' || ptr[len] == ' ') /* already there */ return 1; } if (strlen(card->components) + 1 + len + 1 > sizeof(card->components)) { snd_BUG(); return -ENOMEM; } if (card->components[0] != '\0') strcat(card->components, " "); strcat(card->components, component); return 0; }
0
[ "CWE-416" ]
linux
2a3f7221acddfe1caa9ff09b3a8158c39b2fdeac
299,094,431,591,490,100,000,000,000,000,000,000,000
19
ALSA: core: Fix card races between register and disconnect There is a small race window in the card disconnection code that allows the registration of another card with the very same card id. This leads to a warning in procfs creation as caught by syzkaller. The problem is that we delete snd_cards and snd_cards_lock entries at the very beginning of the disconnection procedure. This makes the slot available to be assigned for another card object while the disconnection procedure is being processed. Then it becomes possible to issue a procfs registration with the existing file name although we check the conflict beforehand. The fix is simply to move the snd_cards and snd_cards_lock clearances at the end of the disconnection procedure. The references to these entries are merely either from the global proc files like /proc/asound/cards or from the card registration / disconnection, so it should be fine to shift at the very end. Reported-by: [email protected] Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) { kvm_inject_gp(vcpu, 0); return 1; } return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP | EMULTYPE_COMPLETE_USER_EXIT); }
0
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
334,883,691,453,309,030,000,000,000,000,000,000,000
10
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <[email protected]> Signed-off-by: David Woodhouse <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
sys_sigsuspend(old_sigset_t mask, unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs * regs) { sigset_t saveset; mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); saveset = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); REF_REG_RET = -EINTR; while (1) { current->state = TASK_INTERRUPTIBLE; schedule(); regs->pc += 4; /* because sys_sigreturn decrements the pc */ if (do_signal(regs, &saveset)) { /* pc now points at signal handler. Need to decrement it because entry.S will increment it. */ regs->pc -= 4; return -EINTR; } } }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
78,691,316,140,670,790,000,000,000,000,000,000,000
27
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. 
This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
free_perturb (char *p, size_t n) { if (__glibc_unlikely (perturb_byte)) memset (p, perturb_byte, n); }
0
[ "CWE-787" ]
glibc
d6db68e66dff25d12c3bc5641b60cbd7fb6ab44f
222,252,986,899,606,870,000,000,000,000,000,000,000
5
malloc: Mitigate null-byte overflow attacks * malloc/malloc.c (_int_free): Check for corrupt prev_size vs size. (malloc_consolidate): Likewise.
inline void invert_endianness(unsigned char* const, const cimg_ulong) {}
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
323,343,500,884,524,630,000,000,000,000,000,000,000
1
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
do_init (int event_fd, pid_t initial_pid, struct sock_fprog *seccomp_prog) { int initial_exit_status = 1; LockFile *lock; for (lock = lock_files; lock != NULL; lock = lock->next) { int fd = open (lock->path, O_RDONLY | O_CLOEXEC); if (fd == -1) die_with_error ("Unable to open lock file %s", lock->path); struct flock l = { .l_type = F_RDLCK, .l_whence = SEEK_SET, .l_start = 0, .l_len = 0 }; if (fcntl (fd, F_SETLK, &l) < 0) die_with_error ("Unable to lock file %s", lock->path); /* Keep fd open to hang on to lock */ lock->fd = fd; } /* Optionally bind our lifecycle to that of the caller */ handle_die_with_parent (); if (seccomp_prog != NULL && prctl (PR_SET_SECCOMP, SECCOMP_MODE_FILTER, seccomp_prog) != 0) die_with_error ("prctl(PR_SET_SECCOMP)"); while (TRUE) { pid_t child; int status; child = wait (&status); if (child == initial_pid && event_fd != -1) { uint64_t val; int res UNUSED; initial_exit_status = propagate_exit_status (status); val = initial_exit_status + 1; res = write (event_fd, &val, 8); /* Ignore res, if e.g. the parent died and closed event_fd we don't want to error out here */ } if (child == -1 && errno != EINTR) { if (errno != ECHILD) die_with_error ("init wait()"); break; } } /* Close FDs. */ for (lock = lock_files; lock != NULL; lock = lock->next) { if (lock->fd >= 0) { close (lock->fd); lock->fd = -1; } } return initial_exit_status; }
0
[ "CWE-20", "CWE-269" ]
bubblewrap
efc89e3b939b4bde42c10f065f6b7b02958ed50e
334,976,744,141,613,700,000,000,000,000,000,000,000
71
Don't create our own temporary mount point for pivot_root An attacker could pre-create /tmp/.bubblewrap-$UID and make it a non-directory, non-symlink (in which case mounting our tmpfs would fail, causing denial of service), or make it a symlink under their control (potentially allowing bad things if the protected_symlinks sysctl is not enabled). Instead, temporarily mount the tmpfs on a directory that we are sure exists and is not attacker-controlled. /tmp (the directory itself, not a subdirectory) will do. Fixes: #304 Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=923557 Signed-off-by: Simon McVittie <[email protected]> Closes: #305 Approved by: cgwalters
static __exit void hardware_unsetup(void) { int i; for (i = 0; i < VMX_BITMAP_NR; i++) free_page((unsigned long)vmx_bitmap[i]); free_kvm_area(); }
0
[ "CWE-284" ]
linux
727ba748e110b4de50d142edca9d6a9b7e6111d8
23,324,520,328,324,847,000,000,000,000,000,000,000
9
kvm: nVMX: Enforce cpl=0 for VMX instructions VMX instructions executed inside a L1 VM will always trigger a VM exit even when executed with cpl 3. This means we must perform the privilege check in software. Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks") Cc: [email protected] Signed-off-by: Felix Wilhelm <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
beep_print(netdissect_options *ndo, const u_char *bp, u_int length) { if (l_strnstart(ndo, "MSG", 4, (const char *)bp, length)) /* A REQuest */ ND_PRINT((ndo, " BEEP MSG")); else if (l_strnstart(ndo, "RPY ", 4, (const char *)bp, length)) ND_PRINT((ndo, " BEEP RPY")); else if (l_strnstart(ndo, "ERR ", 4, (const char *)bp, length)) ND_PRINT((ndo, " BEEP ERR")); else if (l_strnstart(ndo, "ANS ", 4, (const char *)bp, length)) ND_PRINT((ndo, " BEEP ANS")); else if (l_strnstart(ndo, "NUL ", 4, (const char *)bp, length)) ND_PRINT((ndo, " BEEP NUL")); else if (l_strnstart(ndo, "SEQ ", 4, (const char *)bp, length)) ND_PRINT((ndo, " BEEP SEQ")); else if (l_strnstart(ndo, "END", 4, (const char *)bp, length)) ND_PRINT((ndo, " BEEP END")); else ND_PRINT((ndo, " BEEP (payload or undecoded)")); }
0
[ "CWE-125", "CWE-787" ]
tcpdump
877b66b398518d9501513e0860c9f3a8acc70892
207,709,938,637,621,640,000,000,000,000,000,000,000
20
CVE-2017-13010/BEEP: Do bounds checking when comparing strings. This fixes a buffer over-read discovered by Brian 'geeknik' Carpenter. Add a test using the capture file supplied by the reporter(s).
vpnc_watch_cb (GPid pid, gint status, gpointer user_data) { NMVPNCPlugin *plugin = NM_VPNC_PLUGIN (user_data); NMVPNCPluginPrivate *priv = NM_VPNC_PLUGIN_GET_PRIVATE (plugin); guint error = 0; if (WIFEXITED (status)) { error = WEXITSTATUS (status); if (error != 0) _LOGW ("vpnc exited with error code %d", error); } else if (WIFSTOPPED (status)) _LOGW ("vpnc stopped unexpectedly with signal %d", WSTOPSIG (status)); else if (WIFSIGNALED (status)) _LOGW ("vpnc died with signal %d", WTERMSIG (status)); else _LOGW ("vpnc died from an unknown cause"); priv->watch_id = 0; /* Grab any remaining output, if any */ if (priv->out.channel) pipe_echo_finish (&priv->out); if (priv->err.channel) pipe_echo_finish (&priv->err); vpnc_cleanup (plugin, FALSE); remove_pidfile (plugin); /* Must be after data->state is set since signals use data->state */ switch (error) { case 2: /* Couldn't log in due to bad user/pass */ nm_vpn_service_plugin_failure (NM_VPN_SERVICE_PLUGIN (plugin), NM_VPN_PLUGIN_FAILURE_LOGIN_FAILED); break; case 1: /* Other error (couldn't bind to address, etc) */ nm_vpn_service_plugin_failure (NM_VPN_SERVICE_PLUGIN (plugin), NM_VPN_PLUGIN_FAILURE_CONNECT_FAILED); break; default: nm_vpn_service_plugin_disconnect (NM_VPN_SERVICE_PLUGIN (plugin), NULL); break; } }
0
[]
NetworkManager-vpnc
07ac18a32b4e361a27ef48ac757d36cbb46e8e12
180,456,258,513,445,000,000,000,000,000,000,000,000
43
service: disallow newlinies in configuration values (CVE-2018-10900) The vpnc configuration format doesn't allow those. vpnc(8): The values start exactly one space after the keywords, and run to the end of line. This lets you put any kind of weird character (except CR, LF and NUL) in your strings We have no choice but to reject them. If we didn't it would allow the user to inject arbitrary configuration directives with potential security implications. https://pulsesecurity.co.nz/advisories/NM-VPNC-Privesc Reported by: Denis Andzakovic
int init_crypto(struct crypt_device *ctx) { struct utsname uts; int r; r = crypt_random_init(ctx); if (r < 0) { log_err(ctx, _("Cannot initialize crypto RNG backend.")); return r; } r = crypt_backend_init(crypt_fips_mode()); if (r < 0) log_err(ctx, _("Cannot initialize crypto backend.")); if (!r && !_crypto_logged) { log_dbg(ctx, "Crypto backend (%s) initialized in cryptsetup library version %s.", crypt_backend_version(), PACKAGE_VERSION); if (!uname(&uts)) log_dbg(ctx, "Detected kernel %s %s %s.", uts.sysname, uts.release, uts.machine); _crypto_logged = 1; } return r; }
0
[ "CWE-345" ]
cryptsetup
0113ac2d889c5322659ad0596d4cfc6da53e356c
210,992,135,015,609,370,000,000,000,000,000,000,000
26
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack Fix possible attacks against data confidentiality through LUKS2 online reencryption extension crash recovery. An attacker can modify on-disk metadata to simulate decryption in progress with crashed (unfinished) reencryption step and persistently decrypt part of the LUKS device. This attack requires repeated physical access to the LUKS device but no knowledge of user passphrases. The decryption step is performed after a valid user activates the device with a correct passphrase and modified metadata. There are no visible warnings for the user that such recovery happened (except using the luksDump command). The attack can also be reversed afterward (simulating crashed encryption from a plaintext) with possible modification of revealed plaintext. The problem was caused by reusing a mechanism designed for actual reencryption operation without reassessing the security impact for new encryption and decryption operations. While the reencryption requires calculating and verifying both key digests, no digest was needed to initiate decryption recovery if the destination is plaintext (no encryption key). Also, some metadata (like encryption cipher) is not protected, and an attacker could change it. Note that LUKS2 protects visible metadata only when a random change occurs. It does not protect against intentional modification but such modification must not cause a violation of data confidentiality. The fix introduces additional digest protection of reencryption metadata. The digest is calculated from known keys and critical reencryption metadata. Now an attacker cannot create correct metadata digest without knowledge of a passphrase for used keyslots. For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); }
0
[ "CWE-119" ]
file
1859fdb4e67c49c463c4e0078054335cd46ba295
171,918,185,527,301,160,000,000,000,000,000,000,000
6
add more check found by cert's fuzzer.
g_deinit(void) { #if defined(_WIN32) WSACleanup(); #endif g_remove_dir(g_temp_base); }
1
[]
xrdp
cadad6e181d2a67698e5eb7cacd6b233ae29eb97
187,669,739,876,419,030,000,000,000,000,000,000,000
7
/tmp cleanup
TEST(UriSuite, TestQueryListPair) { testQueryListPairHelper("one+two+%26+three=%2B", "one two & three", "+"); testQueryListPairHelper("one=two=three", "one", "two=three", "one=two%3Dthree"); testQueryListPairHelper("one=two=three=four", "one", "two=three=four", "one=two%3Dthree%3Dfour"); }
0
[ "CWE-125" ]
uriparser
cef25028de5ff872c2e1f0a6c562eb3ea9ecbce4
126,650,330,207,058,750,000,000,000,000,000,000,000
5
Fix uriParse*Ex* out-of-bounds read
ngx_http_lua_copy_in_file_request_body(ngx_http_request_t *r) { ngx_temp_file_t *tf; ngx_http_request_body_t *body; tf = r->request_body->temp_file; if (!tf->persistent || !tf->clean) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "the request body was not read by ngx_lua"); return NGX_ERROR; } body = ngx_palloc(r->pool, sizeof(ngx_http_request_body_t)); if (body == NULL) { return NGX_ERROR; } ngx_memcpy(body, r->request_body, sizeof(ngx_http_request_body_t)); body->temp_file = ngx_palloc(r->pool, sizeof(ngx_temp_file_t)); if (body->temp_file == NULL) { return NGX_ERROR; } ngx_memcpy(body->temp_file, tf, sizeof(ngx_temp_file_t)); dd("file fd: %d", body->temp_file->file.fd); r->request_body = body; return NGX_OK; }
0
[ "CWE-444" ]
lua-nginx-module
9ab38e8ee35fc08a57636b1b6190dca70b0076fa
26,189,338,797,684,760,000,000,000,000,000,000,000
34
bugfix: prevented request smuggling in the ngx.location.capture API. Signed-off-by: Yichun Zhang (agentzh) <[email protected]>
httpServeObject(HTTPConnectionPtr connection) { HTTPRequestPtr request = connection->request; ObjectPtr object = request->object; int i = request->from / CHUNK_SIZE; int j = request->from % CHUNK_SIZE; int n, len, rc; int bufsize = CHUNK_SIZE; int condition_result; object->atime = current_time.tv_sec; objectMetadataChanged(object, 0); httpSetTimeout(connection, -1); if((request->error_code && relaxTransparency <= 0) || object->flags & OBJECT_INITIAL) { object->flags &= ~OBJECT_FAILED; unlockChunk(object, i); if(request->error_code) return httpClientRawError(connection, request->error_code, retainAtom(request->error_message), 0); else return httpClientRawError(connection, 500, internAtom("Object vanished."), 0); } if(!(object->flags & OBJECT_INPROGRESS) && object->code == 0) { if(object->flags & OBJECT_INITIAL) { unlockChunk(object, i); return httpClientRawError(connection, 503, internAtom("Error message lost"), 0); } else { unlockChunk(object, i); do_log(L_ERROR, "Internal proxy error: object has no code.\n"); return httpClientRawError(connection, 500, internAtom("Internal proxy error: " "object has no code"), 0); } } condition_result = httpCondition(object, request->condition); if(condition_result == CONDITION_FAILED) { unlockChunk(object, i); return httpClientRawError(connection, 412, internAtom("Precondition failed"), 0); } else if(condition_result == CONDITION_NOT_MODIFIED) { unlockChunk(object, i); return httpClientRawError(connection, 304, internAtom("Not modified"), 0); } objectFillFromDisk(object, request->from, (request->method == METHOD_HEAD || condition_result != CONDITION_MATCH) ? 
0 : 1); if(((object->flags & OBJECT_LINEAR) && (object->requestor != connection->request)) || ((object->flags & OBJECT_SUPERSEDED) && !(object->flags & OBJECT_LINEAR))) { if(request->request) { request->request->request = NULL; request->request = NULL; request->object->requestor = NULL; } object = makeObject(OBJECT_HTTP, object->key, object->key_size, 1, 0, object->request, NULL); if(request->object->requestor == request) request->object->requestor = NULL; unlockChunk(request->object, i); releaseObject(request->object); request->object = NULL; if(object == NULL) { do_log(L_ERROR, "Couldn't allocate object."); return httpClientRawError(connection, 501, internAtom("Couldn't allocate object"), 1); } if(urlIsLocal(object->key, object->key_size)) { object->flags |= OBJECT_LOCAL; object->request = httpLocalRequest; } request->object = object; connection->flags &= ~CONN_WRITER; return httpClientNoticeRequest(request, 1); } if(object->flags & OBJECT_ABORTED) { unlockChunk(object, i); return httpClientNoticeError(request, object->code, retainAtom(object->message)); } if(connection->buf == NULL) connection->buf = get_chunk(); if(connection->buf == NULL) { unlockChunk(object, i); do_log(L_ERROR, "Couldn't allocate client buffer.\n"); connection->flags &= ~CONN_WRITER; httpClientFinish(connection, 1); return 1; } if(object->length >= 0 && request->to >= object->length) request->to = object->length; if(request->from > 0 || request->to >= 0) { if(request->method == METHOD_HEAD) { request->to = request->from; } else if(request->to < 0) { if(object->length >= 0) request->to = object->length; } } again: connection->len = 0; if((request->from <= 0 && request->to < 0) || request->method == METHOD_HEAD) { n = snnprintf(connection->buf, 0, bufsize, "HTTP/1.1 %d %s", object->code, atomString(object->message)); } else { if((object->length >= 0 && request->from >= object->length) || (request->to >= 0 && request->from >= request->to)) { unlockChunk(object, i); return 
httpClientRawError(connection, 416, internAtom("Requested range " "not satisfiable"), 0); } else { n = snnprintf(connection->buf, 0, bufsize, "HTTP/1.1 206 Partial content"); } } n = httpWriteObjectHeaders(connection->buf, n, bufsize, object, request->from, request->to); if(n < 0) goto fail; if(request->method != METHOD_HEAD && condition_result != CONDITION_NOT_MODIFIED && request->to < 0 && object->length < 0) { if(connection->version == HTTP_11) { connection->te = TE_CHUNKED; n = snnprintf(connection->buf, n, bufsize, "\r\nTransfer-Encoding: chunked"); } else { request->flags &= ~REQUEST_PERSISTENT; } } if(object->age < current_time.tv_sec) { n = snnprintf(connection->buf, n, bufsize, "\r\nAge: %d", (int)(current_time.tv_sec - object->age)); } n = snnprintf(connection->buf, n, bufsize, "\r\nConnection: %s", (request->flags & REQUEST_PERSISTENT) ? "keep-alive" : "close"); if(!(object->flags & OBJECT_LOCAL)) { if((object->flags & OBJECT_FAILED) && !proxyOffline) { n = snnprintf(connection->buf, n, bufsize, "\r\nWarning: 111 %s:%d Revalidation failed", proxyName->string, proxyPort); if(request->error_code) n = snnprintf(connection->buf, n, bufsize, " (%d %s)", request->error_code, atomString(request->error_message)); object->flags &= ~OBJECT_FAILED; } else if(proxyOffline && objectMustRevalidate(object, &request->cache_control)) { n = snnprintf(connection->buf, n, bufsize, "\r\nWarning: 112 %s:%d Disconnected operation", proxyName->string, proxyPort); } else if(objectIsStale(object, &request->cache_control)) { n = snnprintf(connection->buf, n, bufsize, "\r\nWarning: 110 %s:%d Object is stale", proxyName->string, proxyPort); } else if(object->expires < 0 && object->max_age < 0 && object->age < current_time.tv_sec - 24 * 3600) { n = snnprintf(connection->buf, n, bufsize, "\r\nWarning: 113 %s:%d Heuristic expiration", proxyName->string, proxyPort); } } n = snnprintf(connection->buf, n, bufsize, "\r\n\r\n"); if(n < 0) goto fail; connection->offset = request->from; 
if(request->method == METHOD_HEAD || condition_result == CONDITION_NOT_MODIFIED || (object->flags & OBJECT_ABORTED)) { len = 0; } else { if(i < object->numchunks) { if(object->chunks[i].size <= j) len = 0; else len = object->chunks[i].size - j; } else { len = 0; } if(request->to >= 0) len = MIN(len, request->to - request->from); } connection->offset = request->from; httpSetTimeout(connection, clientTimeout); do_log(D_CLIENT_DATA, "Serving on 0x%lx for 0x%lx: offset %d len %d\n", (unsigned long)connection, (unsigned long)object, connection->offset, len); do_stream_h(IO_WRITE | (connection->te == TE_CHUNKED && len > 0 ? IO_CHUNKED : 0), connection->fd, 0, connection->buf, n, object->chunks[i].data + j, len, httpServeObjectStreamHandler, connection); return 1; fail: rc = 0; connection->len = 0; if(!(connection->flags & CONN_BIGBUF)) rc = httpConnectionBigify(connection); if(rc > 0) { bufsize = bigBufferSize; goto again; } unlockChunk(object, i); return httpClientRawError(connection, 500, rc == 0 ? internAtom("No space for headers") : internAtom("Couldn't allocate big buffer"), 0); }
0
[ "CWE-617" ]
polipo
0e2b44af619e46e365971ea52b97457bc0778cd3
32,525,539,936,899,580,000,000,000,000,000,000,000
249
Try to read POST requests to local configuration interface correctly.
fep_client_send_text (FepClient *client, const char *text) { FepControlMessage message; message.command = FEP_CONTROL_SEND_TEXT; _fep_control_message_alloc_args (&message, 1); _fep_control_message_write_string_arg (&message, 0, text, strlen (text) + 1); if (client->filter_running) client->messages = _fep_append_control_message (client->messages, &message); else _fep_write_control_message (client->control, &message); _fep_control_message_free_args (&message); }
0
[ "CWE-264" ]
libfep
293d9d3f7565f01a9dc40b53259886832eaa2ace
218,549,732,027,379,980,000,000,000,000,000,000,000
14
Don't use abstract Unix domain sockets
static void FVMenuCondense(GWindow gw, struct gmenuitem *UNUSED(mi), GEvent *UNUSED(e)) { FontView *fv = (FontView *) GDrawGetUserData(gw); CondenseExtendDlg(fv,NULL); }
0
[ "CWE-119", "CWE-787" ]
fontforge
626f751752875a0ddd74b9e217b6f4828713573c
300,598,283,612,231,800,000,000,000,000,000,000,000
4
Warn users before discarding their unsaved scripts (#3852) * Warn users before discarding their unsaved scripts This closes #3846.
getkey_next (getkey_ctx_t ctx, PKT_public_key *pk, kbnode_t *ret_keyblock) { int rc; /* Fixme: Make sure this is proper gpg_error */ rc = lookup (ctx, ret_keyblock, ctx->want_secret); if (!rc && pk && ret_keyblock) pk_from_block (ctx, pk, *ret_keyblock); return rc; }
0
[ "CWE-310" ]
gnupg
4bde12206c5bf199dc6e12a74af8da4558ba41bf
116,907,403,954,047,520,000,000,000,000,000,000,000
10
gpg: Distinguish between missing and cleared key flags. * include/cipher.h (PUBKEY_USAGE_NONE): New. * g10/getkey.c (parse_key_usage): Set new flag. -- We do not want to use the default capabilities (derived from the algorithm) if any key flags are given in a signature. Thus if key flags are used in any way, the default key capabilities are never used. This allows to create a key with key flags set to all zero so it can't be used. This better reflects common sense.
p11_dl_close (void *dl) { #ifdef OS_WIN32 FreeLibrary (dl); #else (void) dlclose (dl); #endif }
0
[ "CWE-190" ]
p11-kit
bd670b1d4984b27d6a397b9ddafaf89ab26e4e7f
116,684,648,524,086,540,000,000,000,000,000,000,000
8
Follow-up to arithmetic overflow fix Check if nmemb is zero in p11_rpc_message_alloc_extra_array to avoid a division by zero trap. Additionally, change the reallocarray compatibility shim so that it won't assert when resizing an array to zero, and add the same nmemb != 0 check there.
bool localAddressRestored() const override { return local_address_restored_; }
0
[ "CWE-400" ]
envoy
542f84c66e9f6479bc31c6f53157c60472b25240
84,252,542,206,634,210,000,000,000,000,000,000,000
1
overload: Runtime configurable global connection limits (#147) Signed-off-by: Tony Allen <[email protected]>
MonoArray * mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig) { g_assert_not_reached (); return NULL;
0
[ "CWE-20" ]
mono
4905ef1130feb26c3150b28b97e4a96752e0d399
195,726,742,713,479,760,000,000,000,000,000,000,000
5
Handle invalid instantiation of generic methods. * verify.c: Add new function to internal verifier API to check method instantiations. * reflection.c (mono_reflection_bind_generic_method_parameters): Check the instantiation before returning it. Fixes #655847
prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, bool *need_prelocking, uint8 op) { List <FOREIGN_KEY_INFO> fk_list; List_iterator<FOREIGN_KEY_INFO> fk_list_it(fk_list); FOREIGN_KEY_INFO *fk; Query_arena *arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); table_list->table->file->get_parent_foreign_key_list(thd, &fk_list); if (thd->is_error()) { if (arena) thd->restore_active_arena(arena, &backup); return TRUE; } *need_prelocking= TRUE; while ((fk= fk_list_it++)) { // FK_OPTION_RESTRICT and FK_OPTION_NO_ACTION only need read access static bool can_write[]= { true, false, true, true, false, true }; thr_lock_type lock_type; if ((op & (1 << TRG_EVENT_DELETE) && can_write[fk->delete_method]) || (op & (1 << TRG_EVENT_UPDATE) && can_write[fk->update_method])) lock_type= TL_WRITE_ALLOW_WRITE; else lock_type= TL_READ; if (table_already_fk_prelocked(prelocking_ctx->query_tables, fk->foreign_db, fk->foreign_table, lock_type)) continue; TABLE_LIST *tl= (TABLE_LIST *) thd->alloc(sizeof(TABLE_LIST)); tl->init_one_table_for_prelocking(fk->foreign_db->str, fk->foreign_db->length, fk->foreign_table->str, fk->foreign_table->length, NULL, lock_type, false, table_list->belong_to_view, op, &prelocking_ctx->query_tables_last); } if (arena) thd->restore_active_arena(arena, &backup); return FALSE; }
0
[]
server
0168d1eda30dad4b517659422e347175eb89e923
32,291,134,261,796,070,000,000,000,000,000,000,000
49
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list Do not assume that subquery Item always present.
PackLinuxElf64::elf_find_dynamic(unsigned int key) const { Elf64_Dyn const *dynp= dynseg; if (dynp) for (; (unsigned)((char const *)dynp - (char const *)dynseg) < sz_dynseg && Elf64_Dyn::DT_NULL!=dynp->d_tag; ++dynp) if (get_te64(&dynp->d_tag)==key) { upx_uint64_t const t= elf_get_offset_from_address(get_te64(&dynp->d_val)); if (t) { return &((unsigned char const *)file_image)[(size_t)t]; } break; } return 0; }
1
[ "CWE-476", "CWE-190" ]
upx
8be9da8280dfa69d5df4417d4d81bda1cab78010
140,163,033,460,328,620,000,000,000,000,000,000,000
14
Avoid bogus values in PT_DYNAMIC segment. Detect duplicate DT_*. Detect out-of-bounds hashtab and gashtab. Detect missing DT_REL, DT_RELA. Detect out-of-bounds d_val. https://github.com/upx/upx/issues/317 modified: p_lx_elf.cpp
QPDFObjectHandle newName(std::string const& name) { return QPDFObjectHandle::newName(name); }
0
[ "CWE-125" ]
qpdf
1868a10f8b06631362618bfc85ca8646da4b4b71
214,765,649,635,213,500,000,000,000,000,000,000,000
4
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key) { uint64_t file_key, parent_key; int hole, parent; /* Expand our pending files list as necessary. */ if (heap->used >= heap->allocated) { struct file_info **new_pending_files; int new_size = heap->allocated * 2; if (heap->allocated < 1024) new_size = 1024; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } new_pending_files = (struct file_info **) malloc(new_size * sizeof(new_pending_files[0])); if (new_pending_files == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } memcpy(new_pending_files, heap->files, heap->allocated * sizeof(new_pending_files[0])); if (heap->files != NULL) free(heap->files); heap->files = new_pending_files; heap->allocated = new_size; } file_key = file->key = key; /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->used++; while (hole > 0) { parent = (hole - 1)/2; parent_key = heap->files[parent]->key; if (file_key >= parent_key) { heap->files[hole] = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. */ heap->files[hole] = heap->files[parent]; hole = parent; } heap->files[0] = file; return (ARCHIVE_OK); }
0
[ "CWE-190" ]
libarchive
3ad08e01b4d253c66ae56414886089684155af22
141,344,770,209,032,780,000,000,000,000,000,000,000
55
Issue 717: Fix integer overflow when computing location of volume descriptor The multiplication here defaulted to 'int' but calculations of file positions should always use int64_t. A simple cast suffices to fix this since the base location is always 32 bits for ISO, so multiplying by the sector size will never overflow a 64-bit integer.
static inline void unmap_mapping_range_tree(struct rb_root *root, struct zap_details *details) { struct vm_area_struct *vma; pgoff_t vba, vea, zba, zea; vma_interval_tree_foreach(vma, root, details->first_index, details->last_index) { vba = vma->vm_pgoff; vea = vba + vma_pages(vma) - 1; zba = details->first_index; if (zba < vba) zba = vba; zea = details->last_index; if (zea > vea) zea = vea; unmap_mapping_range_vma(vma, ((zba - vba) << PAGE_SHIFT) + vma->vm_start, ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, details); } }
0
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
105,888,981,241,522,920,000,000,000,000,000,000,000
24
mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunatelly. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. 
Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov <[email protected]> Original-patch-by: Michal Hocko <[email protected]> Signed-off-by: Hugh Dickins <[email protected]> Acked-by: Michal Hocko <[email protected]> Tested-by: Helge Deller <[email protected]> # parisc Signed-off-by: Linus Torvalds <[email protected]>
static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) { unsigned int size; switch (type) { case DESC_TSS: size = sizeof(tss_desc); break; case DESC_LDT: size = sizeof(ldt_desc); break; default: size = sizeof(struct desc_struct); break; } memcpy(&gdt[entry], desc, size); }
0
[ "CWE-119" ]
linux-2.6
5ac37f87ff18843aabab84cf75b2f8504c2d81fe
13,869,627,748,976,757,000,000,000,000,000,000,000
17
x86: fix ldt limit for 64 bit Fix size of LDT entries. On x86-64, ldt_desc is a double-sized descriptor. Signed-off-by: Michael Karcher <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
static int uac2_ctl_value_size(int val_type) { switch (val_type) { case USB_MIXER_S32: case USB_MIXER_U32: return 4; case USB_MIXER_S16: case USB_MIXER_U16: return 2; default: return 1; } return 0; /* unreachable */ }
0
[ "CWE-416", "CWE-787" ]
linux
124751d5e63c823092060074bd0abaae61aaa9c4
123,696,349,115,851,480,000,000,000,000,000,000,000
14
ALSA: usb-audio: Kill stray URB at exiting USB-audio driver may leave a stray URB for the mixer interrupt when it exits by some error during probe. This leads to a use-after-free error as spotted by syzkaller like: ================================================================== BUG: KASAN: use-after-free in snd_usb_mixer_interrupt+0x604/0x6f0 Call Trace: <IRQ> __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 kasan_report+0x23d/0x350 mm/kasan/report.c:409 __asan_report_load8_noabort+0x19/0x20 mm/kasan/report.c:430 snd_usb_mixer_interrupt+0x604/0x6f0 sound/usb/mixer.c:2490 __usb_hcd_giveback_urb+0x2e0/0x650 drivers/usb/core/hcd.c:1779 .... Allocated by task 1484: save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551 kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772 kmalloc ./include/linux/slab.h:493 kzalloc ./include/linux/slab.h:666 snd_usb_create_mixer+0x145/0x1010 sound/usb/mixer.c:2540 create_standard_mixer_quirk+0x58/0x80 sound/usb/quirks.c:516 snd_usb_create_quirk+0x92/0x100 sound/usb/quirks.c:560 create_composite_quirk+0x1c4/0x3e0 sound/usb/quirks.c:59 snd_usb_create_quirk+0x92/0x100 sound/usb/quirks.c:560 usb_audio_probe+0x1040/0x2c10 sound/usb/card.c:618 .... 
Freed by task 1484: save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 save_stack+0x43/0xd0 mm/kasan/kasan.c:447 set_track mm/kasan/kasan.c:459 kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524 slab_free_hook mm/slub.c:1390 slab_free_freelist_hook mm/slub.c:1412 slab_free mm/slub.c:2988 kfree+0xf6/0x2f0 mm/slub.c:3919 snd_usb_mixer_free+0x11a/0x160 sound/usb/mixer.c:2244 snd_usb_mixer_dev_free+0x36/0x50 sound/usb/mixer.c:2250 __snd_device_free+0x1ff/0x380 sound/core/device.c:91 snd_device_free_all+0x8f/0xe0 sound/core/device.c:244 snd_card_do_free sound/core/init.c:461 release_card_device+0x47/0x170 sound/core/init.c:181 device_release+0x13f/0x210 drivers/base/core.c:814 .... Actually such a URB is killed properly at disconnection when the device gets probed successfully, and what we need is to apply it for the error-path, too. In this patch, we apply snd_usb_mixer_disconnect() at releasing. Also introduce a new flag, disconnected, to struct usb_mixer_interface for not performing the disconnection procedure twice. Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0 = xcr; u64 old_xcr0 = vcpu->arch.xcr0; u64 valid_bits; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; /* * Do not allow the guest to set bits that we do not support * saving. However, xcr0 bit 0 is always set, even if the * emulated CPU does not support XSAVE (see fx_init). */ valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; if (xcr0 & ~valid_bits) return 1; if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR))) return 1; kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK) kvm_update_cpuid(vcpu); return 0; }
0
[]
kvm
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
197,823,933,592,469,100,000,000,000,000,000,000,000
33
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architecutres, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to canonical value before writing instead of injecting a #GP. Some references from Intel and AMD manuals: According to Intel SDM description of WRMSR instruction #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to AMD manual instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
xmlRelaxNGParseImportRefs(xmlRelaxNGParserCtxtPtr ctxt, xmlRelaxNGGrammarPtr grammar) { if ((ctxt == NULL) || (grammar == NULL) || (ctxt->grammar == NULL)) return(-1); if (grammar->refs == NULL) return(0); if (ctxt->grammar->refs == NULL) ctxt->grammar->refs = xmlHashCreate(10); if (ctxt->grammar->refs == NULL) { xmlRngPErr(ctxt, NULL, XML_RNGP_REF_CREATE_FAILED, "Could not create references hash\n", NULL, NULL); return(-1); } xmlHashScan(grammar->refs, xmlRelaxNGParseImportRef, ctxt); return(0); }
0
[ "CWE-134" ]
libxml2
502f6a6d08b08c04b3ddfb1cd21b2f699c1b7f5b
319,046,732,222,633,780,000,000,000,000,000,000,000
16
More format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 adds a new xmlEscapeFormatString() function to escape composed format strings
static int rtsx_usb_ms_drv_probe(struct platform_device *pdev) { struct memstick_host *msh; struct rtsx_usb_ms *host; struct rtsx_ucr *ucr; int err; ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); if (!ucr) return -ENXIO; dev_dbg(&(pdev->dev), "Realtek USB Memstick controller found\n"); msh = memstick_alloc_host(sizeof(*host), &pdev->dev); if (!msh) return -ENOMEM; host = memstick_priv(msh); host->ucr = ucr; host->msh = msh; host->pdev = pdev; host->power_mode = MEMSTICK_POWER_OFF; platform_set_drvdata(pdev, host); mutex_init(&host->host_mutex); INIT_WORK(&host->handle_req, rtsx_usb_ms_handle_req); INIT_DELAYED_WORK(&host->poll_card, rtsx_usb_ms_poll_card); msh->request = rtsx_usb_ms_request; msh->set_param = rtsx_usb_ms_set_param; msh->caps = MEMSTICK_CAP_PAR4; pm_runtime_get_noresume(ms_dev(host)); pm_runtime_set_active(ms_dev(host)); pm_runtime_enable(ms_dev(host)); err = memstick_add_host(msh); if (err) goto err_out; pm_runtime_put(ms_dev(host)); return 0; err_out: memstick_free_host(msh); pm_runtime_disable(ms_dev(host)); pm_runtime_put_noidle(ms_dev(host)); return err; }
1
[ "CWE-416" ]
linux
42933c8aa14be1caa9eda41f65cde8a3a95d3e39
64,085,819,692,496,640,000,000,000,000,000,000,000
51
memstick: rtsx_usb_ms: fix UAF This patch fixes the following issues: 1. memstick_free_host() will free the host, so the use of ms_dev(host) after it will be a problem. To fix this, move memstick_free_host() after when we are done with ms_dev(host). 2. In rtsx_usb_ms_drv_remove(), pm need to be disabled before we remove and free host otherwise memstick_check will be called and UAF will happen. [ 11.351173] BUG: KASAN: use-after-free in rtsx_usb_ms_drv_remove+0x94/0x140 [rtsx_usb_ms] [ 11.357077] rtsx_usb_ms_drv_remove+0x94/0x140 [rtsx_usb_ms] [ 11.357376] platform_remove+0x2a/0x50 [ 11.367531] Freed by task 298: [ 11.368537] kfree+0xa4/0x2a0 [ 11.368711] device_release+0x51/0xe0 [ 11.368905] kobject_put+0xa2/0x120 [ 11.369090] rtsx_usb_ms_drv_remove+0x8c/0x140 [rtsx_usb_ms] [ 11.369386] platform_remove+0x2a/0x50 [ 12.038408] BUG: KASAN: use-after-free in __mutex_lock.isra.0+0x3ec/0x7c0 [ 12.045432] mutex_lock+0xc9/0xd0 [ 12.046080] memstick_check+0x6a/0x578 [memstick] [ 12.046509] process_one_work+0x46d/0x750 [ 12.052107] Freed by task 297: [ 12.053115] kfree+0xa4/0x2a0 [ 12.053272] device_release+0x51/0xe0 [ 12.053463] kobject_put+0xa2/0x120 [ 12.053647] rtsx_usb_ms_drv_remove+0xc4/0x140 [rtsx_usb_ms] [ 12.053939] platform_remove+0x2a/0x50 Signed-off-by: Tong Zhang <[email protected]> Co-developed-by: Ulf Hansson <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Ulf Hansson <[email protected]>
_cmp_int_strings(const void **_a, const void **_b) { const char *a = *_a, *b = *_b; int ai = (int)tor_parse_long(a, 10, 1, INT_MAX, NULL, NULL); int bi = (int)tor_parse_long(b, 10, 1, INT_MAX, NULL, NULL); if (ai<bi) { return -1; } else if (ai==bi) { if (ai == 0) /* Parsing failed. */ return strcmp(a, b); return 0; } else { return 1; } }
0
[]
tor
973c18bf0e84d14d8006a9ae97fde7f7fb97e404
268,421,053,895,563,040,000,000,000,000,000,000,000
15
Fix assertion failure in tor_timegm. Fixes bug 6811.
static void gf_m2ts_reset_sections(GF_List *sections) { u32 count; GF_M2TS_Section *section; //GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[MPEG-2 TS] Deleting sections\n")); count = gf_list_count(sections); while (count) { section = gf_list_get(sections, 0); gf_list_rem(sections, 0); if (section->data) gf_free(section->data); gf_free(section); count--; } }
0
[ "CWE-416", "CWE-125" ]
gpac
1ab4860609f2e7a35634930571e7d0531297e090
200,680,416,727,601,100,000,000,000,000,000,000,000
15
fixed potential crash on PMT IOD parse - cf #1268 #1269
static void kgdb_initial_breakpoint(void) { kgdb_break_asap = 0; pr_crit("Waiting for connection from remote gdb...\n"); kgdb_breakpoint(); }
0
[ "CWE-787" ]
linux
eadb2f47a3ced5c64b23b90fd2a3463f63726066
29,057,374,428,572,710,000,000,000,000,000,000,000
7
lockdown: also lock down previous kgdb use KGDB and KDB allow read and write access to kernel memory, and thus should be restricted during lockdown. An attacker with access to a serial port (for example, via a hypervisor console, which some cloud vendors provide over the network) could trigger the debugger so it is important that the debugger respect the lockdown mode when/if it is triggered. Fix this by integrating lockdown into kdb's existing permissions mechanism. Unfortunately kgdb does not have any permissions mechanism (although it certainly could be added later) so, for now, kgdb is simply and brutally disabled by immediately exiting the gdb stub without taking any action. For lockdowns established early in the boot (e.g. the normal case) then this should be fine but on systems where kgdb has set breakpoints before the lockdown is enacted than "bad things" will happen. CVE: CVE-2022-21499 Co-developed-by: Stephen Brennan <[email protected]> Signed-off-by: Stephen Brennan <[email protected]> Reviewed-by: Douglas Anderson <[email protected]> Signed-off-by: Daniel Thompson <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
aptcTimerHandler(unsigned long arg) { u32 numbytes; u32 throughput; struct ar6_softc *ar; int status; ar = (struct ar6_softc *)arg; A_ASSERT(ar != NULL); A_ASSERT(!timer_pending(&aptcTimer)); AR6000_SPIN_LOCK(&ar->arLock, 0); /* Get the number of bytes transferred */ numbytes = aptcTR.bytesTransmitted + aptcTR.bytesReceived; aptcTR.bytesTransmitted = aptcTR.bytesReceived = 0; /* Calculate and decide based on throughput thresholds */ throughput = ((numbytes * 8)/APTC_TRAFFIC_SAMPLING_INTERVAL); /* Kbps */ if (throughput < APTC_LOWER_THROUGHPUT_THRESHOLD) { /* Enable Sleep and delete the timer */ A_ASSERT(ar->arWmiReady == true); AR6000_SPIN_UNLOCK(&ar->arLock, 0); status = wmi_powermode_cmd(ar->arWmi, REC_POWER); AR6000_SPIN_LOCK(&ar->arLock, 0); A_ASSERT(status == 0); aptcTR.timerScheduled = false; } else { A_TIMEOUT_MS(&aptcTimer, APTC_TRAFFIC_SAMPLING_INTERVAL, 0); } AR6000_SPIN_UNLOCK(&ar->arLock, 0); }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
246,241,404,853,348,250,000,000,000,000,000,000,000
33
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
CImg<T>& diffusion_tensors(const float sharpness=0.7f, const float anisotropy=0.6f, const float alpha=0.6f, const float sigma=1.1f, const bool is_sqrt=false) { CImg<Tfloat> res; const float nsharpness = std::max(sharpness,1e-5f), power1 = (is_sqrt?0.5f:1)*nsharpness, power2 = power1/(1e-7f + 1 - anisotropy); blur(alpha).normalize(0,(T)255); if (_depth>1) { // 3D get_structure_tensors().move_to(res).blur(sigma); cimg_pragma_openmp(parallel for cimg_openmp_collapse(2) cimg_openmp_if(_width>=(cimg_openmp_sizefactor)*256 && _height*_depth>=(cimg_openmp_sizefactor)*256)) cimg_forYZ(*this,y,z) { Tfloat *ptrd0 = res.data(0,y,z,0), *ptrd1 = res.data(0,y,z,1), *ptrd2 = res.data(0,y,z,2), *ptrd3 = res.data(0,y,z,3), *ptrd4 = res.data(0,y,z,4), *ptrd5 = res.data(0,y,z,5); CImg<floatT> val(3), vec(3,3); cimg_forX(*this,x) { res.get_tensor_at(x,y,z).symmetric_eigen(val,vec); const float _l1 = val[2], _l2 = val[1], _l3 = val[0], l1 = _l1>0?_l1:0, l2 = _l2>0?_l2:0, l3 = _l3>0?_l3:0, ux = vec(0,0), uy = vec(0,1), uz = vec(0,2), vx = vec(1,0), vy = vec(1,1), vz = vec(1,2), wx = vec(2,0), wy = vec(2,1), wz = vec(2,2), n1 = (float)std::pow(1 + l1 + l2 + l3,-power1), n2 = (float)std::pow(1 + l1 + l2 + l3,-power2); *(ptrd0++) = n1*(ux*ux + vx*vx) + n2*wx*wx; *(ptrd1++) = n1*(ux*uy + vx*vy) + n2*wx*wy; *(ptrd2++) = n1*(ux*uz + vx*vz) + n2*wx*wz; *(ptrd3++) = n1*(uy*uy + vy*vy) + n2*wy*wy; *(ptrd4++) = n1*(uy*uz + vy*vz) + n2*wy*wz; *(ptrd5++) = n1*(uz*uz + vz*vz) + n2*wz*wz; } } } else { // for 2D images get_structure_tensors().move_to(res).blur(sigma); cimg_pragma_openmp(parallel for cimg_openmp_if(_width>=(cimg_openmp_sizefactor)*256 && _height>=(cimg_openmp_sizefactor)*256)) cimg_forY(*this,y) { Tfloat *ptrd0 = res.data(0,y,0,0), *ptrd1 = res.data(0,y,0,1), *ptrd2 = res.data(0,y,0,2); CImg<floatT> val(2), vec(2,2); cimg_forX(*this,x) { res.get_tensor_at(x,y).symmetric_eigen(val,vec); const float _l1 = val[1], _l2 = val[0], l1 = _l1>0?_l1:0, l2 = _l2>0?_l2:0, ux = vec(1,0), uy = 
vec(1,1), vx = vec(0,0), vy = vec(0,1), n1 = (float)std::pow(1 + l1 + l2,-power1), n2 = (float)std::pow(1 + l1 + l2,-power2); *(ptrd0++) = n1*ux*ux + n2*vx*vx; *(ptrd1++) = n1*ux*uy + n2*vx*vy; *(ptrd2++) = n1*uy*uy + n2*vy*vy; } } } return res.move_to(*this); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
123,784,313,740,204,020,000,000,000,000,000,000,000
60
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
static int kvm_probe_user_return_msr(u32 msr) { u64 val; int ret; preempt_disable(); ret = rdmsrl_safe(msr, &val); if (ret) goto out; ret = wrmsrl_safe(msr, val); out: preempt_enable(); return ret; }
0
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
3,186,523,772,328,528,400,000,000,000,000,000,000
14
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <[email protected]> Signed-off-by: David Woodhouse <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static __exit void hardware_unsetup(void) { free_kvm_area(); }
0
[ "CWE-20" ]
linux-2.6
16175a796d061833aacfbd9672235f2d2725df65
165,060,749,802,121,310,000,000,000,000,000,000,000
4
KVM: VMX: Don't allow uninhibited access to EFER on i386 vmx_set_msr() does not allow i386 guests to touch EFER, but they can still do so through the default: label in the switch. If they set EFER_LME, they can oops the host. Fix by having EFER access through the normal channel (which will check for EFER_LME) even on i386. Reported-and-tested-by: Benjamin Gilbert <[email protected]> Cc: [email protected] Signed-off-by: Avi Kivity <[email protected]>
RGWOp *RGWHandler_REST_Obj_S3::op_put() { if (is_acl_op()) { return new RGWPutACLs_ObjStore_S3; } else if (is_tagging_op()) { return new RGWPutObjTags_ObjStore_S3; } if (s->init_state.src_bucket.empty()) return new RGWPutObj_ObjStore_S3; else return new RGWCopyObj_ObjStore_S3; }
0
[ "CWE-79" ]
ceph
ba0790a01ba5252db1ebc299db6e12cd758d0ff9
130,239,386,196,070,580,000,000,000,000,000,000,000
13
rgw: reject unauthenticated response-header actions Signed-off-by: Matt Benjamin <[email protected]> Reviewed-by: Casey Bodley <[email protected]> (cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400)
static int tls_construct_cke_srp(SSL *s, unsigned char **p, int *len, int *al) { #ifndef OPENSSL_NO_SRP if (s->srp_ctx.A != NULL) { /* send off the data */ *len = BN_num_bytes(s->srp_ctx.A); s2n(*len, *p); BN_bn2bin(s->srp_ctx.A, *p); *len += 2; } else { SSLerr(SSL_F_TLS_CONSTRUCT_CKE_SRP, ERR_R_INTERNAL_ERROR); return 0; } OPENSSL_free(s->session->srp_username); s->session->srp_username = OPENSSL_strdup(s->srp_ctx.login); if (s->session->srp_username == NULL) { SSLerr(SSL_F_TLS_CONSTRUCT_CKE_SRP, ERR_R_MALLOC_FAILURE); return 0; } return 1; #else SSLerr(SSL_F_TLS_CONSTRUCT_CKE_SRP, ERR_R_INTERNAL_ERROR); *al = SSL_AD_INTERNAL_ERROR; return 0; #endif }
0
[ "CWE-476" ]
openssl
efbe126e3ebb9123ac9d058aa2bb044261342aaa
60,504,933,674,098,870,000,000,000,000,000,000,000
27
Fix missing NULL checks in CKE processing Reviewed-by: Rich Salz <[email protected]>
int kvm_emulate_mwait(struct kvm_vcpu *vcpu) { pr_warn_once("kvm: MWAIT instruction emulated as NOP!\n"); return kvm_emulate_as_nop(vcpu); }
0
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
210,641,963,107,107,000,000,000,000,000,000,000,000
5
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <[email protected]> Signed-off-by: David Woodhouse <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
local void check_match(s, start, match, length) deflate_state *s; IPos start, match; int length; { /* check that the match is indeed a match */ if (zmemcmp(s->window + match, s->window + start, length) != EQUAL) { fprintf(stderr, " start %u, match %u, length %d\n", start, match, length); do { fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); } while (--length != 0); z_error("invalid match"); } if (z_verbose > 1) { fprintf(stderr,"\\[%d,%d]", start-match, length); do { putc(s->window[start++], stderr); } while (--length != 0); } }
0
[ "CWE-284", "CWE-787" ]
zlib
5c44459c3b28a9bd3283aaceab7c615f8020c531
129,085,934,202,529,460,000,000,000,000,000,000,000
20
Fix a bug that can crash deflate on some input when using Z_FIXED. This bug was reported by Danilo Ramos of Eideticom, Inc. It has lain in wait 13 years before being found! The bug was introduced in zlib 1.2.2.2, with the addition of the Z_FIXED option. That option forces the use of fixed Huffman codes. For rare inputs with a large number of distant matches, the pending buffer into which the compressed data is written can overwrite the distance symbol table which it overlays. That results in corrupted output due to invalid distances, and can result in out-of-bound accesses, crashing the application. The fix here combines the distance buffer and literal/length buffers into a single symbol buffer. Now three bytes of pending buffer space are opened up for each literal or length/distance pair consumed, instead of the previous two bytes. This assures that the pending buffer cannot overwrite the symbol table, since the maximum fixed code compressed length/distance is 31 bits, and since there are four bytes of pending space for every three bytes of symbol space.
onig_set_capture_num_limit(int num) { if (num < 0) return -1; MaxCaptureNum = num; return 0; }
0
[ "CWE-787" ]
oniguruma
f015fbdd95f76438cd86366467bb2b39870dd7c6
207,765,199,173,373,700,000,000,000,000,000,000,000
7
fix #55 : Byte value expressed in octal must be smaller than 256
static int pcd_detect(void) { char id[18]; int k, unit; struct pcd_unit *cd; printk("%s: %s version %s, major %d, nice %d\n", name, name, PCD_VERSION, major, nice); par_drv = pi_register_driver(name); if (!par_drv) { pr_err("failed to register %s driver\n", name); return -1; } k = 0; if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ cd = pcd; if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer, PI_PCD, verbose, cd->name)) { if (!pcd_probe(cd, -1, id) && cd->disk) { cd->present = 1; k++; } else pi_release(cd->pi); } } else { for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { int *conf = *drives[unit]; if (!conf[D_PRT]) continue; if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD], conf[D_UNI], conf[D_PRO], conf[D_DLY], pcd_buffer, PI_PCD, verbose, cd->name)) continue; if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) { cd->present = 1; k++; } else pi_release(cd->pi); } } if (k) return 0; printk("%s: No CD-ROM drive found\n", name); for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { blk_cleanup_queue(cd->disk->queue); cd->disk->queue = NULL; blk_mq_free_tag_set(&cd->tag_set); put_disk(cd->disk); } pi_unregister_driver(par_drv); return -1; }
1
[ "CWE-476" ]
linux
f0d1762554014ce0ae347b9f0d088f2c157c8c72
137,177,783,197,349,480,000,000,000,000,000,000,000
55
paride/pcd: Fix potential NULL pointer dereference and mem leak Syzkaller report this: pcd: pcd version 1.07, major 46, nice 0 pcd0: Autoprobe failed pcd: No CD-ROM drive found kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN PTI CPU: 1 PID: 4525 Comm: syz-executor.0 Not tainted 5.1.0-rc3+ #8 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 RIP: 0010:pcd_init+0x95c/0x1000 [pcd] Code: c4 ab f7 48 89 d8 48 c1 e8 03 80 3c 28 00 74 08 48 89 df e8 56 a3 da f7 4c 8b 23 49 8d bc 24 80 05 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 74 05 e8 39 a3 da f7 49 8b bc 24 80 05 00 00 e8 cc b2 RSP: 0018:ffff8881e84df880 EFLAGS: 00010202 RAX: 00000000000000b0 RBX: ffffffffc155a088 RCX: ffffffffc1508935 RDX: 0000000000040000 RSI: ffffc900014f0000 RDI: 0000000000000580 RBP: dffffc0000000000 R08: ffffed103ee658b8 R09: ffffed103ee658b8 R10: 0000000000000001 R11: ffffed103ee658b7 R12: 0000000000000000 R13: ffffffffc155a778 R14: ffffffffc155a4a8 R15: 0000000000000003 FS: 00007fe71bee3700(0000) GS:ffff8881f7300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055a7334441a8 CR3: 00000001e9674003 CR4: 00000000007606e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? 0xffffffffc1508000 ? 
0xffffffffc1508000 do_one_initcall+0xbc/0x47d init/main.c:901 do_init_module+0x1b5/0x547 kernel/module.c:3456 load_module+0x6405/0x8c10 kernel/module.c:3804 __do_sys_finit_module+0x162/0x190 kernel/module.c:3898 do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462e99 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fe71bee2c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99 RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003 RBP: 00007fe71bee2c70 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007fe71bee36bc R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004 Modules linked in: pcd(+) paride solos_pci atm ts_fsm rtc_mt6397 mac80211 nhc_mobility nhc_udp nhc_ipv6 nhc_hop nhc_dest nhc_fragment nhc_routing 6lowpan rtc_cros_ec memconsole intel_xhci_usb_role_switch roles rtc_wm8350 usbcore industrialio_triggered_buffer kfifo_buf industrialio asc7621 dm_era dm_persistent_data dm_bufio dm_mod tpm gnss_ubx gnss_serial serdev gnss max2165 cpufreq_dt hid_penmount hid menf21bmc_wdt rc_core n_tracesink ide_gd_mod cdns_csi2tx v4l2_fwnode videodev media pinctrl_lewisburg pinctrl_intel iptable_security iptable_raw iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun joydev mousedev ppdev kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel aes_x86_64 crypto_simd ide_pci_generic piix input_leds cryptd glue_helper psmouse ide_core intel_agp 
serio_raw intel_gtt ata_generic i2c_piix4 agpgart pata_acpi parport_pc parport floppy rtc_cmos sch_fq_codel ip_tables x_tables sha1_ssse3 sha1_generic ipv6 [last unloaded: bmc150_magn] Dumping ftrace buffer: (ftrace buffer empty) ---[ end trace d873691c3cd69f56 ]--- If alloc_disk fails in pcd_init_units, cd->disk will be NULL, however in pcd_detect and pcd_exit, it's not check this before free.It may result a NULL pointer dereference. Also when register_blkdev failed, blk_cleanup_queue() and blk_mq_free_tag_set() should be called to free resources. Reported-by: Hulk Robot <[email protected]> Fixes: 81b74ac68c28 ("paride/pcd: cleanup queues when detection fails") Signed-off-by: YueHaibing <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static void xudc_ep0_in(struct xusb_udc *udc) { struct xusb_ep *ep0 = &udc->ep[0]; struct xusb_req *req; unsigned int bytes_to_tx; void *buffer; u32 epcfgreg; u16 count = 0; u16 length; u8 *ep0rambase; u8 test_mode = udc->setup.wIndex >> 8; req = list_first_entry(&ep0->queue, struct xusb_req, queue); bytes_to_tx = req->usb_req.length - req->usb_req.actual; switch (udc->setupseqtx) { case STATUS_PHASE: switch (udc->setup.bRequest) { case USB_REQ_SET_ADDRESS: /* Set the address of the device.*/ udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, udc->setup.wValue); break; case USB_REQ_SET_FEATURE: if (udc->setup.bRequestType == USB_RECIP_DEVICE) { if (udc->setup.wValue == USB_DEVICE_TEST_MODE) udc->write_fn(udc->addr, XUSB_TESTMODE_OFFSET, test_mode); } break; } req->usb_req.actual = req->usb_req.length; xudc_done(ep0, req, 0); break; case DATA_PHASE: if (!bytes_to_tx) { /* * We're done with data transfer, next * will be zero length OUT with data toggle of * 1. Setup data_toggle. */ epcfgreg = udc->read_fn(udc->addr + ep0->offset); epcfgreg |= XUSB_EP_CFG_DATA_TOGGLE_MASK; udc->write_fn(udc->addr, ep0->offset, epcfgreg); udc->setupseqtx = STATUS_PHASE; } else { length = count = min_t(u32, bytes_to_tx, EP0_MAX_PACKET); /* Copy the data to be transmitted into the DPRAM. */ ep0rambase = (u8 __force *) (udc->addr + (ep0->rambase << 2)); buffer = req->usb_req.buf + req->usb_req.actual; req->usb_req.actual = req->usb_req.actual + length; memcpy(ep0rambase, buffer, length); } udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count); udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1); break; default: break; } }
0
[ "CWE-20", "CWE-129" ]
linux
7f14c7227f342d9932f9b918893c8814f86d2a0d
67,923,076,886,364,090,000,000,000,000,000,000,000
65
USB: gadget: validate endpoint index for xilinx udc Assure that host may not manipulate the index to point past endpoint array. Signed-off-by: Szymon Heidrich <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { if (i->type & (ITER_BVEC|ITER_KVEC)) { void *kaddr = kmap_atomic(page); size_t wanted = copy_to_iter(kaddr + offset, bytes, i); kunmap_atomic(kaddr); return wanted; } else if (likely(!(i->type & ITER_PIPE))) return copy_page_to_iter_iovec(page, offset, bytes, i); else return copy_page_to_iter_pipe(page, offset, bytes, i); }
0
[ "CWE-200" ]
linux
b9dc6f65bc5e232d1c05fe34b5daadc7e8bbf1fb
281,059,589,077,611,130,000,000,000,000,000,000,000
13
fix a fencepost error in pipe_advance() The logics in pipe_advance() used to release all buffers past the new position failed in cases when the number of buffers to release was equal to pipe->buffers. If that happened, none of them had been released, leaving pipe full. Worse, it was trivial to trigger and we end up with pipe full of uninitialized pages. IOW, it's an infoleak. Cc: [email protected] # v4.9 Reported-by: "Alan J. Wylie" <[email protected]> Tested-by: "Alan J. Wylie" <[email protected]> Signed-off-by: Al Viro <[email protected]>
int mainwindow_set_statusbar_lines(MAIN_WINDOW_REC *window, int top, int bottom) { int ret; ret = -1; if (top != 0) { ret = window->statusbar_lines_top; window->statusbar_lines_top += top; window->statusbar_lines += top; } if (bottom != 0) { ret = window->statusbar_lines_bottom; window->statusbar_lines_bottom += bottom; window->statusbar_lines += bottom; } if (top+bottom != 0) window->size_dirty = TRUE; return ret; }
0
[ "CWE-476" ]
irssi
5b5bfef03596d95079c728f65f523570dd7b03aa
227,661,854,212,568,130,000,000,000,000,000,000,000
23
check the error condition of mainwindow_create
pdf14_debug_mask_stack_state(pdf14_ctx *ctx) { return; }
0
[ "CWE-416" ]
ghostpdl
90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb
310,083,210,586,374,630,000,000,000,000,000,000,000
4
Bug 697456. Dont create new ctx when pdf14 device reenabled This bug had yet another weird case where the user created a file that pushed the pdf14 device twice. We were in that case, creating a new ctx and blowing away the original one with out proper clean up. To avoid, only create a new one when we need it.
irqreturn_t ipmi_si_irq_handler(int irq, void *data) { struct smi_info *smi_info = data; unsigned long flags; if (smi_info->io.si_type == SI_BT) /* We need to clear the IRQ flag for the BT interface. */ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_CLEAR_IRQ_BIT | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); spin_lock_irqsave(&(smi_info->si_lock), flags); smi_inc_stat(smi_info, interrupts); debug_timestamp("Interrupt"); smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); return IRQ_HANDLED; }
0
[ "CWE-416" ]
linux
401e7e88d4ef80188ffa07095ac00456f901b8c4
294,510,458,971,835,700,000,000,000,000,000,000,000
21
ipmi_si: fix use-after-free of resource->name When we excute the following commands, we got oops rmmod ipmi_si cat /proc/ioports [ 1623.482380] Unable to handle kernel paging request at virtual address ffff00000901d478 [ 1623.482382] Mem abort info: [ 1623.482383] ESR = 0x96000007 [ 1623.482385] Exception class = DABT (current EL), IL = 32 bits [ 1623.482386] SET = 0, FnV = 0 [ 1623.482387] EA = 0, S1PTW = 0 [ 1623.482388] Data abort info: [ 1623.482389] ISV = 0, ISS = 0x00000007 [ 1623.482390] CM = 0, WnR = 0 [ 1623.482393] swapper pgtable: 4k pages, 48-bit VAs, pgdp = 00000000d7d94a66 [ 1623.482395] [ffff00000901d478] pgd=000000dffbfff003, pud=000000dffbffe003, pmd=0000003f5d06e003, pte=0000000000000000 [ 1623.482399] Internal error: Oops: 96000007 [#1] SMP [ 1623.487407] Modules linked in: ipmi_si(E) nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm dm_mirror dm_region_hash dm_log iw_cm dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ses ghash_ce sha2_ce enclosure sha256_arm64 sg sha1_ce hisi_sas_v2_hw hibmc_drm sbsa_gwdt hisi_sas_main ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe mdio hns_dsaf ipmi_devintf hns_enet_drv ipmi_msghandler hns_mdio [last unloaded: ipmi_si] [ 1623.532410] CPU: 30 PID: 11438 Comm: cat Kdump: loaded Tainted: G E 5.0.0-rc3+ #168 [ 1623.541498] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 1623.548822] pstate: a0000005 (NzCv daif -PAN -UAO) [ 1623.553684] pc : string+0x28/0x98 [ 1623.557040] lr : vsnprintf+0x368/0x5e8 [ 1623.560837] sp : ffff000013213a80 [ 1623.564191] x29: ffff000013213a80 x28: ffff00001138abb5 [ 1623.569577] x27: ffff000013213c18 x26: ffff805f67d06049 [ 1623.574963] x25: 0000000000000000 x24: ffff00001138abb5 [ 1623.580349] x23: 0000000000000fb7 x22: ffff0000117ed000 [ 1623.585734] x21: ffff000011188fd8 x20: ffff805f67d07000 [ 1623.591119] x19: ffff805f67d06061 x18: ffffffffffffffff [ 1623.596505] x17: 
0000000000000200 x16: 0000000000000000 [ 1623.601890] x15: ffff0000117ed748 x14: ffff805f67d07000 [ 1623.607276] x13: ffff805f67d0605e x12: 0000000000000000 [ 1623.612661] x11: 0000000000000000 x10: 0000000000000000 [ 1623.618046] x9 : 0000000000000000 x8 : 000000000000000f [ 1623.623432] x7 : ffff805f67d06061 x6 : fffffffffffffffe [ 1623.628817] x5 : 0000000000000012 x4 : ffff00000901d478 [ 1623.634203] x3 : ffff0a00ffffff04 x2 : ffff805f67d07000 [ 1623.639588] x1 : ffff805f67d07000 x0 : ffffffffffffffff [ 1623.644974] Process cat (pid: 11438, stack limit = 0x000000008d4cbc10) [ 1623.651592] Call trace: [ 1623.654068] string+0x28/0x98 [ 1623.657071] vsnprintf+0x368/0x5e8 [ 1623.660517] seq_vprintf+0x70/0x98 [ 1623.668009] seq_printf+0x7c/0xa0 [ 1623.675530] r_show+0xc8/0xf8 [ 1623.682558] seq_read+0x330/0x440 [ 1623.689877] proc_reg_read+0x78/0xd0 [ 1623.697346] __vfs_read+0x60/0x1a0 [ 1623.704564] vfs_read+0x94/0x150 [ 1623.711339] ksys_read+0x6c/0xd8 [ 1623.717939] __arm64_sys_read+0x24/0x30 [ 1623.725077] el0_svc_common+0x120/0x148 [ 1623.732035] el0_svc_handler+0x30/0x40 [ 1623.738757] el0_svc+0x8/0xc [ 1623.744520] Code: d1000406 aa0103e2 54000149 b4000080 (39400085) [ 1623.753441] ---[ end trace f91b6a4937de9835 ]--- [ 1623.760871] Kernel panic - not syncing: Fatal exception [ 1623.768935] SMP: stopping secondary CPUs [ 1623.775718] Kernel Offset: disabled [ 1623.781998] CPU features: 0x002,21006008 [ 1623.788777] Memory Limit: none [ 1623.798329] Starting crashdump kernel... [ 1623.805202] Bye! If io_setup is called successful in try_smi_init() but try_smi_init() goes out_err before calling ipmi_register_smi(), so ipmi_unregister_smi() will not be called while removing module. It leads to the resource that allocated in io_setup() can not be freed, but the name(DEVICE_NAME) of resource is freed while removing the module. It causes use-after-free when cat /proc/ioports. Fix this by calling io_cleanup() while try_smi_init() goes to out_err. 
and don't call io_cleanup() until io_setup() returns successful to avoid warning prints. Fixes: 93c303d2045b ("ipmi_si: Clean up shutdown a bit") Cc: [email protected] Reported-by: NuoHan Qiao <[email protected]> Suggested-by: Corey Minyard <[email protected]> Signed-off-by: Yang Yingliang <[email protected]> Signed-off-by: Corey Minyard <[email protected]>
/* Report whether the current session negotiated a PSK-based key
 * exchange (plain PSK, DHE-PSK or RSA-PSK).
 *
 * Returns: 1 when the ciphersuite's key exchange is PSK-based, 0 otherwise.
 */
int _gnutls_session_is_psk(gnutls_session_t session)
{
	gnutls_kx_algorithm_t kx = session->security_parameters.cs->kx_algorithm;

	switch (kx) {
	case GNUTLS_KX_PSK:
	case GNUTLS_KX_DHE_PSK:
	case GNUTLS_KX_RSA_PSK:
		return 1;
	default:
		return 0;
	}
}
0
[]
gnutls
3d7fae761e65e9d0f16d7247ee8a464d4fe002da
53,296,907,857,597,610,000,000,000,000,000,000,000
11
valgrind: check if session ticket key is used without initialization This adds a valgrind client request for session->key.session_ticket_key to make sure that it is not used without initialization. Signed-off-by: Daiki Ueno <[email protected]>
/*
 * Apply SCTP_PEER_ADDR_PARAMS (SPP_*) settings at the most specific scope
 * supplied by the caller: a single transport, a whole association, or the
 * socket defaults.  Each trans/asoc/sp cascade below implements that
 * precedence.  Returns 0 on success or a negative errno from the
 * heartbeat primitive.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock *sp,
				       int hb_change,
				       int pmtud_change,
				       int sackdelay_change)
{
	int error;

	/* SPP_HB_DEMAND: send a heartbeat on this transport right away. */
	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored. Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set. This lets us use 0 value when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		/* Merge the new heartbeat enable/disable bits into the
		 * stored param_flags, clearing the old SPP_HB bits first.
		 */
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
	 */
	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
		if (trans) {
			trans->pathmtu = params->spp_pathmtu;
			sctp_assoc_sync_pmtu(asoc);
		} else if (asoc) {
			asoc->pathmtu = params->spp_pathmtu;
		} else {
			sp->pathmtu = params->spp_pathmtu;
		}
	}

	if (pmtud_change) {
		if (trans) {
			/* Re-probe the path MTU when discovery flips from
			 * disabled to enabled on this transport.
			 */
			int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
				(params->spp_flags & SPP_PMTUD_ENABLE);
			trans->param_flags =
				(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
			if (update) {
				sctp_transport_pmtu(trans, sctp_opt2sk(sp));
				sctp_assoc_sync_pmtu(asoc);
			}
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
	 * value of this field is ignored. Note also that a value of zero
	 * indicates the current setting should be left unchanged.
	 */
	if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
		if (trans) {
			trans->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else {
			sp->sackdelay = params->spp_sackdelay;
		}
	}

	if (sackdelay_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		}
	}

	/* Note that a value of zero indicates the current setting should
	   be left unchanged.
	 */
	if (params->spp_pathmaxrxt) {
		if (trans) {
			trans->pathmaxrxt = params->spp_pathmaxrxt;
		} else if (asoc) {
			asoc->pathmaxrxt = params->spp_pathmaxrxt;
		} else {
			sp->pathmaxrxt = params->spp_pathmaxrxt;
		}
	}

	return 0;
}
0
[ "CWE-416", "CWE-787" ]
linux
df80cd9b28b9ebaa284a41df611dbf3a2d05ca74
266,104,679,336,253,140,000,000,000,000,000,000,000
140
sctp: do not peel off an assoc from one netns to another one Now when peeling off an association to the sock in another netns, all transports in this assoc are not to be rehashed and keep use the old key in hashtable. As a transport uses sk->net as the hash key to insert into hashtable, it would miss removing these transports from hashtable due to the new netns when closing the sock and all transports are being freeed, then later an use-after-free issue could be caused when looking up an asoc and dereferencing those transports. This is a very old issue since very beginning, ChunYu found it with syzkaller fuzz testing with this series: socket$inet6_sctp() bind$inet6() sendto$inet6() unshare(0x40000000) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST() getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF() This patch is to block this call when peeling one assoc off from one netns to another one, so that the netns of all transport would not go out-sync with the key in hashtable. Note that this patch didn't fix it by rehashing transports, as it's difficult to handle the situation when the tuple is already in use in the new netns. Besides, no one would like to peel off one assoc to another netns, considering ipaddrs, ifaces, etc. are usually different. Reported-by: ChunYu Wang <[email protected]> Signed-off-by: Xin Long <[email protected]> Acked-by: Marcelo Ricardo Leitner <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
/* NOTE(review): the return type is declared on the preceding line, which
 * is not visible in this chunk.
 *
 * Return the cached load-aggregate bookkeeping for task group @tg within
 * sched domain @sd, indexed by the domain's first CPU.
 */
aggregate(struct task_group *tg, struct sched_domain *sd)
{
	return &tg->cfs_rq[sd->first_cpu]->aggregate;
}
0
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
238,394,624,123,556,720,000,000,000,000,000,000,000
4
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
/*
 * Final teardown of a UDF inode being evicted from the inode cache.
 * For inodes whose last link is gone, truncate to zero and sync the
 * on-disk inode first, then free the on-disk inode at the end; in all
 * cases release page cache, buffers and the in-core allocation data.
 */
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!is_bad_inode(inode)) {
		if (!inode->i_nlink) {
			/* Last link gone: shrink to zero and write the
			 * inode out before it is freed below.
			 */
			want_delete = 1;
			udf_setsize(inode, 0);
			udf_update_inode(inode, IS_SYNC(inode));
		}
		/* Sanity check: for non-in-ICB inodes the recorded extent
		 * length should match i_size; warn (don't fail) otherwise.
		 */
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
		    inode->i_size != iinfo->i_lenExtents) {
			udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
				 inode->i_ino, inode->i_mode,
				 (unsigned long long)inode->i_size,
				 (unsigned long long)iinfo->i_lenExtents);
		}
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	kfree(iinfo->i_data);
	iinfo->i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete) {
		udf_free_inode(inode);
	}
}
0
[ "CWE-476" ]
linux
7fc3b7c2981bbd1047916ade327beccb90994eee
184,298,838,756,135,840,000,000,000,000,000,000,000
30
udf: Fix NULL ptr deref when converting from inline format udf_expand_file_adinicb() calls directly ->writepage to write data expanded into a page. This however misses to setup inode for writeback properly and so we can crash on inode->i_wb dereference when submitting page for IO like: BUG: kernel NULL pointer dereference, address: 0000000000000158 #PF: supervisor read access in kernel mode ... <TASK> __folio_start_writeback+0x2ac/0x350 __block_write_full_page+0x37d/0x490 udf_expand_file_adinicb+0x255/0x400 [udf] udf_file_write_iter+0xbe/0x1b0 [udf] new_sync_write+0x125/0x1c0 vfs_write+0x28e/0x400 Fix the problem by marking the page dirty and going through the standard writeback path to write the page. Strictly speaking we would not even have to write the page but we want to catch e.g. ENOSPC errors early. Reported-by: butt3rflyh4ck <[email protected]> CC: [email protected] Fixes: 52ebea749aae ("writeback: make backing_dev_info host cgroup-specific bdi_writebacks") Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Jan Kara <[email protected]>
/* Print the viewer's usage/help text to stdout.
 *
 * @prog: program name shown in the usage line.
 * Returns: 0 (always succeeds).
 */
static int do_help(const char *prog)
{
	/* Per-flag help lines, printed verbatim in order. */
	static const char *const flag_help[] = {
		" -h --help print this message\n",
		" -t --test check plugin is compatible\n",
		" -i --info print plugin information\n",
		" -p --plugin set plugin path\n",
		" -c --connection set connection path\n",
	};
	size_t i;

	printf("%s, NPAPI plugin viewer. Version %s\n", NPW_VIEWER, NPW_VERSION);
	printf("\n");
	printf("usage: %s [GTK flags] [flags]\n", prog);
	for (i = 0; i < sizeof(flag_help) / sizeof(flag_help[0]); i++)
		printf("%s", flag_help[i]);
	return 0;
}
0
[ "CWE-264" ]
nspluginwrapper
7e4ab8e1189846041f955e6c83f72bc1624e7a98
299,312,719,975,859,700,000,000,000,000,000,000,000
12
Support all the new variables added
/* Return a printable string of @xcert's issuer distinguished name.
 * The buffer is allocated by crypto_print_name(); presumably the caller
 * must free it — confirm against crypto_print_name()'s contract.
 */
char* crypto_cert_issuer(X509* xcert)
{
	return crypto_print_name(X509_get_issuer_name(xcert));
}
0
[ "CWE-787" ]
FreeRDP
8305349a943c68b1bc8c158f431dc607655aadea
205,497,471,462,574,000,000,000,000,000,000,000,000
4
Fixed GHSL-2020-102 heap overflow (cherry picked from commit 197b16cc15a12813c2e4fa2d6ae9cd9c4a57e581)
/* NOTE(review): the return type is declared on the preceding line, not
 * visible in this chunk.
 *
 * Negotiate the effective read size for a mount: start from the
 * user-specified rsize (or the CIFS default), then clamp to the server's
 * advertised maximum read and, when large-MTU is unsupported, to the
 * SMB2 buffer limit.
 */
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int rsize;

	/* Honor a mount-specified rsize; otherwise use the default. */
	rsize = ctx->rsize;
	if (!rsize)
		rsize = CIFS_DEFAULT_IOSIZE;

	/* Never exceed what the server advertised. */
	rsize = min_t(unsigned int, rsize, server->max_read);

	/* Without large-MTU support, stay within the SMB2 buffer limit. */
	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);

	return rsize;
}
0
[ "CWE-476" ]
linux
d6f5e358452479fa8a773b5c6ccc9e4ec5a20880
58,088,063,839,937,540,000,000,000,000,000,000,000
14
cifs: fix NULL ptr dereference in smb2_ioctl_query_info() When calling smb2_ioctl_query_info() with invalid smb_query_info::flags, a NULL ptr dereference is triggered when trying to kfree() uninitialised rqst[n].rq_iov array. This also fixes leaked paths that are created in SMB2_open_init() which required SMB2_open_free() to properly free them. Here is a small C reproducer that triggers it #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #define die(s) perror(s), exit(1) #define QUERY_INFO 0xc018cf07 int main(int argc, char *argv[]) { int fd; if (argc < 2) exit(1); fd = open(argv[1], O_RDONLY); if (fd == -1) die("open"); if (ioctl(fd, QUERY_INFO, (uint32_t[]) { 0, 0, 0, 4, 0, 0}) == -1) die("ioctl"); close(fd); return 0; } mount.cifs //srv/share /mnt -o ... gcc repro.c && ./a.out /mnt/f0 [ 1832.124468] CIFS: VFS: \\w22-dc.zelda.test\test Invalid passthru query flags: 0x4 [ 1832.125043] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 1832.125764] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] [ 1832.126241] CPU: 3 PID: 1133 Comm: a.out Not tainted 5.17.0-rc8 #2 [ 1832.126630] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014 [ 1832.127322] RIP: 0010:smb2_ioctl_query_info+0x7a3/0xe30 [cifs] [ 1832.127749] Code: 00 00 00 fc ff df 48 c1 ea 03 80 3c 02 00 0f 85 6c 05 00 00 48 b8 00 00 00 00 00 fc ff df 4d 8b 74 24 28 4c 89 f2 48 c1 ea 03 <80> 3c 02 00 0f 85 cb 04 00 00 49 8b 3e e8 bb fc fa ff 48 89 da 48 [ 1832.128911] RSP: 0018:ffffc90000957b08 EFLAGS: 00010256 [ 1832.129243] RAX: dffffc0000000000 RBX: ffff888117e9b850 RCX: ffffffffa020580d [ 1832.129691] RDX: 0000000000000000 RSI: 0000000000000004 RDI: ffffffffa043a2c0 [ 1832.130137] RBP: ffff888117e9b878 R08: 0000000000000001 R09: 0000000000000003 [ 1832.130585] R10: fffffbfff4087458 R11: 
0000000000000001 R12: ffff888117e9b800 [ 1832.131037] R13: 00000000ffffffea R14: 0000000000000000 R15: ffff888117e9b8a8 [ 1832.131485] FS: 00007fcee9900740(0000) GS:ffff888151a00000(0000) knlGS:0000000000000000 [ 1832.131993] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1832.132354] CR2: 00007fcee9a1ef5e CR3: 0000000114cd2000 CR4: 0000000000350ee0 [ 1832.132801] Call Trace: [ 1832.132962] <TASK> [ 1832.133104] ? smb2_query_reparse_tag+0x890/0x890 [cifs] [ 1832.133489] ? cifs_mapchar+0x460/0x460 [cifs] [ 1832.133822] ? rcu_read_lock_sched_held+0x3f/0x70 [ 1832.134125] ? cifs_strndup_to_utf16+0x15b/0x250 [cifs] [ 1832.134502] ? lock_downgrade+0x6f0/0x6f0 [ 1832.134760] ? cifs_convert_path_to_utf16+0x198/0x220 [cifs] [ 1832.135170] ? smb2_check_message+0x1080/0x1080 [cifs] [ 1832.135545] cifs_ioctl+0x1577/0x3320 [cifs] [ 1832.135864] ? lock_downgrade+0x6f0/0x6f0 [ 1832.136125] ? cifs_readdir+0x2e60/0x2e60 [cifs] [ 1832.136468] ? rcu_read_lock_sched_held+0x3f/0x70 [ 1832.136769] ? __rseq_handle_notify_resume+0x80b/0xbe0 [ 1832.137096] ? __up_read+0x192/0x710 [ 1832.137327] ? __ia32_sys_rseq+0xf0/0xf0 [ 1832.137578] ? 
__x64_sys_openat+0x11f/0x1d0 [ 1832.137850] __x64_sys_ioctl+0x127/0x190 [ 1832.138103] do_syscall_64+0x3b/0x90 [ 1832.138378] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 1832.138702] RIP: 0033:0x7fcee9a253df [ 1832.138937] Code: 00 48 89 44 24 18 31 c0 48 8d 44 24 60 c7 04 24 10 00 00 00 48 89 44 24 08 48 8d 44 24 20 48 89 44 24 10 b8 10 00 00 00 0f 05 <41> 89 c0 3d 00 f0 ff ff 77 1f 48 8b 44 24 18 64 48 2b 04 25 28 00 [ 1832.140107] RSP: 002b:00007ffeba94a8a0 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 1832.140606] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fcee9a253df [ 1832.141058] RDX: 00007ffeba94a910 RSI: 00000000c018cf07 RDI: 0000000000000003 [ 1832.141503] RBP: 00007ffeba94a930 R08: 00007fcee9b24db0 R09: 00007fcee9b45c4e [ 1832.141948] R10: 00007fcee9918d40 R11: 0000000000000246 R12: 00007ffeba94aa48 [ 1832.142396] R13: 0000000000401176 R14: 0000000000403df8 R15: 00007fcee9b78000 [ 1832.142851] </TASK> [ 1832.142994] Modules linked in: cifs cifs_arc4 cifs_md4 bpf_preload [last unloaded: cifs] Cc: [email protected] Signed-off-by: Paulo Alcantara (SUSE) <[email protected]> Signed-off-by: Steve French <[email protected]>
/* NOTE(review): the return type is declared on the preceding line, not
 * visible in this chunk.
 *
 * Arch hook run on the return-to-userspace path when thread work flags
 * are set: deliver pending signals and run the tracehook notify-resume
 * callback.  @syscall is non-zero on the syscall-return path (passed
 * through to do_signal; presumably drives syscall-restart handling —
 * confirm against do_signal()).
 */
do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	if (thread_flags & _TIF_SIGPENDING)
		do_signal(&current->blocked, regs, syscall);

	if (thread_flags & _TIF_NOTIFY_RESUME) {
		/* Clear the flag before running the hook so new requests
		 * made by the hook itself are not lost.
		 */
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
}
1
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
43,731,854,912,297,120,000,000,000,000,000,000,000
10
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. 
This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
/* NOTE(review): the return type is declared on the preceding line, not
 * visible in this chunk.
 *
 * Completion callback for reading a Nautilus .desktop/link file.  On
 * success, parses the file contents into uri/name/icon/launcher flags;
 * on failure the NULL/FALSE defaults are reported.  The directory and
 * file refs taken here bracket link_info_done() and the change
 * notification so neither object dies mid-callback.
 */
link_info_got_data (NautilusDirectory *directory,
		    NautilusFile *file,
		    gboolean result,
		    goffset bytes_read,
		    char *file_contents)
{
	char *link_uri, *uri, *name, *icon;
	gboolean is_launcher;
	gboolean is_foreign;

	nautilus_directory_ref (directory);

	uri = NULL;
	name = NULL;
	icon = NULL;
	is_launcher = FALSE;
	is_foreign = FALSE;

	/* Handle the case where we read the Nautilus link. */
	if (result) {
		link_uri = nautilus_file_get_uri (file);
		nautilus_link_get_link_info_given_file_contents (file_contents, bytes_read, link_uri,
								 &uri, &name, &icon,
								 &is_launcher, &is_foreign);
		g_free (link_uri);
	} else {
		/* FIXME bugzilla.gnome.org 42433: We should report this error to the user. */
	}

	nautilus_file_ref (file);
	link_info_done (directory, file, uri, name, icon, is_launcher, is_foreign);
	nautilus_file_changed (file);
	nautilus_file_unref (file);

	g_free (uri);
	g_free (name);
	g_free (icon);

	nautilus_directory_unref (directory);
}
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
161,893,264,456,971,590,000,000,000,000,000,000,000
39
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
/*
 * 'read_cups_files_conf()' - Read the cups-files.conf file holding the
 * privileged scheduler settings (User/Group, Sandboxing, SystemGroup,
 * PrintcapFormat, ...).
 *
 * NOTE(review): the return type is declared on the preceding line, not
 * visible in this chunk.  Returns 1 on success, 0 when a bad directive
 * is fatal (FatalErrors includes CUPSD_FATAL_CONFIG).
 */
read_cups_files_conf(cups_file_t *fp)	/* I - File to read from */
{
  int		linenum;		/* Current line number */
  char		line[HTTP_MAX_BUFFER],	/* Line from file */
		*value;			/* Value from line */
  struct group	*group;			/* Group */


 /*
  * Loop through each line in the file...
  */

  linenum = 0;

  while (cupsFileGetConf(fp, line, sizeof(line), &value, &linenum))
  {
    if (!_cups_strcasecmp(line, "FatalErrors"))
      FatalErrors = parse_fatal_errors(value);
    else if (!_cups_strcasecmp(line, "Group") && value)
    {
     /*
      * Group ID to run as...
      */

      if (isdigit(value[0]))
        Group = (gid_t)atoi(value);
      else
      {
        endgrent();
	group = getgrnam(value);

	if (group != NULL)
	  Group = group->gr_gid;
	else
	{
	  cupsdLogMessage(CUPSD_LOG_ERROR,
	                  "Unknown Group \"%s\" on line %d of %s.", value,
	                  linenum, CupsFilesFile);
	  if (FatalErrors & CUPSD_FATAL_CONFIG)
	    return (0);
	}
      }
    }
    else if (!_cups_strcasecmp(line, "PrintcapFormat") && value)
    {
     /*
      * Format of printcap file?
      */

      if (!_cups_strcasecmp(value, "bsd"))
        PrintcapFormat = PRINTCAP_BSD;
      else if (!_cups_strcasecmp(value, "plist"))
        PrintcapFormat = PRINTCAP_PLIST;
      else if (!_cups_strcasecmp(value, "solaris"))
        PrintcapFormat = PRINTCAP_SOLARIS;
      else
      {
	cupsdLogMessage(CUPSD_LOG_ERROR,
	                "Unknown PrintcapFormat \"%s\" on line %d of %s.",
	                value, linenum, CupsFilesFile);
	if (FatalErrors & CUPSD_FATAL_CONFIG)
	  return (0);
      }
    }
    else if (!_cups_strcasecmp(line, "Sandboxing") && value)
    {
     /*
      * Level of sandboxing?  Turning it off is only honored for
      * non-root (getuid() != 0) runs, and even then is warned about.
      */

      if (!_cups_strcasecmp(value, "off") && getuid())
      {
	Sandboxing = CUPSD_SANDBOXING_OFF;
	cupsdLogMessage(CUPSD_LOG_WARN, "Disabling sandboxing is not recommended (line %d of %s)", linenum, CupsFilesFile);
      }
      else if (!_cups_strcasecmp(value, "relaxed"))
	Sandboxing = CUPSD_SANDBOXING_RELAXED;
      else if (!_cups_strcasecmp(value, "strict"))
	Sandboxing = CUPSD_SANDBOXING_STRICT;
      else
      {
	cupsdLogMessage(CUPSD_LOG_ERROR,
	                "Unknown Sandboxing \"%s\" on line %d of %s.",
	                value, linenum, CupsFilesFile);
	if (FatalErrors & CUPSD_FATAL_CONFIG)
	  return (0);
      }
    }
    else if (!_cups_strcasecmp(line, "SystemGroup") && value)
    {
     /*
      * SystemGroup (admin) group(s)...
      */

      if (!parse_groups(value, linenum))
      {
	if (FatalErrors & CUPSD_FATAL_CONFIG)
	  return (0);
      }
    }
    else if (!_cups_strcasecmp(line, "User") && value)
    {
     /*
      * User ID to run as...  UID 0 is rejected outright, whether given
      * numerically or by name, since running cupsd as root is unsafe.
      */

      if (isdigit(value[0] & 255))
      {
        int uid = atoi(value);

	if (!uid)
	{
	  cupsdLogMessage(CUPSD_LOG_ERROR,
	                  "Will not use User 0 as specified on line %d of %s "
	                  "for security reasons. You must use a non-"
	                  "privileged account instead.",
	                  linenum, CupsFilesFile);
	  if (FatalErrors & CUPSD_FATAL_CONFIG)
	    return (0);
	}
	else
	  User = (uid_t)atoi(value);
      }
      else
      {
        struct passwd *p;	/* Password information */

        endpwent();
	p = getpwnam(value);

	if (p)
	{
	  if (!p->pw_uid)
	  {
	    cupsdLogMessage(CUPSD_LOG_ERROR,
	                    "Will not use User %s (UID=0) as specified on line "
	                    "%d of %s for security reasons. You must use a "
	                    "non-privileged account instead.",
	                    value, linenum, CupsFilesFile);
	    if (FatalErrors & CUPSD_FATAL_CONFIG)
	      return (0);
	  }
	  else
	    User = p->pw_uid;
	}
	else
	{
	  cupsdLogMessage(CUPSD_LOG_ERROR,
	                  "Unknown User \"%s\" on line %d of %s.",
	                  value, linenum, CupsFilesFile);
	  if (FatalErrors & CUPSD_FATAL_CONFIG)
	    return (0);
	}
      }
    }
    else if (!_cups_strcasecmp(line, "ServerCertificate") ||
             !_cups_strcasecmp(line, "ServerKey"))
    {
      /* Deprecated TLS directives: log and ignore. */
      cupsdLogMessage(CUPSD_LOG_INFO,
                      "The \"%s\" directive on line %d of %s is no longer "
                      "supported; this will become an error in a future "
                      "release.",
                      line, linenum, CupsFilesFile);
    }
    else if (!parse_variable(CupsFilesFile, linenum, line, value,
                             sizeof(cupsfiles_vars) / sizeof(cupsfiles_vars[0]),
                             cupsfiles_vars) &&
             (FatalErrors & CUPSD_FATAL_CONFIG))
      return (0);
  }

  return (1);
}
1
[]
cups
d47f6aec436e0e9df6554436e391471097686ecc
134,273,984,066,564,700,000,000,000,000,000,000,000
173
Fix local privilege escalation to root and sandbox bypasses in scheduler (rdar://37836779, rdar://37836995, rdar://37837252, rdar://37837581)
/*
 * Allocate, initialize and link a new blkcg_gq for the (blkcg, q) pair.
 * Caller holds the RCU read lock and q->queue_lock, and may pass a
 * pre-allocated @new_blkg (ownership transfers here: it is consumed on
 * success or freed on error).  Returns the new blkg or an ERR_PTR.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

	/* Error unwind: each label releases what was acquired above it. */
err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}
0
[ "CWE-415" ]
linux
9b54d816e00425c3a517514e0d677bb3cec49258
180,498,343,183,135,400,000,000,000,000,000,000,000
86
blkcg: fix double free of new_blkg in blkcg_init_queue If blkg_create fails, new_blkg passed as an argument will be freed by blkg_create, so there is no need to free it again. Signed-off-by: Hou Tao <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
return SUCCESS; } static PHP_MINIT_FUNCTION(tidy) { REGISTER_INI_ENTRIES(); REGISTER_TIDY_CLASS(tidy, doc, NULL, 0); REGISTER_TIDY_CLASS(tidyNode, node, NULL, ZEND_ACC_FINAL_CLASS); tidy_object_handlers_doc.cast_object = tidy_doc_cast_handler; tidy_object_handlers_node.cast_object = tidy_node_cast_handler; _php_tidy_register_tags(INIT_FUNC_ARGS_PASSTHRU); _php_tidy_register_nodetypes(INIT_FUNC_ARGS_PASSTHRU);
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
14,869,540,238,980,655,000,000,000,000,000,000,000
14
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
// Verify that a flood of PRIORITY frames is detected by the server codec:
// after priorityFlood() queues the frames, flushing them must raise
// ServerCodecError instead of processing the flood.
TEST_P(Http2CodecImplTest, PriorityFlood) {
  priorityFlood();
  EXPECT_THROW(client_->sendPendingFrames(), ServerCodecError);
}
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
320,037,149,223,144,480,000,000,000,000,000,000,000
4
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
/* Parse an ext2 "sb=<block>" mount option from the option string at
 * *@data.  On a valid specification, advance *@data past the parsed
 * value (and any trailing comma) and return the block number; on a
 * missing or malformed option return 1, the default superblock location.
 */
static unsigned long get_sb_block(void **data)
{
	char *opts = (char *) *data;
	unsigned long sb_block;

	/* No options at all, or the string does not start with "sb=". */
	if (!opts || strncmp(opts, "sb=", 3) != 0)
		return 1;	/* Default location */

	opts += 3;
	sb_block = simple_strtoul(opts, &opts, 0);

	/* Anything but end-of-string or a comma after the number is bad. */
	if (*opts != '\0' && *opts != ',') {
		printk("EXT2-fs: Invalid sb specification: %s\n",
		       (char *) *data);
		return 1;
	}
	if (*opts == ',')
		opts++;
	*data = (void *) opts;
	return sb_block;
}
0
[ "CWE-241", "CWE-19" ]
linux
be0726d33cb8f411945884664924bed3cb8c70ee
228,779,041,997,005,900,000,000,000,000,000,000,000
19
ext2: convert to mbcache2 The conversion is generally straightforward. We convert filesystem from a global cache to per-fs one. Similarly to ext4 the tricky part is that xattr block corresponding to found mbcache entry can get freed before we get buffer lock for that block. So we have to check whether the entry is still valid after getting the buffer lock. Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Theodore Ts'o <[email protected]>
/* Parse the payload of an ISOBMFF PixelAspectRatio ('pasp') box: two
 * 32-bit spacing values, consuming 8 bytes of the box's remaining size.
 */
GF_Err pasp_Read(GF_Box *s, GF_BitStream *bs)
{
	GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s;
	ptr->hSpacing = gf_bs_read_u32(bs);
	ptr->vSpacing = gf_bs_read_u32(bs);
	ISOM_DECREASE_SIZE(ptr, 8);	/* account for the 8 bytes just read */
	return GF_OK;
}
0
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
54,777,020,291,323,330,000,000,000,000,000,000,000
8
fixed 2 possible heap overflows (inc. #1088)
/*
 * Completion callback for the metadata-read phase of an NVMe Compare
 * command.  Pulls the host-supplied metadata into a bounce buffer and
 * compares it against the on-disk metadata read into ctx->mdata; when
 * protection information is active, the DIF tuple bytes are verified by
 * nvme_dif_check() and excluded from the byte-wise comparison.  Always
 * frees the compare context and completes the request.
 */
static void nvme_compare_mdata_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeNamespace *ns = req->ns;
    NvmeCtrl *n = nvme_ctrl(req);
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint32_t reftag = le32_to_cpu(rw->reftag);
    struct nvme_compare_ctx *ctx = req->opaque;
    g_autofree uint8_t *buf = NULL;
    BlockBackend *blk = ns->blkconf.blk;
    BlockAcctCookie *acct = &req->acct;
    BlockAcctStats *stats = blk_get_stats(blk);
    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_compare_mdata_cb(nvme_cid(req));

    if (ret) {
        /* The underlying metadata read failed; report the AIO error. */
        block_acct_failed(stats, acct);
        nvme_aio_err(req, ret);
        goto out;
    }

    buf = g_malloc(ctx->mdata.iov.size);

    /* Copy the host's metadata into buf for comparison. */
    status = nvme_bounce_mdata(n, buf, ctx->mdata.iov.size,
                               NVME_TX_DIRECTION_TO_DEVICE, req);
    if (status) {
        req->status = status;
        goto out;
    }

    if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
        uint64_t slba = le64_to_cpu(rw->slba);
        uint8_t *bufp;
        uint8_t *mbufp = ctx->mdata.bounce;
        uint8_t *end = mbufp + ctx->mdata.iov.size;
        int16_t pil = 0;

        status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                                ctx->mdata.bounce, ctx->mdata.iov.size,
                                prinfo, slba, apptag, appmask, &reftag);
        if (status) {
            req->status = status;
            goto out;
        }

        /*
         * When formatted with protection information, do not compare the DIF
         * tuple.
         */
        if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
            pil = ns->lbaf.ms - sizeof(NvmeDifTuple);
        }

        /* Compare per-sector metadata, skipping pil DIF bytes each step. */
        for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) {
            if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) {
                req->status = NVME_CMP_FAILURE;
                goto out;
            }
        }

        goto out;
    }

    if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) {
        req->status = NVME_CMP_FAILURE;
        goto out;
    }

    block_acct_done(stats, acct);

out:
    /* Unconditional cleanup of both data and metadata bounce state. */
    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    nvme_enqueue_req_completion(nvme_cq(req), req);
}
0
[]
qemu
736b01642d85be832385063f278fe7cd4ffb5221
113,327,137,796,586,310,000,000,000,000,000,000,000
85
hw/nvme: fix CVE-2021-3929 This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the device itself. This still allows DMA to MMIO regions of other devices (e.g. doing P2P DMA to the controller memory buffer of another NVMe device). Fixes: CVE-2021-3929 Reported-by: Qiuhao Li <[email protected]> Reviewed-by: Keith Busch <[email protected]> Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Klaus Jensen <[email protected]>
// Emit the "xpdf" PostScript procset resource for the current output
// level.  Lines in the `prolog` table starting with '~' are control
// lines: the characters after '~' ('1','2','3','s','n') select which
// level / separation combinations the following lines apply to.  For
// Level 3 output the CMap prolog is appended as well.
void PSOutputDev::writeXpdfProcset() {
  GBool lev1, lev2, lev3, sep, nonSep;
  char **p;
  char *q;

  writePSFmt("%%BeginResource: procset xpdf {0:s} 0\n", "3.00");
  writePSFmt("%%Copyright: {0:s}\n", xpdfCopyright);
  // All categories enabled until the first '~' control line is seen.
  lev1 = lev2 = lev3 = sep = nonSep = gTrue;
  for (p = prolog; *p; ++p) {
    if ((*p)[0] == '~') {
      // Control line: reset all flags, then enable the listed ones.
      lev1 = lev2 = lev3 = sep = nonSep = gFalse;
      for (q = *p + 1; *q; ++q) {
	switch (*q) {
	case '1': lev1 = gTrue; break;
	case '2': lev2 = gTrue; break;
	case '3': lev3 = gTrue; break;
	case 's': sep = gTrue; break;
	case 'n': nonSep = gTrue; break;
	}
      }
    } else if ((level == psLevel1 && lev1 && nonSep) ||
	       (level == psLevel1Sep && lev1 && sep) ||
	       (level == psLevel2 && lev2 && nonSep) ||
	       (level == psLevel2Sep && lev2 && sep) ||
	       (level == psLevel3 && lev3 && nonSep) ||
	       (level == psLevel3Sep && lev3 && sep)) {
      writePSFmt("{0:s}\n", *p);
    }
  }
  writePS("%%EndResource\n");

  if (level >= psLevel3) {
    for (p = cmapProlog; *p; ++p) {
      writePSFmt("{0:s}\n", *p);
    }
  }
}
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
169,190,468,809,389,240,000,000,000,000,000,000,000
37
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
static void MagickPNGWarningHandler(png_struct *ping,png_const_charp message) { ExceptionInfo *exception; Image *image; PNGErrorInfo *error_info; if (LocaleCompare(message, "Missing PLTE before tRNS") == 0) png_error(ping, message); error_info=(PNGErrorInfo *) png_get_error_ptr(ping); image=error_info->image; exception=error_info->exception; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s warning: %s", png_get_libpng_ver(NULL),message); (void) ThrowMagickException(exception,GetMagickModule(),CoderWarning, message,"`%s'",image->filename); }
0
[ "CWE-476" ]
ImageMagick
816ecab6c532ae086ff4186b3eaf4aa7092d536f
155,862,348,082,870,360,000,000,000,000,000,000,000
23
https://github.com/ImageMagick/ImageMagick/issues/58
xmlIsLetter(int c) { return(IS_BASECHAR(c) || IS_IDEOGRAPHIC(c)); }
0
[ "CWE-119" ]
libxml2
23f05e0c33987d6605387b300c4be5da2120a7ab
162,438,645,702,244,370,000,000,000,000,000,000,000
3
Detect excessive entities expansion upon replacement If entities expansion in the XML parser is asked for, it is possble to craft relatively small input document leading to excessive on-the-fly content generation. This patch accounts for those replacement and stop parsing after a given threshold. it can be bypassed as usual with the HUGE parser option.
void mlsd_fact(char fact, char *buf, size_t len, char *name, char *perms, struct stat *st) { char size[20]; switch (fact) { case 'm': strlcat(buf, "modify=", len); strlcat(buf, mlsd_time(st->st_mtime), len); break; case 'p': strlcat(buf, "perm=", len); strlcat(buf, perms, len); break; case 't': strlcat(buf, "type=", len); strlcat(buf, mlsd_type(name, st->st_mode), len); break; case 's': if (S_ISDIR(st->st_mode)) return; snprintf(size, sizeof(size), "size=%" PRIu64, st->st_size); strlcat(buf, size, len); break; default: return; } strlcat(buf, ";", len); }
0
[ "CWE-120", "CWE-787" ]
uftpd
0fb2c031ce0ace07cc19cd2cb2143c4b5a63c9dd
269,265,994,209,335,030,000,000,000,000,000,000,000
34
FTP: Fix buffer overflow in PORT parser, reported by Aaron Esau Signed-off-by: Joachim Nilsson <[email protected]>
int cli_matchmeta(cli_ctx *ctx, const char *fname, size_t fsizec, size_t fsizer, int encrypted, unsigned int filepos, int res1, void *res2) { const struct cli_cdb *cdb; unsigned int viruses_found = 0; int ret = CL_CLEAN; cli_dbgmsg("CDBNAME:%s:%llu:%s:%llu:%llu:%d:%u:%u:%p\n", cli_ftname(cli_get_container_type(ctx, -1)), (long long unsigned)fsizec, fname, (long long unsigned)fsizec, (long long unsigned)fsizer, encrypted, filepos, res1, res2); if (ctx->engine && ctx->engine->cb_meta) if (ctx->engine->cb_meta(cli_ftname(cli_get_container_type(ctx, -1)), fsizec, fname, fsizer, encrypted, filepos, ctx->cb_ctx) == CL_VIRUS) { cli_dbgmsg("inner file blacklisted by callback: %s\n", fname); ret = cli_append_virus(ctx, "Detected.By.Callback"); viruses_found++; if(!SCAN_ALL || ret != CL_CLEAN) return ret; } if(!ctx->engine || !(cdb = ctx->engine->cdb)) return CL_CLEAN; do { if(cdb->ctype != CL_TYPE_ANY && cdb->ctype != cli_get_container_type(ctx, -1)) continue; if(cdb->encrypted != 2 && cdb->encrypted != encrypted) continue; if(cdb->res1 && (cdb->ctype == CL_TYPE_ZIP || cdb->ctype == CL_TYPE_RAR) && cdb->res1 != res1) continue; #define CDBRANGE(field, val) \ if(field[0] != CLI_OFF_ANY) { \ if(field[0] == field[1] && field[0] != val) \ continue; \ else if(field[0] != field[1] && ((field[0] && field[0] > val) ||\ (field[1] && field[1] < val))) \ continue; \ } CDBRANGE(cdb->csize, cli_get_container_size(ctx, -1)); CDBRANGE(cdb->fsizec, fsizec); CDBRANGE(cdb->fsizer, fsizer); CDBRANGE(cdb->filepos, filepos); if(cdb->name.re_magic && (!fname || cli_regexec(&cdb->name, fname, 0, NULL, 0) == REG_NOMATCH)) continue; ret = cli_append_virus(ctx, cdb->virname); viruses_found++; if(!SCAN_ALL || ret != CL_CLEAN) return ret; } while((cdb = cdb->next)); if (SCAN_ALL && viruses_found) return CL_VIRUS; return CL_CLEAN; }
1
[]
clamav-devel
167c0079292814ec5523d0b97a9e1b002bf8819b
117,389,727,780,499,880,000,000,000,000,000,000,000
61
fix 0.99.3 false negative of virus Pdf.Exploit.CVE_2016_1046-1.
set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p) { tor_assert(conn); tor_assert(p); tor_assert(p->is_unix_addr); conn->base_.socket_family = AF_UNIX; tor_addr_make_unspec(&conn->base_.addr); conn->base_.port = 1; conn->base_.address = tor_strdup(p->unix_addr); return 0; }
0
[ "CWE-532" ]
tor
09ea89764a4d3a907808ed7d4fe42abfe64bd486
172,730,866,335,438,560,000,000,000,000,000,000,000
12
Fix log-uninitialized-stack bug in rend_service_intro_established. Fixes bug 23490; bugfix on 0.2.7.2-alpha. TROVE-2017-008 CVE-2017-0380
bool isRoutable() const override { return false; }
0
[ "CWE-400" ]
envoy
dfddb529e914d794ac552e906b13d71233609bf7
185,242,985,801,624,460,000,000,000,000,000,000,000
1
listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. Signed-off-by: Tony Allen <[email protected]>
static void ipa_region_frame(wmfAPI * API, wmfPolyRectangle_t * poly_rect) { /* Save graphic wand */ (void) PushDrawingWand(WmfDrawingWand); if (TO_FILL(poly_rect) || TO_DRAW(poly_rect)) { long i; draw_fill_color_string(WmfDrawingWand,"none"); util_set_brush(API, poly_rect->dc, BrushApplyStroke); for (i = 0; i < (long) poly_rect->count; i++) { DrawRectangle(WmfDrawingWand, XC(poly_rect->TL[i].x), YC(poly_rect->TL[i].y), XC(poly_rect->BR[i].x), YC(poly_rect->BR[i].y)); } } /* Restore graphic wand */ (void) PopDrawingWand(WmfDrawingWand); }
0
[ "CWE-772" ]
ImageMagick
b2b48d50300a9fbcd0aa0d9230fd6d7a08f7671e
36,851,430,811,751,483,000,000,000,000,000,000,000
24
https://github.com/ImageMagick/ImageMagick/issues/544
static int sctp_setsockopt_default_sndinfo(struct sock *sk, struct sctp_sndinfo *info, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; if (optlen != sizeof(*info)) return -EINVAL; if (info->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info->snd_assoc_id); if (!asoc && info->snd_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info->snd_sid; asoc->default_flags = info->snd_flags; asoc->default_ppid = info->snd_ppid; asoc->default_context = info->snd_context; return 0; } if (sctp_style(sk, TCP)) info->snd_assoc_id = SCTP_FUTURE_ASSOC; if (info->snd_assoc_id == SCTP_FUTURE_ASSOC || info->snd_assoc_id == SCTP_ALL_ASSOC) { sp->default_stream = info->snd_sid; sp->default_flags = info->snd_flags; sp->default_ppid = info->snd_ppid; sp->default_context = info->snd_context; } if (info->snd_assoc_id == SCTP_CURRENT_ASSOC || info->snd_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &sp->ep->asocs, asocs) { asoc->default_stream = info->snd_sid; asoc->default_flags = info->snd_flags; asoc->default_ppid = info->snd_ppid; asoc->default_context = info->snd_context; } } return 0; }
0
[ "CWE-362" ]
linux
b166a20b07382b8bc1dcee2a448715c9c2c81b5b
15,348,127,749,340,300,000,000,000,000,000,000,000
51
net/sctp: fix race condition in sctp_destroy_sock If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock held and sp->do_auto_asconf is true, then an element is removed from the auto_asconf_splist without any proper locking. This can happen in the following functions: 1. In sctp_accept, if sctp_sock_migrate fails. 2. In inet_create or inet6_create, if there is a bpf program attached to BPF_CGROUP_INET_SOCK_CREATE which denies creation of the sctp socket. The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock instead of sctp_close. This addresses CVE-2021-23133. Reported-by: Or Cohen <[email protected]> Reviewed-by: Xin Long <[email protected]> Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications") Signed-off-by: Or Cohen <[email protected]> Acked-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: David S. Miller <[email protected]>
TEST_P(RBACIntegrationTest, HeaderMatchCondition) { config_helper_.addFilter(fmt::format(RBAC_CONFIG_HEADER_MATCH_CONDITION, "yyy")); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); auto response = codec_client_->makeRequestWithBody( Http::TestRequestHeaderMapImpl{ {":method", "POST"}, {":path", "/path"}, {":scheme", "http"}, {":authority", "host"}, {"xxx", "yyy"}, }, 1024); waitForNextUpstreamRequest(); upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); response->waitForEndStream(); ASSERT_TRUE(response->complete()); EXPECT_EQ("200", response->headers().getStatusValue()); }
0
[]
envoy
2c60632d41555ec8b3d9ef5246242be637a2db0f
60,391,297,392,304,760,000,000,000,000,000,000,000
22
http: header map security fixes for duplicate headers (#197) Previously header matching did not match on all headers for non-inline headers. This patch changes the default behavior to always logically match on all headers. Multiple individual headers will be logically concatenated with ',' similar to what is done with inline headers. This makes the behavior effectively consistent. This behavior can be temporary reverted by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to "false". Targeted fixes have been additionally performed on the following extensions which make them consider all duplicate headers by default as a comma concatenated list: 1) Any extension using CEL matching on headers. 2) The header to metadata filter. 3) The JWT filter. 4) The Lua filter. Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to false. Finally, the setCopy() header map API previously only set the first header in the case of duplicate non-inline headers. setCopy() now behaves similiarly to the other set*() APIs and replaces all found headers with a single value. This may have had security implications in the extauth filter which uses this API. This behavior can be disabled by setting the runtime value "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. Fixes https://github.com/envoyproxy/envoy-setec/issues/188 Signed-off-by: Matt Klein <[email protected]>
static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return (v == SEQ_START_TOKEN) ? rose_neigh_list : ((struct rose_neigh *)v)->next; }
0
[]
linux
148ca04518070910739dfc4eeda765057856403d
259,345,453,213,798,800,000,000,000,000,000,000,000
7
net: rose: fix UAF bug caused by rose_t0timer_expiry There are UAF bugs caused by rose_t0timer_expiry(). The root cause is that del_timer() could not stop the timer handler that is running and there is no synchronization. One of the race conditions is shown below: (thread 1) | (thread 2) | rose_device_event | rose_rt_device_down | rose_remove_neigh rose_t0timer_expiry | rose_stop_t0timer(rose_neigh) ... | del_timer(&neigh->t0timer) | kfree(rose_neigh) //[1]FREE neigh->dce_mode //[2]USE | The rose_neigh is deallocated in position [1] and use in position [2]. The crash trace triggered by POC is like below: BUG: KASAN: use-after-free in expire_timers+0x144/0x320 Write of size 8 at addr ffff888009b19658 by task swapper/0/0 ... Call Trace: <IRQ> dump_stack_lvl+0xbf/0xee print_address_description+0x7b/0x440 print_report+0x101/0x230 ? expire_timers+0x144/0x320 kasan_report+0xed/0x120 ? expire_timers+0x144/0x320 expire_timers+0x144/0x320 __run_timers+0x3ff/0x4d0 run_timer_softirq+0x41/0x80 __do_softirq+0x233/0x544 ... This patch changes rose_stop_ftimer() and rose_stop_t0timer() in rose_remove_neigh() to del_timer_sync() in order that the timer handler could be finished before the resources such as rose_neigh and so on are deallocated. As a result, the UAF bugs could be mitigated. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Duoming Zhou <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
ProcQueueInit(PROC_QUEUE *queue) { SHMQueueInit(&(queue->links)); queue->size = 0; }
0
[ "CWE-89" ]
postgres
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
11,231,703,852,939,038,000,000,000,000,000,000,000
5
Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. 
The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244
static bool cmd_read_dma(IDEState *s, uint8_t cmd) { bool lba48 = (cmd == WIN_READDMA_EXT); if (!s->blk) { ide_abort_command(s); return true; } ide_cmd_lba48_transform(s, lba48); ide_sector_start_dma(s, IDE_DMA_READ); return false; }
0
[ "CWE-399" ]
qemu
3251bdcf1c67427d964517053c3d185b46e618e8
105,715,145,536,075,750,000,000,000,000,000,000,000
14
ide: Correct handling of malformed/short PRDTs This impacts both BMDMA and AHCI HBA interfaces for IDE. Currently, we confuse the difference between a PRDT having "0 bytes" and a PRDT having "0 complete sectors." When we receive an incomplete sector, inconsistent error checking leads to an infinite loop wherein the call succeeds, but it didn't give us enough bytes -- leading us to re-call the DMA chain over and over again. This leads to, in the BMDMA case, leaked memory for short PRDTs, and infinite loops and resource usage in the AHCI case. The .prepare_buf() callback is reworked to return the number of bytes that it successfully prepared. 0 is a valid, non-error answer that means the table was empty and described no bytes. -1 indicates an error. Our current implementation uses the io_buffer in IDEState to ultimately describe the size of a prepared scatter-gather list. Even though the AHCI PRDT/SGList can be as large as 256GiB, the AHCI command header limits transactions to just 4GiB. ATA8-ACS3, however, defines the largest transaction to be an LBA48 command that transfers 65,536 sectors. With a 512 byte sector size, this is just 32MiB. Since our current state structures use the int type to describe the size of the buffer, and this state is migrated as int32, we are limited to describing 2GiB buffer sizes unless we change the migration protocol. For this reason, this patch begins to unify the assertions in the IDE pathways that the scatter-gather list provided by either the AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum, 2GiB. This should be resilient enough unless we need a sector size that exceeds 32KiB. Further, the likelihood of any guest operating system actually attempting to transfer this much data in a single operation is very slim. To this end, the IDEState variables have been updated to more explicitly clarify our maximum supported size. 
Callers to the prepare_buf callback have been reworked to understand the new return code, and all versions of the prepare_buf callback have been adjusted accordingly. Lastly, the ahci_populate_sglist helper, relied upon by the AHCI implementation of .prepare_buf() as well as the PCI implementation of the callback have had overflow assertions added to help make clear the reasonings behind the various type changes. [Added %d -> %"PRId64" fix John sent because off_pos changed from int to int64_t. --Stefan] Signed-off-by: John Snow <[email protected]> Reviewed-by: Paolo Bonzini <[email protected]> Message-id: [email protected] Signed-off-by: Stefan Hajnoczi <[email protected]>
static int dec(struct cstate *g, int c) { if (c >= '0' && c <= '9') return c - '0'; die(g, "invalid quantifier"); return 0; }
0
[ "CWE-703", "CWE-674" ]
mujs
160ae29578054dc09fd91e5401ef040d52797e61
35,942,881,872,111,996,000,000,000,000,000,000,000
6
Issue #162: Check stack overflow during regexp compilation. Only bother checking during the first compilation pass that counts the size of the program.
Item_cache_real(THD *thd, const Type_handler *h) :Item_cache(thd, h), value(0) {}
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
135,494,014,456,997,300,000,000,000,000,000,000,000
4
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
RateLimiter(qqueue_t *pThis) { DEFiRet; int iDelay; int iHrCurr; time_t tCurr; struct tm m; ISOBJ_TYPE_assert(pThis, qqueue); iDelay = 0; if(pThis->iDeqtWinToHr != 25) { /* 25 means disabled */ /* time calls are expensive, so only do them when needed */ datetime.GetTime(&tCurr); localtime_r(&tCurr, &m); iHrCurr = m.tm_hour; if(pThis->iDeqtWinToHr < pThis->iDeqtWinFromHr) { if(iHrCurr < pThis->iDeqtWinToHr || iHrCurr > pThis->iDeqtWinFromHr) { ; /* do not delay */ } else { iDelay = (pThis->iDeqtWinFromHr - iHrCurr) * 3600; /* this time, we are already into the next hour, so we need * to subtract our current minute and seconds. */ iDelay -= m.tm_min * 60; iDelay -= m.tm_sec; } } else { if(iHrCurr >= pThis->iDeqtWinFromHr && iHrCurr < pThis->iDeqtWinToHr) { ; /* do not delay */ } else { if(iHrCurr < pThis->iDeqtWinFromHr) { iDelay = (pThis->iDeqtWinFromHr - iHrCurr - 1) * 3600; /* -1 as we are already in the hour */ iDelay += (60 - m.tm_min) * 60; iDelay += 60 - m.tm_sec; } else { iDelay = (24 - iHrCurr + pThis->iDeqtWinFromHr) * 3600; /* this time, we are already into the next hour, so we need * to subtract our current minute and seconds. */ iDelay -= m.tm_min * 60; iDelay -= m.tm_sec; } } } } if(iDelay > 0) { DBGOPRINT((obj_t*) pThis, "outside dequeue time window, delaying %d seconds\n", iDelay); srSleep(iDelay, 0); } RETiRet; }
0
[ "CWE-772" ]
rsyslog
dfa88369d4ca4290db56b843f9eabdae1bfe0fd5
294,661,340,683,307,440,000,000,000,000,000,000,000
55
bugfix: memory leak when $RepeatedMsgReduction on was used bug tracker: http://bugzilla.adiscon.com/show_bug.cgi?id=225
static inline funcid_t readAPIFuncID(struct cli_bc *bc, unsigned char *p, unsigned *off, unsigned len, char *ok) { funcid_t id = readNumber(p, off, len, ok)-1; if (*ok && !cli_bitset_test(bc->uses_apis, id)) { cli_errmsg("Called undeclared API function: %u\n", id); *ok = 0; return ~0; } return id; }
0
[ "CWE-189" ]
clamav-devel
3d664817f6ef833a17414a4ecea42004c35cc42f
64,849,602,926,787,330,000,000,000,000,000,000,000
11
fix recursion level crash (bb #3706). Thanks to Stephane Chazelas for the analysis.
static void gf_dom_refresh_event_filter(GF_SceneGraph *sg) { GF_SceneGraph *par; u32 prev_flags = sg->dom_evt_filter; sg->dom_evt_filter = 0; if (sg->nb_evts_mouse) sg->dom_evt_filter |= GF_DOM_EVENT_MOUSE; if (sg->nb_evts_focus) sg->dom_evt_filter |= GF_DOM_EVENT_FOCUS; if (sg->nb_evts_key) sg->dom_evt_filter |= GF_DOM_EVENT_KEY; if (sg->nb_evts_ui) sg->dom_evt_filter |= GF_DOM_EVENT_UI; if (sg->nb_evts_mutation) sg->dom_evt_filter |= GF_DOM_EVENT_MUTATION; if (sg->nb_evts_text) sg->dom_evt_filter |= GF_DOM_EVENT_TEXT; if (sg->nb_evts_smil) sg->dom_evt_filter |= GF_DOM_EVENT_SMIL; if (sg->nb_evts_laser) sg->dom_evt_filter |= GF_DOM_EVENT_LASER; if (sg->nb_evts_svg) sg->dom_evt_filter |= GF_DOM_EVENT_SVG; if (sg->nb_evts_media) sg->dom_evt_filter |= GF_DOM_EVENT_MEDIA; /*for each graph until top, update event filter*/ par = sg->parent_scene; while (par) { par->dom_evt_filter &= ~prev_flags; par->dom_evt_filter |= sg->dom_evt_filter; par = par->parent_scene; } }
0
[ "CWE-416" ]
gpac
9723dd0955894f2cb7be13b94cf7a47f2754b893
257,349,062,215,749,200,000,000,000,000,000,000,000
25
fixed #2109
void CIRCSock::PutIRC(const CString& sLine) { PutIRC(CMessage(sLine)); }
0
[ "CWE-20", "CWE-284" ]
znc
d22fef8620cdd87490754f607e7153979731c69d
67,614,457,566,072,690,000,000,000,000,000,000,000
3
Better cleanup lines coming from network. Thanks for Jeriko One <[email protected]> for finding and reporting this.
static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, unsigned int len, void *data) { struct xennet_gnttab_make_txreq *info = data; unsigned int id; struct xen_netif_tx_request *tx; grant_ref_t ref; /* convenient aliases */ struct page *page = info->page; struct netfront_queue *queue = info->queue; struct sk_buff *skb = info->skb; id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gfn, GNTMAP_readonly); queue->tx_skbs[id] = skb; queue->grant_tx_page[id] = page; queue->grant_tx_ref[id] = ref; info->tx_local.id = id; info->tx_local.gref = ref; info->tx_local.offset = offset; info->tx_local.size = len; info->tx_local.flags = 0; *tx = info->tx_local; /* * Put the request in the pending queue, it will be set to be pending * when the producer index is about to be raised. */ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); info->tx = tx; info->size += info->tx_local.size; }
0
[]
linux
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
339,929,557,758,556,150,000,000,000,000,000,000,000
41
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses() The commit referenced below moved the invocation past the "next" label, without any explanation. In fact this allows misbehaving backends undue control over the domain the frontend runs in, as earlier detected errors require the skb to not be freed (it may be retained for later processing via xennet_move_rx_slot(), or it may simply be unsafe to have it freed). This is CVE-2022-33743 / XSA-405. Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront") Signed-off-by: Jan Beulich <[email protected]> Reviewed-by: Juergen Gross <[email protected]> Signed-off-by: Juergen Gross <[email protected]>
static OPJ_BOOL opj_tcd_t1_encode ( opj_tcd_t *p_tcd ) { opj_t1_t * l_t1; const OPJ_FLOAT64 * l_mct_norms; OPJ_UINT32 l_mct_numcomps = 0U; opj_tcp_t * l_tcp = p_tcd->tcp; l_t1 = opj_t1_create(OPJ_TRUE); if (l_t1 == 00) { return OPJ_FALSE; } if (l_tcp->mct == 1) { l_mct_numcomps = 3U; /* irreversible encoding */ if (l_tcp->tccps->qmfbid == 0) { l_mct_norms = opj_mct_get_mct_norms_real(); } else { l_mct_norms = opj_mct_get_mct_norms(); } } else { l_mct_numcomps = p_tcd->image->numcomps; l_mct_norms = (const OPJ_FLOAT64 *) (l_tcp->mct_norms); } if (! opj_t1_encode_cblks(l_t1, p_tcd->tcd_image->tiles , l_tcp, l_mct_norms, l_mct_numcomps)) { opj_t1_destroy(l_t1); return OPJ_FALSE; } opj_t1_destroy(l_t1); return OPJ_TRUE; }
0
[ "CWE-369" ]
openjpeg
8f9cc62b3f9a1da9712329ddcedb9750d585505c
207,684,976,924,986,400,000,000,000,000,000,000,000
36
Fix division by zero Fix uclouvain/openjpeg#733
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer) { if (timer->running) { e1000e_intrmgr_rearm_timer(timer); } }
0
[ "CWE-835" ]
qemu
4154c7e03fa55b4cf52509a83d50d6c09d743b77
298,381,396,430,473,070,000,000,000,000,000,000,000
6
net: e1000e: fix an infinite loop issue This issue is like the issue in e1000 network card addressed in this commit: e1000: eliminate infinite loops on out-of-bounds transfer start. Signed-off-by: Li Qiang <[email protected]> Reviewed-by: Dmitry Fleytman <[email protected]> Signed-off-by: Jason Wang <[email protected]>
scroll_cursor_top(int min_scroll, int always) { int scrolled = 0; int extra = 0; int used; int i; linenr_T top; // just above displayed lines linenr_T bot; // just below displayed lines linenr_T old_topline = curwin->w_topline; #ifdef FEAT_DIFF linenr_T old_topfill = curwin->w_topfill; #endif linenr_T new_topline; int off = get_scrolloff_value(); if (mouse_dragging > 0) off = mouse_dragging - 1; /* * Decrease topline until: * - it has become 1 * - (part of) the cursor line is moved off the screen or * - moved at least 'scrolljump' lines and * - at least 'scrolloff' lines above and below the cursor */ validate_cheight(); used = curwin->w_cline_height; // includes filler lines above if (curwin->w_cursor.lnum < curwin->w_topline) scrolled = used; #ifdef FEAT_FOLDING if (hasFolding(curwin->w_cursor.lnum, &top, &bot)) { --top; ++bot; } else #endif { top = curwin->w_cursor.lnum - 1; bot = curwin->w_cursor.lnum + 1; } new_topline = top + 1; #ifdef FEAT_DIFF // "used" already contains the number of filler lines above, don't add it // again. // Hide filler lines above cursor line by adding them to "extra". extra += diff_check_fill(curwin, curwin->w_cursor.lnum); #endif /* * Check if the lines from "top" to "bot" fit in the window. If they do, * set new_topline and advance "top" and "bot" to include more lines. */ while (top > 0) { #ifdef FEAT_FOLDING if (hasFolding(top, &top, NULL)) // count one logical line for a sequence of folded lines i = 1; else #endif i = PLINES_NOFILL(top); used += i; if (extra + i <= off && bot < curbuf->b_ml.ml_line_count) { #ifdef FEAT_FOLDING if (hasFolding(bot, NULL, &bot)) // count one logical line for a sequence of folded lines ++used; else #endif used += plines(bot); } if (used > curwin->w_height) break; if (top < curwin->w_topline) scrolled += i; /* * If scrolling is needed, scroll at least 'sj' lines. 
*/ if ((new_topline >= curwin->w_topline || scrolled > min_scroll) && extra >= off) break; extra += i; new_topline = top; --top; ++bot; } /* * If we don't have enough space, put cursor in the middle. * This makes sure we get the same position when using "k" and "j" * in a small window. */ if (used > curwin->w_height) scroll_cursor_halfway(FALSE); else { /* * If "always" is FALSE, only adjust topline to a lower value, higher * value may happen with wrapping lines */ if (new_topline < curwin->w_topline || always) curwin->w_topline = new_topline; if (curwin->w_topline > curwin->w_cursor.lnum) curwin->w_topline = curwin->w_cursor.lnum; #ifdef FEAT_DIFF curwin->w_topfill = diff_check_fill(curwin, curwin->w_topline); if (curwin->w_topfill > 0 && extra > off) { curwin->w_topfill -= extra - off; if (curwin->w_topfill < 0) curwin->w_topfill = 0; } check_topfill(curwin, FALSE); #endif if (curwin->w_topline != old_topline #ifdef FEAT_DIFF || curwin->w_topfill != old_topfill #endif ) curwin->w_valid &= ~(VALID_WROW|VALID_CROW|VALID_BOTLINE|VALID_BOTLINE_AP); curwin->w_valid |= VALID_TOPLINE; } }
0
[ "CWE-122" ]
vim
777e7c21b7627be80961848ac560cb0a9978ff43
150,651,827,368,098,730,000,000,000,000,000,000,000
130
patch 8.2.3564: invalid memory access when scrolling without valid screen Problem: Invalid memory access when scrolling without a valid screen. Solution: Do not set VALID_BOTLINE in w_valid.
static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc) { struct xdr_stream xdr; struct compound_hdr hdr = { .nops = 1, }; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); return encode_setclientid(&xdr, sc); }
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
329,678,407,054,629,000,000,000,000,000,000,000,000
11
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
static void parse_feature(const char *feature) { if (seen_data_command) die("Got feature command '%s' after data command", feature); if (parse_one_feature(feature, 1)) return; die("This version of fast-import does not support feature %s.", feature); }
0
[]
git
68061e3470210703cb15594194718d35094afdc0
278,093,642,077,259,200,000,000,000,000,000,000,000
10
fast-import: disallow "feature export-marks" by default The fast-import stream command "feature export-marks=<path>" lets the stream write marks to an arbitrary path. This may be surprising if you are running fast-import against an untrusted input (which otherwise cannot do anything except update Git objects and refs). Let's disallow the use of this feature by default, and provide a command-line option to re-enable it (you can always just use the command-line --export-marks as well, but the in-stream version provides an easy way for exporters to control the process). This is a backwards-incompatible change, since the default is flipping to the new, safer behavior. However, since the main users of the in-stream versions would be import/export-based remote helpers, and since we trust remote helpers already (which are already running arbitrary code), we'll pass the new option by default when reading a remote helper's stream. This should minimize the impact. Note that the implementation isn't totally simple, as we have to work around the fact that fast-import doesn't parse its command-line options until after it has read any "feature" lines from the stream. This is how it lets command-line options override in-stream. But in our case, it's important to parse the new --allow-unsafe-features first. There are three options for resolving this: 1. Do a separate "early" pass over the options. This is easy for us to do because there are no command-line options that allow the "unstuck" form (so there's no chance of us mistaking an argument for an option), though it does introduce a risk of incorrect parsing later (e.g,. if we convert to parse-options). 2. Move the option parsing phase back to the start of the program, but teach the stream-reading code never to override an existing value. This is tricky, because stream "feature" lines override each other (meaning we'd have to start tracking the source for every option). 3. 
Accept that we might parse a "feature export-marks" line that is forbidden, as long we don't _act_ on it until after we've parsed the command line options. This would, in fact, work with the current code, but only because the previous patch fixed the export-marks parser to avoid touching the filesystem. So while it works, it does carry risk of somebody getting it wrong in the future in a rather subtle and unsafe way. I've gone with option (1) here as simple, safe, and unlikely to cause regressions. This fixes CVE-2019-1348. Signed-off-by: Jeff King <[email protected]>