Dataset schema (one record per row; column, dtype, observed range):

    func        string    length 0 - 484k
    target      int64     0 - 1
    cwe         list      length 0 - 4
    project     string    799 distinct values
    commit_id   string    length 40 (git SHA-1)
    hash        float64   ~1.2157e24 - ~3.4028e29
    size        int64     1 - 24k
    message     string    length 0 - 13.3k
static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, loff_t length, unsigned flags, struct iomap *iomap, struct iomap *srcmap) { int error; error = ext4_iomap_xattr_fiemap(inode, iomap); if (error == 0 && (offset >= iomap->length)) error = -ENOENT; return error; }
0
[ "CWE-703" ]
linux
ce9f24cccdc019229b70a5c15e2b09ad9c0ab5d1
327,289,713,650,698,920,000,000,000,000,000,000,000
11
ext4: check journal inode extents more carefully Currently, system zones just track ranges of blocks that are "important" fs metadata (bitmaps, group descriptors, journal blocks, etc.). This however complicates how the extent tree (or indirect blocks) can be checked for inodes that actually track such metadata - currently the journal inode but arguably we should be treating quota files or resize inode similarly. We cannot run __ext4_ext_check() on such metadata inodes when loading their extents as that would immediately trigger the validity checks and so we just hack around that and special-case the journal inode. This however leads to a situation that a journal inode which has an extent tree of depth at least one can have an invalid extent tree that goes unnoticed until ext4_cache_extents() crashes. To overcome this limitation, track the inode number each system zone belongs to (0 is used for zones not belonging to any inode). We can then verify the inode number matches the expected one when verifying the extent tree and thus avoid the false errors. With this there's no need to special-case the journal inode during extent tree checking anymore, so remove it. Fixes: 0a944e8a6c66 ("ext4: don't perform block validity checks on the journal inode") Reported-by: Wolfgang Frisch <[email protected]> Reviewed-by: Lukas Czerner <[email protected]> Signed-off-by: Jan Kara <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Theodore Ts'o <[email protected]>
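The core idea of this fix, tagging each reserved metadata block range with the inode that owns it and validating ownership during extent checks, can be sketched in isolation. A minimal sketch with hypothetical types and names; the real ext4 code keeps system zones in an rbtree, not an array:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A reserved metadata block range, tagged with the owning inode
 * (0 = not owned by any inode), as the commit message describes. */
struct system_zone {
    uint64_t start, len;
    uint64_t ino;
};

/* An extent overlapping a system zone is only valid when the zone is
 * owned by the very inode being checked. */
static bool extent_valid(const struct system_zone *zones, size_t nzones,
                         uint64_t ino, uint64_t blk, uint64_t count)
{
    for (size_t i = 0; i < nzones; i++) {
        bool overlap = blk < zones[i].start + zones[i].len &&
                       zones[i].start < blk + count;
        if (overlap && zones[i].ino != ino)
            return false;   /* range is reserved for someone else */
    }
    return true;
}

int main(void)
{
    struct system_zone zones[] = { { 100, 10, 0 }, { 200, 50, 8 } };
    printf("%d\n", extent_valid(zones, 2, 8, 200, 50));  /* 1: owner matches */
    printf("%d\n", extent_valid(zones, 2, 9, 200, 50));  /* 0: wrong inode */
    return 0;
}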
static int parse_request(unsigned char *buf, int len, char *name, unsigned int size) { struct domain_hdr *hdr = (void *) buf; uint16_t qdcount = ntohs(hdr->qdcount); uint16_t arcount = ntohs(hdr->arcount); unsigned char *ptr; char *last_label = NULL; unsigned int remain, used = 0; if (len < 12) return -EINVAL; debug("id 0x%04x qr %d opcode %d qdcount %d arcount %d", hdr->id, hdr->qr, hdr->opcode, qdcount, arcount); if (hdr->qr != 0 || qdcount != 1) return -EINVAL; name[0] = '\0'; ptr = buf + sizeof(struct domain_hdr); remain = len - sizeof(struct domain_hdr); while (remain > 0) { uint8_t label_len = *ptr; if (label_len == 0x00) { last_label = (char *) (ptr + 1); break; } if (used + label_len + 1 > size) return -ENOBUFS; strncat(name, (char *) (ptr + 1), label_len); strcat(name, "."); used += label_len + 1; ptr += label_len + 1; remain -= label_len + 1; } if (last_label && arcount && remain >= 9 && last_label[4] == 0 && !memcmp(last_label + 5, opt_edns0_type, 2)) { uint16_t edns0_bufsize; edns0_bufsize = last_label[7] << 8 | last_label[8]; debug("EDNS0 buffer size %u", edns0_bufsize); /* This is an evil hack until full TCP support has been * implemented. * * Sometimes the EDNS0 request gets sent with a too-small * buffer size. Since glibc doesn't seem to crash when it * gets a response bigger than it requested, just bump * the buffer size up to 4KiB. */ if (edns0_bufsize < 0x1000) { last_label[7] = 0x10; last_label[8] = 0x00; } } debug("query %s", name); return 0; }
0
[ "CWE-119" ]
connman
5c281d182ecdd0a424b64f7698f32467f8f67b71
19,789,822,685,137,637,000,000,000,000,000,000,000
71
dnsproxy: Fix crash on malformed DNS response If the response query string is malformed, we might access memory past the end of the "name" variable in parse_response().
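The fix pattern behind this commit is bounds-checking every length-prefixed label against both the input packet and the output buffer before copying. A minimal standalone sketch of that idea, with hypothetical names (this is not the connman code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk length-prefixed DNS labels, refusing to read past the packet
 * or write past the output buffer. Hypothetical helper. */
static int copy_qname(const uint8_t *pkt, size_t pkt_len,
                      char *out, size_t out_size)
{
    size_t in = 0, used = 0;

    if (out_size == 0)
        return -1;

    while (in < pkt_len) {
        uint8_t label_len = pkt[in++];
        if (label_len == 0) {                /* root label: done */
            out[used] = '\0';
            return 0;
        }
        if (in + label_len > pkt_len)        /* would read past packet */
            return -1;
        if (used + label_len + 2 > out_size) /* would overflow output */
            return -1;
        memcpy(out + used, pkt + in, label_len);
        used += label_len;
        out[used++] = '.';
        in += label_len;
    }
    return -1;                               /* input ended before root label */
}

int main(void)
{
    const uint8_t q[] = { 3, 'f', 'o', 'o', 3, 'c', 'o', 'm', 0 };
    char name[64];
    if (copy_qname(q, sizeof(q), name, sizeof(name)) == 0)
        printf("%s\n", name);   /* prints "foo.com." */
    return 0;
}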
TEST_F(QueryPlannerTest, OrAllThreeTightnesses) { addIndex(BSON("names" << 1)); runQuery( fromjson("{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/}," "{names: {$elemMatch: {$eq: 'thomas'}}}]}")); assertNumSolutions(2U); assertSolutionExists("{cscan: {dir: 1}}"); assertSolutionExists( "{fetch: {filter: " "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/}," "{names: {$elemMatch: {$eq: 'thomas'}}}]}, " "node: {ixscan: {filter: null, pattern: {names: 1}}}}}"); }
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
76,054,873,363,046,230,000,000,000,000,000,000,000
14
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
CIFSTCon(const unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; struct smb_hdr *smb_buffer_response; TCONX_REQ *pSMB; TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; int length; __u16 bytes_left, count; if (ses == NULL) return -EIO; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) return -ENOMEM; smb_buffer_response = smb_buffer; header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); smb_buffer->Mid = get_next_mid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; if (!tcon || (ses->server->sec_mode & SECMODE_USER)) { pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ } else { pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* BB FIXME add code to fail this if NTLMv2 or Kerberos specified as required (when that support is added to the vfs in the future) as only NTLM or the much weaker LANMAN (which we do not send by default) is accepted by Samba (not sure whether other servers allow NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((global_secflags & CIFSSEC_MAY_LANMAN) && (ses->server->secType == LANMAN)) calc_lanman_hash(tcon->password, ses->server->cryptkey, ses->server->sec_mode & SECMODE_PW_ENCRYPT ? true : false, bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr, nls_codepage); bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { /* must align unicode strings */ *bcc_ptr = 0; /* null byte password */ bcc_ptr++; } } if (ses->server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_STATUS32) { smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; } if (ses->capabilities & CAP_DFS) { smb_buffer->Flags2 |= SMBFLG2_DFS; } if (ses->capabilities & CAP_UNICODE) { smb_buffer->Flags2 |= SMBFLG2_UNICODE; length = cifs_strtoUTF16((__le16 *) bcc_ptr, tree, 6 /* max utf8 char length in bytes */ * (/* server len*/ + 256 /* share len */), nls_codepage); bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ bcc_ptr += 2; /* skip trailing null */ } else { /* ASCII */ strcpy(bcc_ptr, tree); bcc_ptr += strlen(tree) + 1; } strcpy(bcc_ptr, "?????"); bcc_ptr += strlen("?????"); bcc_ptr += 1; count = bcc_ptr - &pSMB->Password[0]; pSMB->hdr.smb_buf_length = cpu_to_be32(be32_to_cpu( pSMB->hdr.smb_buf_length) + count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0); /* above now done in SendReceive */ if ((rc == 0) && (tcon != NULL)) { bool is_unicode; tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = smb_buffer_response->Tid; bcc_ptr = pByteArea(smb_buffer_response); bytes_left = get_bcc(smb_buffer_response); length = strnlen(bcc_ptr, bytes_left - 2); if (smb_buffer->Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* skip service field (NB: this field is always ASCII) */ if (length == 3) { if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && (bcc_ptr[2] == 'C')) { cifs_dbg(FYI, "IPC connection\n"); tcon->ipc = 1; } } else if (length == 2) { if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) { /* the 
most common case */ cifs_dbg(FYI, "disk share connection\n"); } } bcc_ptr += length + 1; bytes_left -= (length + 1); strncpy(tcon->treeName, tree, MAX_TREE_SIZE); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr, bytes_left, is_unicode, nls_codepage); cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem); if ((smb_buffer_response->WordCount == 3) || (smb_buffer_response->WordCount == 7)) /* field is in same location */ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); else tcon->Flags = 0; cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags); } else if ((rc == 0) && tcon == NULL) { /* all we need to save for IPC$ connection */ ses->ipc_tid = smb_buffer_response->Tid; } cifs_buf_release(smb_buffer); return rc; }
0
[ "CWE-703", "CWE-189" ]
linux
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
134,977,577,166,594,480,000,000,000,000,000,000,000
155
cifs: fix off-by-one bug in build_unc_path_to_root commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed the code such that the vol->prepath no longer contained a leading delimiter and then fixed up the places that accessed that field to account for that change. One spot in build_unc_path_to_root was missed however. When doing the pointer addition on pos, that patch failed to account for the fact that we had already incremented "pos" by one when adding the length of the prepath. This caused a buffer overrun by one byte. This patch fixes the problem by correcting the handling of "pos". Cc: <[email protected]> # v3.8+ Reported-by: Marcus Moeller <[email protected]> Reported-by: Ken Fallon <[email protected]> Signed-off-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]>
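The message describes a classic pointer-arithmetic off-by-one: the cursor was advanced once for a delimiter and then advanced again by a length that already accounted for it. A contrived, self-contained illustration of the bug shape and the corrected accounting (hypothetical path building, not the cifs code):

#include <stdio.h>
#include <string.h>

/* Build "prefix/suffix" into buf (caller guarantees capacity). */
static void build_path(char *buf, const char *prefix, const char *suffix)
{
    char *pos = buf;
    size_t plen = strlen(prefix);

    memcpy(pos, prefix, plen);
    pos += plen + 1;          /* reserve room for the separator... */
    *(pos - 1) = '/';         /* ...and write it */
    /* Buggy variant: advancing again with `pos += 1;` here would count
     * the separator twice, shifting the suffix one byte too far and
     * overrunning the buffer by one. */
    memcpy(pos, suffix, strlen(suffix) + 1);   /* include the NUL */
}

int main(void)
{
    char buf[32];
    build_path(buf, "share", "root");
    printf("%s\n", buf);      /* prints "share/root" */
    return 0;
}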
bool canCoalesce() const override { return false; }
0
[ "CWE-401" ]
envoy
5eba69a1f375413fb93fab4173f9c393ac8c2818
225,804,140,223,717,440,000,000,000,000,000,000,000
1
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144) Signed-off-by: antonio <[email protected]>
vips_foreign_find_load( const char *name ) { char filename[VIPS_PATH_MAX]; char option_string[VIPS_PATH_MAX]; VipsForeignLoadClass *load_class; vips__filename_split8( name, filename, option_string ); if( !vips_existsf( "%s", filename ) ) { vips_error( "VipsForeignLoad", _( "file \"%s\" not found" ), name ); return( NULL ); } if( !(load_class = (VipsForeignLoadClass *) vips_foreign_map( "VipsForeignLoad", (VipsSListMap2Fn) vips_foreign_find_load_sub, (void *) filename, NULL )) ) { vips_error( "VipsForeignLoad", _( "\"%s\" is not a known file format" ), name ); return( NULL ); } #ifdef DEBUG printf( "vips_foreign_find_load: selected %s\n", VIPS_OBJECT_CLASS( load_class )->nickname ); #endif /*DEBUG*/ return( G_OBJECT_CLASS_NAME( load_class ) ); }
0
[ "CWE-362", "CWE-476" ]
libvips
20d840e6da15c1574b3ed998bc92f91d1e36c2a5
52,069,683,243,928,680,000,000,000,000,000,000,000
30
fix a crash with delayed load If a delayed load failed, it could leave the pipeline only half-set up. Subsequent threads could then segv. Set a load-has-failed flag and test it before generate. See https://github.com/jcupitt/libvips/issues/893
void region16_clear(REGION16* region) { assert(region); assert(region->data); if ((region->data->size > 0) && (region->data != &empty_region)) free(region->data); region->data = &empty_region; ZeroMemory(&region->extents, sizeof(region->extents)); }
0
[ "CWE-401" ]
FreeRDP
9fee4ae076b1ec97b97efb79ece08d1dab4df29a
33,192,124,713,010,820,000,000,000,000,000,000,000
11
Fixed #5645: realloc return handling
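The one-line message refers to the standard realloc pitfall: assigning the return value straight to the only pointer you hold leaks the original block when realloc fails. A minimal sketch of the safe pattern (generic, not the FreeRDP code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow *bufp to new_size without losing the old block on failure.
 * Returns 0 on success, -1 on failure (old buffer still valid). */
static int safe_grow(char **bufp, size_t new_size)
{
    char *tmp = realloc(*bufp, new_size);  /* never overwrite *bufp directly */
    if (tmp == NULL)
        return -1;                         /* *bufp is still usable/freeable */
    *bufp = tmp;
    return 0;
}

int main(void)
{
    char *buf = malloc(8);
    if (buf == NULL)
        return 1;
    strcpy(buf, "hi");
    if (safe_grow(&buf, 4096) == 0)
        printf("grown: %s\n", buf);
    free(buf);
    return 0;
}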
static inline int num_user_pages(unsigned long addr, unsigned long len) { const unsigned long spage = addr & PAGE_MASK; const unsigned long epage = (addr + len - 1) & PAGE_MASK; return 1 + ((epage - spage) >> PAGE_SHIFT); }
0
[ "CWE-416" ]
linux
3d2a9d642512c21a12d19b9250e7a835dcb41a79
95,052,685,938,463,670,000,000,000,000,000,000,000
8
IB/hfi1: Ensure correct mm is used at all times Two earlier bug fixes have created a security problem in the hfi1 driver. One fix aimed to solve an issue where current->mm was not valid when closing the hfi1 cdev. It attempted to do this by saving a cached value of the current->mm pointer at file open time. This is a problem if another process with access to the FD calls in via write() or ioctl() to pin pages via the hfi driver. The other fix tried to solve a use after free by taking a reference on the mm. To fix this correctly we use the existing cached value of the mm in the mmu notifier. Now we can check in the insert, evict, etc. routines that current->mm matched what the notifier was registered for. If not, then don't allow access. The register of the mmu notifier will save the mm pointer. Since in do_exit() the exit_mm() is called before exit_files(), which would call our close routine a reference is needed on the mm. We rely on the mmgrab done by the registration of the notifier, whereas before it was explicit. The mmu notifier deregistration happens when the user context is torn down, the creation of which triggered the registration. Also of note is we do not do any explicit work to protect the interval tree notifier. It doesn't seem that this is going to be needed since we aren't actually doing anything with current->mm. The interval tree notifier stuff still has a FIXME noted from a previous commit that will be addressed in a follow on patch. Cc: <[email protected]> Fixes: e0cf75deab81 ("IB/hfi1: Fix mm_struct use after free") Fixes: 3faa3d9a308e ("IB/hfi1: Make use of mm consistent") Link: https://lore.kernel.org/r/[email protected] Suggested-by: Jann Horn <[email protected]> Reported-by: Jason Gunthorpe <[email protected]> Reviewed-by: Ira Weiny <[email protected]> Reviewed-by: Mike Marciniszyn <[email protected]> Signed-off-by: Dennis Dalessandro <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
__releases(fiq->waitq.lock) { int err; struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL); struct fuse_forget_in arg = { .nlookup = forget->forget_one.nlookup, }; struct fuse_in_header ih = { .opcode = FUSE_FORGET, .nodeid = forget->forget_one.nodeid, .unique = fuse_get_unique(fiq), .len = sizeof(ih) + sizeof(arg), }; spin_unlock(&fiq->waitq.lock); kfree(forget); if (nbytes < ih.len) return -EINVAL; err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); fuse_copy_finish(cs); if (err) return err; return ih.len; }
0
[ "CWE-416" ]
linux
15fab63e1e57be9fdb5eec1bbc5916e9825e9acb
204,524,444,795,283,700,000,000,000,000,000,000,000
29
fs: prevent page refcount overflow in pipe_buf_get Change pipe_buf_get() to return a bool indicating whether it succeeded in raising the refcount of the page (if the thing in the pipe is a page). This removes another mechanism for overflowing the page refcount. All callers converted to handle a failure. Reported-by: Jann Horn <[email protected]> Signed-off-by: Matthew Wilcox <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
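The fix changes pipe_buf_get() to report failure instead of unconditionally bumping a refcount. A userspace sketch of the same try-acquire idea using a plain C11 atomic counter (this is not the kernel's page refcount machinery):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Try to take a reference; refuse instead of overflowing the counter. */
static bool try_get_ref(atomic_int *refcount)
{
    int old = atomic_load(refcount);
    do {
        if (old <= 0 || old == INT_MAX)   /* dead object or saturated */
            return false;
    } while (!atomic_compare_exchange_weak(refcount, &old, old + 1));
    return true;
}

int main(void)
{
    atomic_int rc = 1;
    printf("took ref: %d\n", try_get_ref(&rc));   /* 1 */
    printf("count now: %d\n", atomic_load(&rc));  /* 2 */
    return 0;
}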
static inline pte_t * fixmap_pte(unsigned long addr) { return &bm_pte[pte_index(addr)]; }
0
[]
linux
15122ee2c515a253b0c66a3e618bc7ebe35105eb
82,697,171,418,465,050,000,000,000,000,000,000,000
4
arm64: Enforce BBM for huge IO/VMAP mappings ioremap_page_range doesn't honour break-before-make and attempts to put down huge mappings (using p*d_set_huge) over the top of pre-existing table entries. This leads to us leaking page table memory and also gives rise to TLB conflicts and spurious aborts, which have been seen in practice on Cortex-A75. Until this has been resolved, refuse to put block mappings when the existing entry is found to be present. Fixes: 324420bf91f60 ("arm64: add support for ioremap() block mappings") Reported-by: Hanjun Guo <[email protected]> Reported-by: Lei Li <[email protected]> Acked-by: Ard Biesheuvel <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Catalin Marinas <[email protected]>
static const SIGALG_LOOKUP *tls1_get_legacy_sigalg(const SSL *s, int idx) { if (idx == -1) { if (s->server) { size_t i; /* Work out index corresponding to ciphersuite */ for (i = 0; i < SSL_PKEY_NUM; i++) { const SSL_CERT_LOOKUP *clu = ssl_cert_lookup_by_idx(i); if (clu->amask & s->s3.tmp.new_cipher->algorithm_auth) { idx = i; break; } } /* * Some GOST ciphersuites allow more than one signature algorithms * */ if (idx == SSL_PKEY_GOST01 && s->s3.tmp.new_cipher->algorithm_auth != SSL_aGOST01) { int real_idx; for (real_idx = SSL_PKEY_GOST12_512; real_idx >= SSL_PKEY_GOST01; real_idx--) { if (s->cert->pkeys[real_idx].privatekey != NULL) { idx = real_idx; break; } } } } else { idx = s->cert->key - s->cert->pkeys; } } if (idx < 0 || idx >= (int)OSSL_NELEM(tls_default_sigalg)) return NULL; if (SSL_USE_SIGALGS(s) || idx != SSL_PKEY_RSA) { const SIGALG_LOOKUP *lu = tls1_lookup_sigalg(tls_default_sigalg[idx]); if (!tls1_lookup_md(lu, NULL)) return NULL; return lu; } return &legacy_rsa_sigalg; }
0
[ "CWE-476" ]
openssl
5235ef44b93306a14d0b6c695b13c64b16e1fdec
227,879,038,051,038,340,000,000,000,000,000,000,000
45
Fix SSL_check_chain() The function SSL_check_chain() can be used by applications to check that a cert and chain is compatible with the negotiated parameters. This could be useful (for example) from the certificate callback. Unfortunately this function was applying TLSv1.2 sig algs rules and did not work correctly if TLSv1.3 was negotiated. We refactor tls_choose_sigalg to split it up and create a new function find_sig_alg which can (optionally) take a certificate and key as parameters and find an appropriate sig alg if one exists. If the cert and key are not supplied then we try to find a cert and key from the ones we have available that matches the shared sig algs. Reviewed-by: Tomas Mraz <[email protected]> (Merged from https://github.com/openssl/openssl/pull/9442)
cockpit_web_response_queue (CockpitWebResponse *self, GBytes *block) { QueueStep qn = { .response = self }; g_return_val_if_fail (COCKPIT_IS_WEB_RESPONSE (self), FALSE); g_return_val_if_fail (block != NULL, FALSE); g_return_val_if_fail (self->complete == FALSE, FALSE); if (self->failed) { g_debug ("%s: ignoring queued block after failure", self->logname); return FALSE; } if (g_strcmp0 (self->method, "HEAD") == 0) { g_debug ("%s: ignoring queued block for method HEAD", self->logname); return TRUE; } qn.filters = self->filters; queue_filter (&qn, block); return TRUE; }
0
[ "CWE-1021" ]
cockpit
8d9bc10d8128aae03dfde62fd00075fe492ead10
294,552,989,170,949,900,000,000,000,000,000,000,000
25
common: Restrict frame embedding to same origin Declare `X-Frame-Options: sameorigin` [1] so that cockpit frames can only be embedded into pages coming from the same origin. This is similar to setting CORP in commit 2b38b8de92f9a (which applies to `<script>`, `<img>`, etc.). The main use case for embedding is to run cockpit-ws behind a reverse proxy, while also serving other pages. Cross-origin embedding is discouraged these days to prevent "clickjacking". Cross-origin embedding already did not work in most cases: Frames would always just show the login page. However, this looks confusing and is unclean. With X-Frame-Options, the browser instead shows an explanatory error page. Mention the same origin requirement in the embedding documentation. Fixes #16122 https://bugzilla.redhat.com/show_bug.cgi?id=1980688 CVE-2021-3660 [1] https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options
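For context, the header involved is a single response line that any HTTP server can emit. An illustrative snippet only (cockpit sets this through its own response machinery); the CSP line is the modern equivalent of X-Frame-Options:

#include <stdio.h>

int main(void)
{
    printf("HTTP/1.1 200 OK\r\n");
    printf("X-Frame-Options: sameorigin\r\n");
    printf("Content-Security-Policy: frame-ancestors 'self'\r\n");
    printf("\r\n");
    return 0;
}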
smacro_defined(Context *ctx, const char *name, int nparam, SMacro **defn, bool nocase, bool find_alias) { struct hash_table *smtbl; SMacro *m; smtbl = ctx ? &ctx->localmac : &smacros; restart: m = (SMacro *) hash_findix(smtbl, name); while (m) { if (!mstrcmp(m->name, name, m->casesense && nocase) && (nparam <= 0 || m->nparam == 0 || nparam == m->nparam || (m->greedy && nparam >= m->nparam-1))) { if (m->alias && !find_alias) { if (!ppopt.noaliases) { name = tok_text(m->expansion); goto restart; } else { continue; } } if (defn) *defn = m; return true; } m = m->next; } return false; }
0
[]
nasm
6299a3114ce0f3acd55d07de201a8ca2f0a83059
252,163,670,518,959,700,000,000,000,000,000,000,000
32
BR 3392708: fix NULL pointer reference for invalid %stacksize After issuing an error message for a missing %stacksize argument, need to quit rather than continuing to try to access the pointer. Fold uses of tok_text() while we are at it. Reported-by: Suhwan <[email protected]> Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
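The fix shape here is an early return: after reporting the missing argument, bail out instead of falling through to code that dereferences the pointer. A tiny generic sketch (hypothetical names, not the nasm preprocessor):

#include <stdio.h>

/* After reporting a missing argument, return instead of continuing
 * into code that would dereference a NULL pointer. */
static void handle_directive(const char *arg)
{
    if (arg == NULL) {
        fprintf(stderr, "error: missing %%stacksize argument\n");
        return;   /* the fix: quit here rather than deref NULL below */
    }
    printf("stacksize = %s\n", arg);
}

int main(void)
{
    handle_directive(NULL);     /* reports the error, no crash */
    handle_directive("flat64");
    return 0;
}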
DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault) { /* On Xen PV, DF doesn't use IST. The C part is the same as native. */ exc_double_fault(regs, error_code); }
0
[ "CWE-703" ]
linux
96e8fc5818686d4a1591bb6907e7fdb64ef29884
192,973,845,662,333,450,000,000,000,000,000,000,000
5
x86/xen: Use clear_bss() for Xen PV guests Instead of clearing the bss area in assembly code, use the clear_bss() function. This requires to pass the start_info address as parameter to xen_start_kernel() in order to avoid the xen_start_info being zeroed again. Signed-off-by: Juergen Gross <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Reviewed-by: Jan Beulich <[email protected]> Reviewed-by: Boris Ostrovsky <[email protected]> Link: https://lore.kernel.org/r/[email protected]
static VALUE cState_space_before_set(VALUE self, VALUE space_before) { unsigned long len; GET_STATE(self); Check_Type(space_before, T_STRING); len = RSTRING_LEN(space_before); if (len == 0) { if (state->space_before) { ruby_xfree(state->space_before); state->space_before = NULL; state->space_before_len = 0; } } else { if (state->space_before) ruby_xfree(state->space_before); state->space_before = fstrndup(RSTRING_PTR(space_before), len); state->space_before_len = len; } return Qnil; }
0
[ "CWE-119", "CWE-787" ]
json
8f782fd8e181d9cfe9387ded43a5ca9692266b85
14,134,876,294,996,421,000,000,000,000,000,000,000
19
Fix arbitrary heap exposure problem
psf_fread (void *ptr, sf_count_t bytes, sf_count_t items, SF_PRIVATE *psf) { sf_count_t total = 0 ; ssize_t count ; if (psf->virtual_io) return psf->vio.read (ptr, bytes*items, psf->vio_user_data) / bytes ; items *= bytes ; /* Do this check after the multiplication above. */ if (items <= 0) return 0 ; while (items > 0) { /* Break the read down to a sensible size. */ count = (items > SENSIBLE_SIZE) ? SENSIBLE_SIZE : (ssize_t) items ; count = read (psf->file.filedes, ((char*) ptr) + total, (size_t) count) ; if (count == -1) { if (errno == EINTR) continue ; psf_log_syserr (psf, errno) ; break ; } ; if (count == 0) break ; total += count ; items -= count ; } ; if (psf->is_pipe) psf->pipeoffset += total ; return total / bytes ; } /* psf_fread */
0
[ "CWE-369", "CWE-189" ]
libsndfile
725c7dbb95bfaf8b4bb7b04820e3a00cceea9ce6
270,048,539,995,698,500,000,000,000,000,000,000,000
39
src/file_io.c : Prevent potential divide-by-zero. Closes: https://github.com/erikd/libsndfile/issues/92
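The guard in the function above ("Do this check after the multiplication") is essentially the whole fix: validate the divisor-derived quantity before using it. A reduced sketch:

#include <stdio.h>

/* Reduced sketch of the psf_fread guard: a read of `items` records of
 * `bytes` each must not divide by zero, and must reject a product that
 * is zero or negative (a heuristic; signed overflow is formally UB). */
static long read_records(long bytes, long items)
{
    long total_bytes = bytes * items;
    if (bytes <= 0 || items <= 0 || total_bytes <= 0)
        return 0;                 /* nothing to read, no total/bytes later */
    long total = total_bytes;     /* pretend everything was read */
    return total / bytes;         /* safe: bytes > 0 checked above */
}

int main(void)
{
    printf("%ld\n", read_records(4, 10));  /* 10 */
    printf("%ld\n", read_records(0, 10));  /* 0, no division by zero */
    return 0;
}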
mm_answer_sessid(int sock, Buffer *m) { int i; debug3("%s entering", __func__); if (buffer_len(m) != 16) fatal("%s: bad ssh1 session id", __func__); for (i = 0; i < 16; i++) session_id[i] = buffer_get_char(m); /* Turn on permissions for getpwnam */ monitor_permit(mon_dispatch, MONITOR_REQ_PWNAM, 1); return (0); }
0
[ "CWE-20", "CWE-200" ]
openssh-portable
d4697fe9a28dab7255c60433e4dd23cf7fce8a8b
81,021,061,584,964,730,000,000,000,000,000,000,000
16
Don't resend username to PAM; it already has it. Pointed out by Moritz Jodeit; ok dtucker@
static int load_balance(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing) { int ld_moved, cur_ld_moved, active_balance = 0; struct sched_domain *sd_parent = sd->parent; struct sched_group *group; struct rq *busiest; struct rq_flags rf; struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); struct lb_env env = { .sd = sd, .dst_cpu = this_cpu, .dst_rq = this_rq, .dst_grpmask = sched_group_span(sd->groups), .idle = idle, .loop_break = sched_nr_migrate_break, .cpus = cpus, .fbq_type = all, .tasks = LIST_HEAD_INIT(env.tasks), }; cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); schedstat_inc(sd->lb_count[idle]); redo: if (!should_we_balance(&env)) { *continue_balancing = 0; goto out_balanced; } group = find_busiest_group(&env); if (!group) { schedstat_inc(sd->lb_nobusyg[idle]); goto out_balanced; } busiest = find_busiest_queue(&env, group); if (!busiest) { schedstat_inc(sd->lb_nobusyq[idle]); goto out_balanced; } BUG_ON(busiest == env.dst_rq); schedstat_add(sd->lb_imbalance[idle], env.imbalance); env.src_cpu = busiest->cpu; env.src_rq = busiest; ld_moved = 0; if (busiest->nr_running > 1) { /* * Attempt to move tasks. If find_busiest_group has found * an imbalance but busiest->nr_running <= 1, the group is * still unbalanced. ld_moved simply stays zero, so it is * correctly treated as an imbalance. */ env.flags |= LBF_ALL_PINNED; env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); more_balance: rq_lock_irqsave(busiest, &rf); update_rq_clock(busiest); /* * cur_ld_moved - load moved in current iteration * ld_moved - cumulative load moved across iterations */ cur_ld_moved = detach_tasks(&env); /* * We've detached some tasks from busiest_rq. Every * task is masked "TASK_ON_RQ_MIGRATING", so we can safely * unlock busiest->lock, and we are able to be sure * that nobody can manipulate the tasks in parallel. * See task_rq_lock() family for the details. */ rq_unlock(busiest, &rf); if (cur_ld_moved) { attach_tasks(&env); ld_moved += cur_ld_moved; } local_irq_restore(rf.flags); if (env.flags & LBF_NEED_BREAK) { env.flags &= ~LBF_NEED_BREAK; goto more_balance; } /* * Revisit (affine) tasks on src_cpu that couldn't be moved to * us and move them to an alternate dst_cpu in our sched_group * where they can run. The upper limit on how many times we * iterate on same src_cpu is dependent on number of CPUs in our * sched_group. * * This changes load balance semantics a bit on who can move * load to a given_cpu. In addition to the given_cpu itself * (or a ilb_cpu acting on its behalf where given_cpu is * nohz-idle), we now have balance_cpu in a position to move * load to given_cpu. In rare situations, this may cause * conflicts (balance_cpu and given_cpu/ilb_cpu deciding * _independently_ and at _same_ time to move some load to * given_cpu) causing exceess load to be moved to given_cpu. * This however should not happen so much in practice and * moreover subsequent load balance cycles should correct the * excess load moved. */ if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { /* Prevent to re-select dst_cpu via env's CPUs */ cpumask_clear_cpu(env.dst_cpu, env.cpus); env.dst_rq = cpu_rq(env.new_dst_cpu); env.dst_cpu = env.new_dst_cpu; env.flags &= ~LBF_DST_PINNED; env.loop = 0; env.loop_break = sched_nr_migrate_break; /* * Go back to "more_balance" rather than "redo" since we * need to continue with same src_cpu. */ goto more_balance; } /* * We failed to reach balance because of affinity. 
*/ if (sd_parent) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) *group_imbalance = 1; } /* All tasks on this runqueue were pinned by CPU affinity */ if (unlikely(env.flags & LBF_ALL_PINNED)) { cpumask_clear_cpu(cpu_of(busiest), cpus); /* * Attempting to continue load balancing at the current * sched_domain level only makes sense if there are * active CPUs remaining as possible busiest CPUs to * pull load from which are not contained within the * destination group that is receiving any migrated * load. */ if (!cpumask_subset(cpus, env.dst_grpmask)) { env.loop = 0; env.loop_break = sched_nr_migrate_break; goto redo; } goto out_all_pinned; } } if (!ld_moved) { schedstat_inc(sd->lb_failed[idle]); /* * Increment the failure counter only on periodic balance. * We do not want newidle balance, which can be very * frequent, pollute the failure counter causing * excessive cache_hot migrations and active balances. */ if (idle != CPU_NEWLY_IDLE) sd->nr_balance_failed++; if (need_active_balance(&env)) { unsigned long flags; raw_spin_lock_irqsave(&busiest->lock, flags); /* * Don't kick the active_load_balance_cpu_stop, * if the curr task on busiest CPU can't be * moved to this_cpu: */ if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { raw_spin_unlock_irqrestore(&busiest->lock, flags); env.flags |= LBF_ALL_PINNED; goto out_one_pinned; } /* * ->active_balance synchronizes accesses to * ->active_balance_work. Once set, it's cleared * only after active load balance is finished. */ if (!busiest->active_balance) { busiest->active_balance = 1; busiest->push_cpu = this_cpu; active_balance = 1; } raw_spin_unlock_irqrestore(&busiest->lock, flags); if (active_balance) { stop_one_cpu_nowait(cpu_of(busiest), active_load_balance_cpu_stop, busiest, &busiest->active_balance_work); } /* We've kicked active balancing, force task migration. */ sd->nr_balance_failed = sd->cache_nice_tries+1; } } else sd->nr_balance_failed = 0; if (likely(!active_balance)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; } else { /* * If we've begun active balancing, start to back off. This * case may not be covered by the all_pinned logic if there * is only 1 task on the busy runqueue (because we don't call * detach_tasks). */ if (sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; } goto out; out_balanced: /* * We reach balance although we may have faced some affinity * constraints. Clear the imbalance flag if it was set. */ if (sd_parent) { int *group_imbalance = &sd_parent->groups->sgc->imbalance; if (*group_imbalance) *group_imbalance = 0; } out_all_pinned: /* * We reach balance because all tasks are pinned at this level so * we can't migrate them. Let the imbalance flag set so parent level * can try to migrate them. */ schedstat_inc(sd->lb_balanced[idle]); sd->nr_balance_failed = 0; out_one_pinned: ld_moved = 0; /* * idle_balance() disregards balance intervals, so we could repeatedly * reach this code, which would lead to balance_interval skyrocketting * in a short amount of time. Skip the balance_interval increase logic * to avoid that. */ if (env.idle == CPU_NEWLY_IDLE) goto out; /* tune up the balancing interval */ if ((env.flags & LBF_ALL_PINNED && sd->balance_interval < MAX_PINNED_INTERVAL) || sd->balance_interval < sd->max_interval) sd->balance_interval *= 2; out: return ld_moved; }
0
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
282,959,434,496,254,900,000,000,000,000,000,000,000
272
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <[email protected]> Analyzed-by: Vincent Guittot <[email protected]> Reported-by: Zhipeng Xie <[email protected]> Reported-by: Sargun Dhillon <[email protected]> Reported-by: Xie XiuQi <[email protected]> Tested-by: Zhipeng Xie <[email protected]> Tested-by: Sargun Dhillon <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Acked-by: Vincent Guittot <[email protected]> Cc: <[email protected]> # v4.13+ Cc: Bin Li <[email protected]> Cc: Mike Galbraith <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
rsvg_paint_server_rad_grad (RsvgRadialGradient * gradient) { RsvgPaintServer *result = g_new (RsvgPaintServer, 1); result->refcnt = 1; result->type = RSVG_PAINT_SERVER_RAD_GRAD; result->core.radgrad = gradient; return result; }
0
[]
librsvg
34c95743ca692ea0e44778e41a7c0a129363de84
314,881,081,232,711,470,000,000,000,000,000,000,000
10
Store node type separately in RsvgNode The node name (formerly RsvgNode:type) cannot be used to infer the sub-type of RsvgNode that we're dealing with, since for unknown elements we put type = node-name. This led to a (potentially exploitable) crash e.g. when the element name started with "fe" which tricked the old code into considering it as a RsvgFilterPrimitive. CVE-2011-3146 https://bugzilla.gnome.org/show_bug.cgi?id=658014
TRIO_PUBLIC int trio_scanfv TRIO_ARGS2((format, args), TRIO_CONST char* format, trio_pointer_t* args) { static va_list unused; assert(VALID(format)); return TrioScan((trio_pointer_t)stdin, 0, TrioInStreamFile, TrioUndoStreamFile, format, unused, TrioArrayGetter, args); }
0
[ "CWE-190", "CWE-125" ]
FreeRDP
05cd9ea2290d23931f615c1b004d4b2e69074e27
96,791,828,562,673,900,000,000,000,000,000,000,000
10
Fixed TrioParse and trio_length limts. CVE-2020-4030 thanks to @antonio-morales for finding this.
static int __do_execve_file(int fd, struct filename *filename, struct user_arg_ptr argv, struct user_arg_ptr envp, int flags, struct file *file) { char *pathbuf = NULL; struct linux_binprm *bprm; struct files_struct *displaced; int retval; if (IS_ERR(filename)) return PTR_ERR(filename); /* * We move the actual failure in case of RLIMIT_NPROC excess from * set*uid() to execve() because too many poorly written programs * don't check setuid() return code. Here we additionally recheck * whether NPROC limit is still exceeded. */ if ((current->flags & PF_NPROC_EXCEEDED) && atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) { retval = -EAGAIN; goto out_ret; } /* We're below the limit (still or again), so we don't want to make * further execve() calls fail. */ current->flags &= ~PF_NPROC_EXCEEDED; retval = unshare_files(&displaced); if (retval) goto out_ret; retval = -ENOMEM; bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); if (!bprm) goto out_files; retval = prepare_bprm_creds(bprm); if (retval) goto out_free; check_unsafe_exec(bprm); current->in_execve = 1; if (!file) file = do_open_execat(fd, filename, flags); retval = PTR_ERR(file); if (IS_ERR(file)) goto out_unmark; sched_exec(); bprm->file = file; if (!filename) { bprm->filename = "none"; } else if (fd == AT_FDCWD || filename->name[0] == '/') { bprm->filename = filename->name; } else { if (filename->name[0] == '\0') pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); else pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", fd, filename->name); if (!pathbuf) { retval = -ENOMEM; goto out_unmark; } /* * Record that a name derived from an O_CLOEXEC fd will be * inaccessible after exec. Relies on having exclusive access to * current->files (due to unshare_files above). */ if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; bprm->filename = pathbuf; } bprm->interp = bprm->filename; retval = bprm_mm_init(bprm); if (retval) goto out_unmark; retval = prepare_arg_pages(bprm, argv, envp); if (retval < 0) goto out; retval = prepare_binprm(bprm); if (retval < 0) goto out; retval = copy_strings_kernel(1, &bprm->filename, bprm); if (retval < 0) goto out; bprm->exec = bprm->p; retval = copy_strings(bprm->envc, envp, bprm); if (retval < 0) goto out; retval = copy_strings(bprm->argc, argv, bprm); if (retval < 0) goto out; would_dump(bprm, bprm->file); retval = exec_binprm(bprm); if (retval < 0) goto out; /* execve succeeded */ current->fs->in_exec = 0; current->in_execve = 0; membarrier_execve(current); rseq_execve(current); acct_update_integrals(current); task_numa_free(current, false); free_bprm(bprm); kfree(pathbuf); if (filename) putname(filename); if (displaced) put_files_struct(displaced); return retval; out: if (bprm->mm) { acct_arg_size(bprm, 0); mmput(bprm->mm); } out_unmark: current->fs->in_exec = 0; current->in_execve = 0; out_free: free_bprm(bprm); kfree(pathbuf); out_files: if (displaced) reset_files_struct(displaced); out_ret: if (filename) putname(filename); return retval; }
0
[ "CWE-416" ]
linux
16d51a590a8ce3befb1308e0e7ab77f3b661af33
3,772,737,878,122,072,000,000,000,000,000,000,000
147
sched/fair: Don't free p->numa_faults with concurrent readers When going through execve(), zero out the NUMA fault statistics instead of freeing them. During execve, the task is reachable through procfs and the scheduler. A concurrent /proc/*/sched reader can read data from a freed ->numa_faults allocation (confirmed by KASAN) and write it back to userspace. I believe that it would also be possible for a use-after-free read to occur through a race between a NUMA fault and execve(): task_numa_fault() can lead to task_numa_compare(), which invokes task_weight() on the currently running task of a different CPU. Another way to fix this would be to make ->numa_faults RCU-managed or add extra locking, but it seems easier to wipe the NUMA fault statistics on execve. Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Will Deacon <[email protected]> Fixes: 82727018b0d3 ("sched/numa: Call task_numa_free() from do_execve()") Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
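The fix swaps a free-and-reallocate pattern for wiping in place, so a concurrent reader never observes a dangling pointer. A userspace analogue, assuming a stats buffer shared with readers (not the kernel code):

#include <stdlib.h>
#include <string.h>

struct task_stats {
    unsigned long *faults;   /* shared with concurrent readers */
    size_t nents;
};

/* Buggy shape: free(ts->faults) here lets a concurrent reader touch
 * freed memory. Safer shape per the commit above: keep the allocation
 * alive and zero its contents instead. */
static void reset_stats(struct task_stats *ts)
{
    if (ts->faults != NULL)
        memset(ts->faults, 0, ts->nents * sizeof(*ts->faults));
}

int main(void)
{
    struct task_stats ts = { calloc(16, sizeof(unsigned long)), 16 };
    if (ts.faults == NULL)
        return 1;
    ts.faults[3] = 42;
    reset_stats(&ts);    /* readers keep seeing a valid, zeroed buffer */
    free(ts.faults);
    return 0;
}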
static void test_modules() { assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.constants.one + 1 == tests.constants.two \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.constants.foo == \"foo\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.constants.empty == \"\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.empty() == \"\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.struct_array[1].i == 1 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.struct_array[0].i == 1 or true \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.integer_array[0] == 0 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.integer_array[1] == 1 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.string_array[0] == \"foo\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.string_array[2] == \"baz\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.string_dict[\"foo\"] == \"foo\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.string_dict[\"bar\"] == \"bar\" \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.isum(1,2) == 3 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.isum(1,2,3) == 6 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.fsum(1.0,2.0) == 3.0 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.fsum(1.0,2.0,3.0) == 6.0 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { \ condition: tests.length(\"dummy\") == 5 \ }", NULL); assert_false_rule( "import \"tests\" \ rule test { condition: tests.struct_array[0].i == 1 \ }", NULL); assert_false_rule( "import \"tests\" \ rule test { condition: tests.isum(1,1) == 3 \ }", NULL); assert_false_rule( "import \"tests\" \ rule test { condition: tests.fsum(1.0,1.0) == 3.0 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { condition: tests.match(/foo/,\"foo\") == 3 \ }", NULL); assert_true_rule( "import \"tests\" \ rule test { condition: tests.match(/foo/,\"bar\") == -1\ }", NULL); assert_true_rule( "import \"tests\" \ rule test { condition: tests.match(/foo.bar/i,\"FOO\\nBAR\") == -1\ }", NULL); assert_true_rule( "import \"tests\" \ rule test { condition: tests.match(/foo.bar/is,\"FOO\\nBAR\") == 7\ }", NULL); assert_error( "import \"\\x00\"", ERROR_INVALID_MODULE_NAME); assert_error( "import \"\"", ERROR_INVALID_MODULE_NAME); }
1
[ "CWE-416" ]
yara
053e67e3ec81cc9268ce30eaf0d6663d8639ed1e
262,461,564,299,108,360,000,000,000,000,000,000,000
172
Fix issue #658
static bool ieee80211_validate_radiotap_len(struct sk_buff *skb) { struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *)skb->data; /* check for not even having the fixed radiotap header part */ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) return false; /* too short to be possibly valid */ /* is it a header version we can trust to find length from? */ if (unlikely(rthdr->it_version)) return false; /* only version 0 is supported */ /* does the skb contain enough to deliver on the alleged length? */ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data))) return false; /* skb too short for claimed rt header extent */ return true; }
0
[ "CWE-476" ]
linux
bddc0c411a45d3718ac535a070f349be8eca8d48
279,820,716,399,747,250,000,000,000,000,000,000,000
19
mac80211: Fix NULL ptr deref for injected rate info The commit cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue") moved the code to validate the radiotap header from ieee80211_monitor_start_xmit to ieee80211_parse_tx_radiotap. This made it possible to share more code with the new Tx queue selection code for injected frames. But at the same time, it now required the call of ieee80211_parse_tx_radiotap at the beginning of functions which wanted to handle the radiotap header. And this broke the rate parsing in the radiotap header parser. The radiotap parser for rates operates most of the time only on the data in the actual radiotap header. But for the 802.11a/b/g rates, it must also know the selected band from the chandef information. But this information is only written to the ieee80211_tx_info at the end of the ieee80211_monitor_start_xmit - long after ieee80211_parse_tx_radiotap was already called. The info->band information was therefore always 0 (NL80211_BAND_2GHZ) when the parser code tried to access it. For a 5GHz only device, injecting a frame with 802.11a rates would cause a NULL pointer dereference because local->hw.wiphy->bands[NL80211_BAND_2GHZ] would most likely have been NULL when the radiotap parser searched for the correct rate index of the driver. Cc: [email protected] Reported-by: Ben Greear <[email protected]> Fixes: cb17ed29a7a5 ("mac80211: parse radiotap header when selecting Tx queue") Signed-off-by: Mathy Vanhoef <[email protected]> [[email protected]: added commit message] Signed-off-by: Sven Eckelmann <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Johannes Berg <[email protected]>
char *get_name_from_nsid(int nsid) { struct nsid_cache *c; netns_nsid_socket_init(); netns_map_init(); c = netns_map_get_by_nsid(nsid); if (c) return c->name; return NULL; }
0
[ "CWE-416" ]
iproute2
9bf2c538a0eb10d66e2365a655bf6c52f5ba3d10
280,371,845,126,593,100,000,000,000,000,000,000,000
13
ipnetns: use-after-free problem in get_netnsid_from_name func Follow the following steps: # ip netns add net1 # export MALLOC_MMAP_THRESHOLD_=0 # ip netns list then Segmentation fault (core dumped) will occur. In get_netnsid_from_name func, answer is freed before rta_getattr_u32(tb[NETNSA_NSID]), where tb[] refers to answer's content. If we set MALLOC_MMAP_THRESHOLD_=0, mmap will be used to allocate memory, which is unmapped immediately after calling the free func. So reading tb[NETNSA_NSID] will access the released memory after free(answer). Here, we will read tb[NETNSA_NSID] before free(answer). Fixes: 86bf43c7c2f ("lib/libnetlink: update rtnl_talk to support malloc buff at run time") Reported-by: Huiying Kou <[email protected]> Signed-off-by: Zhiqiang Liu <[email protected]> Acked-by: Phil Sutter <[email protected]> Signed-off-by: Stephen Hemminger <[email protected]>
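The fix reorders two operations: extract what you need from the reply buffer before freeing it. A self-contained sketch of the bug shape (hypothetical parse helper, not libnetlink):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The reply buffer owns the bytes the attribute refers to. Reading the
 * attribute after free(reply) is a use-after-free; copy the value out
 * first, then free. */
static int get_id(void)
{
    char *reply = malloc(32);
    if (reply == NULL)
        return -1;
    memcpy(reply, "\x07\x00\x00\x00", 4);   /* pretend kernel reply */

    int id;
    memcpy(&id, reply, sizeof(id));          /* copy out BEFORE freeing */
    free(reply);
    /* Buggy order: free(reply); then read from reply -- freed memory. */
    return id;
}

int main(void)
{
    printf("id = %d\n", get_id());   /* 7 on little-endian hosts */
    return 0;
}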
GIntBig OGRKMLLayer::GetFeatureCount( int bForce ) { if( m_poFilterGeom != nullptr || m_poAttrQuery != nullptr ) return OGRLayer::GetFeatureCount(bForce); KML *poKMLFile = poDS_->GetKMLFile(); if( nullptr == poKMLFile ) return 0; poKMLFile->selectLayer(nLayerNumber_); return poKMLFile->getNumFeatures(); }
0
[ "CWE-787" ]
gdal
27b9bf644bcf1208f7d6594bdd104cc8a8bb0646
178,585,954,687,825,740,000,000,000,000,000,000,000
13
KML: set OAMS_TRADITIONAL_GIS_ORDER for SRS returned on returned layers
void CompactProtocolReader::readString(StrType& str) { int32_t size = 0; readStringSize(size); readStringBody(str, size); }
0
[ "CWE-703", "CWE-770" ]
fbthrift
c9a903e5902834e95bbd4ab0e9fa53ba0189f351
91,994,927,013,496,560,000,000,000,000,000,000,000
5
Better handling of truncated data when reading strings Summary: Currently we read the string size and blindly pre-allocate it. This allows a malicious attacker to send a few-byte message and cause the server to allocate a huge amount of memory (>1GB). This diff changes the logic to check that we have enough data in the buffer before allocating the string. This is the second part of a fix for CVE-2019-3553. Reviewed By: vitaut Differential Revision: D14393393 fbshipit-source-id: e2046d2f5b087d3abc9a9d2c6c107cf088673057
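The pattern described, validating a wire-supplied length against the bytes actually available before allocating, looks roughly like this (hypothetical reader, not the fbthrift API; host-endian length for brevity):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read a length-prefixed string from `buf`, but only allocate after
 * confirming the buffer really contains `len` bytes. A huge claimed
 * length with a tiny payload gets an error, not a gigabyte allocation. */
static char *read_string(const uint8_t *buf, size_t buf_len, size_t *off)
{
    if (buf_len - *off < 4)
        return NULL;
    uint32_t len;
    memcpy(&len, buf + *off, 4);
    *off += 4;

    if (len > buf_len - *off)      /* claimed size exceeds available data */
        return NULL;               /* reject before allocating anything */

    char *s = malloc((size_t)len + 1);
    if (s == NULL)
        return NULL;
    memcpy(s, buf + *off, len);
    s[len] = '\0';
    *off += len;
    return s;
}

int main(void)
{
    const uint8_t msg[] = { 2, 0, 0, 0, 'o', 'k' };  /* len=2 (LE), "ok" */
    size_t off = 0;
    char *s = read_string(msg, sizeof(msg), &off);
    printf("%s\n", s ? s : "(rejected)");
    free(s);
    return 0;
}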
static int ip_vs_stats_show(struct seq_file *seq, void *v) { struct net *net = seq_file_single_net(seq); struct ip_vs_stats_user show; /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Total Incoming Outgoing Incoming Outgoing\n"); seq_printf(seq, " Conns Packets Packets Bytes Bytes\n"); ip_vs_copy_stats(&show, &net_ipvs(net)->tot_stats); seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", show.conns, show.inpkts, show.outpkts, (unsigned long long) show.inbytes, (unsigned long long) show.outbytes); /* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ seq_puts(seq, " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); seq_printf(seq, "%8X %8X %8X %16X %16X\n", show.cps, show.inpps, show.outpps, show.inbps, show.outbps); return 0; }
0
[ "CWE-200" ]
linux
2d8a041b7bfe1097af21441cb77d6af95f4f4680
206,231,222,557,981,370,000,000,000,000,000,000,000
26
ipvs: fix info leak in getsockopt(IP_VS_SO_GET_TIMEOUT) If at least one of CONFIG_IP_VS_PROTO_TCP or CONFIG_IP_VS_PROTO_UDP is not set, __ip_vs_get_timeouts() does not fully initialize the structure that gets copied to userland and that for leaks up to 12 bytes of kernel stack. Add an explicit memset(0) before passing the structure to __ip_vs_get_timeouts() to avoid the info leak. Signed-off-by: Mathias Krause <[email protected]> Cc: Wensong Zhang <[email protected]> Cc: Simon Horman <[email protected]> Cc: Julian Anastasov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
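The leak class here is copying a struct to userland when only some fields were written; padding and untouched members carry stale stack bytes. A minimal sketch of the fix pattern described in the message:

#include <stdio.h>
#include <string.h>

struct timeouts {
    int tcp;
    int udp;
    int other;     /* only written in some configurations */
};

/* Without the memset, `other` (and any padding) would hold stale stack
 * contents that a subsequent copy to userspace would leak. */
static void get_timeouts(struct timeouts *t)
{
    memset(t, 0, sizeof(*t));   /* the fix: zero everything first */
    t->tcp = 300;
    t->udp = 120;
}

int main(void)
{
    struct timeouts t;
    get_timeouts(&t);
    printf("tcp=%d udp=%d other=%d\n", t.tcp, t.udp, t.other);
    return 0;
}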
static const struct hid_device_id *hid_match_device(struct hid_device *hdev, struct hid_driver *hdrv) { struct hid_dynid *dynid; spin_lock(&hdrv->dyn_lock); list_for_each_entry(dynid, &hdrv->dyn_list, list) { if (hid_match_one_id(hdev, &dynid->id)) { spin_unlock(&hdrv->dyn_lock); return &dynid->id; } } spin_unlock(&hdrv->dyn_lock); return hid_match_id(hdev, hdrv->id_table); }
0
[ "CWE-125" ]
linux
50220dead1650609206efe91f0cc116132d59b3f
17,358,759,561,939,646,000,000,000,000,000,000,000
16
HID: core: prevent out-of-bound readings Plugging a Logitech DJ receiver with KASAN activated raises a bunch of out-of-bound readings. The fields are allocated up to MAX_USAGE, meaning that potentially, we do not have enough fields to fit the incoming values. Add checks and silence KASAN. Signed-off-by: Benjamin Tissoires <[email protected]> Signed-off-by: Jiri Kosina <[email protected]>
zknownundef(i_ctx_t *i_ctx_p) { os_ptr op = osp; os_ptr op1 = op - 1; int code; check_type(*op1, t_dictionary); check_dict_write(*op1); code = idict_undef(op1, op); make_bool(op1, code == 0); pop(1); return 0; }
0
[]
ghostpdl
d683d1e6450d74619e6277efeebfc222d9a5cb91
291,716,635,668,839,920,000,000,000,000,000,000,000
13
Bug 700585: Obliterate "superexec". We don't need it, nor do any known apps. We were under the impression that the Windows driver 'PScript5.dll' used superexec, but after testing with our extensive suite of PostScript files, and analysis of the PScript5 "Adobe CoolType" ProcSet, it does not appear that this operator is needed anymore. Get rid of superexec and all of the references to it, since it is a potential security hole.
scanner_scan_all (parser_context_t *context_p, /**< context */ const uint8_t *arg_list_p, /**< function argument list */ const uint8_t *arg_list_end_p, /**< end of argument list */ const uint8_t *source_p, /**< valid UTF-8 source code */ const uint8_t *source_end_p) /**< end of source code */ { scanner_context_t scanner_context; #if ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) if (context_p->is_show_opcodes) { JERRY_DEBUG_MSG ("\n--- Scanning start ---\n\n"); } #endif /* ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) */ scanner_context.context_status_flags = context_p->status_flags; scanner_context.status_flags = SCANNER_CONTEXT_NO_FLAGS; #if ENABLED (JERRY_DEBUGGER) if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_CONNECTED) { scanner_context.status_flags |= SCANNER_CONTEXT_DEBUGGER_ENABLED; } #endif /* ENABLED (JERRY_DEBUGGER) */ #if ENABLED (JERRY_ES2015) scanner_context.binding_type = SCANNER_BINDING_NONE; scanner_context.active_binding_list_p = NULL; #endif /* ENABLED (JERRY_ES2015) */ scanner_context.active_literal_pool_p = NULL; scanner_context.active_switch_statement.last_case_p = NULL; scanner_context.end_arguments_p = NULL; #if ENABLED (JERRY_ES2015) scanner_context.async_source_p = NULL; #endif /* ENABLED (JERRY_ES2015) */ /* This assignment must be here because of Apple compilers. */ context_p->u.scanner_context_p = &scanner_context; parser_stack_init (context_p); PARSER_TRY (context_p->try_buffer) { context_p->line = 1; context_p->column = 1; if (arg_list_p == NULL) { context_p->source_p = source_p; context_p->source_end_p = source_end_p; uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION_WITHOUT_ARGUMENTS | SCANNER_LITERAL_POOL_CAN_EVAL; if (context_p->status_flags & PARSER_IS_STRICT) { status_flags |= SCANNER_LITERAL_POOL_IS_STRICT; } scanner_literal_pool_t *literal_pool_p = scanner_push_literal_pool (context_p, &scanner_context, status_flags); literal_pool_p->source_p = source_p; parser_stack_push_uint8 (context_p, SCAN_STACK_SCRIPT); lexer_next_token (context_p); scanner_check_directives (context_p, &scanner_context); } else { context_p->source_p = arg_list_p; context_p->source_end_p = arg_list_end_p; uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION; if (context_p->status_flags & PARSER_IS_STRICT) { status_flags |= SCANNER_LITERAL_POOL_IS_STRICT; } #if ENABLED (JERRY_ES2015) if (context_p->status_flags & PARSER_IS_GENERATOR_FUNCTION) { status_flags |= SCANNER_LITERAL_POOL_GENERATOR; } #endif /* ENABLED (JERRY_ES2015) */ scanner_push_literal_pool (context_p, &scanner_context, status_flags); scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS; parser_stack_push_uint8 (context_p, SCAN_STACK_SCRIPT_FUNCTION); /* Faking the first token. 
*/ context_p->token.type = LEXER_LEFT_PAREN; } while (true) { lexer_token_type_t type = (lexer_token_type_t) context_p->token.type; scan_stack_modes_t stack_top = (scan_stack_modes_t) context_p->stack_top_uint8; switch (scanner_context.mode) { case SCAN_MODE_PRIMARY_EXPRESSION: { if (type == LEXER_ADD || type == LEXER_SUBTRACT || LEXER_IS_UNARY_OP_TOKEN (type)) { break; } /* FALLTHRU */ } case SCAN_MODE_PRIMARY_EXPRESSION_AFTER_NEW: { if (scanner_scan_primary_expression (context_p, &scanner_context, type, stack_top) != SCAN_NEXT_TOKEN) { continue; } break; } #if ENABLED (JERRY_ES2015) case SCAN_MODE_CLASS_DECLARATION: { if (context_p->token.type == LEXER_KEYW_EXTENDS) { parser_stack_push_uint8 (context_p, SCAN_STACK_CLASS_EXTENDS); scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; break; } else if (context_p->token.type != LEXER_LEFT_BRACE) { scanner_raise_error (context_p); } scanner_context.mode = SCAN_MODE_CLASS_METHOD; /* FALLTHRU */ } case SCAN_MODE_CLASS_METHOD: { JERRY_ASSERT (stack_top == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR || stack_top == SCAN_STACK_EXPLICIT_CLASS_CONSTRUCTOR); lexer_skip_empty_statements (context_p); lexer_scan_identifier (context_p); if (context_p->token.type == LEXER_RIGHT_BRACE) { scanner_source_start_t source_start; parser_stack_pop_uint8 (context_p); if (stack_top == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR) { parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t)); } stack_top = context_p->stack_top_uint8; JERRY_ASSERT (stack_top == SCAN_STACK_CLASS_STATEMENT || stack_top == SCAN_STACK_CLASS_EXPRESSION); if (stack_top == SCAN_STACK_CLASS_STATEMENT) { /* The token is kept to disallow consuming a semicolon after it. */ scanner_context.mode = SCAN_MODE_STATEMENT_END; continue; } scanner_context.mode = SCAN_MODE_POST_PRIMARY_EXPRESSION; parser_stack_pop_uint8 (context_p); break; } if (context_p->token.type == LEXER_LITERAL && LEXER_IS_IDENT_OR_STRING (context_p->token.lit_location.type) && lexer_compare_literal_to_string (context_p, "constructor", 11)) { if (stack_top == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR) { scanner_source_start_t source_start; parser_stack_pop_uint8 (context_p); parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t)); scanner_info_t *info_p = scanner_insert_info (context_p, source_start.source_p, sizeof (scanner_info_t)); info_p->type = SCANNER_TYPE_CLASS_CONSTRUCTOR; parser_stack_push_uint8 (context_p, SCAN_STACK_EXPLICIT_CLASS_CONSTRUCTOR); } } if (lexer_token_is_identifier (context_p, "static", 6)) { lexer_scan_identifier (context_p); } parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY); scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS; uint16_t literal_pool_flags = SCANNER_LITERAL_POOL_FUNCTION; if (lexer_token_is_identifier (context_p, "get", 3) || lexer_token_is_identifier (context_p, "set", 3)) { lexer_scan_identifier (context_p); if (context_p->token.type == LEXER_LEFT_PAREN) { scanner_push_literal_pool (context_p, &scanner_context, SCANNER_LITERAL_POOL_FUNCTION); continue; } } else if (lexer_token_is_identifier (context_p, "async", 5)) { lexer_scan_identifier (context_p); if (context_p->token.type == LEXER_LEFT_PAREN) { scanner_push_literal_pool (context_p, &scanner_context, SCANNER_LITERAL_POOL_FUNCTION); continue; } literal_pool_flags |= SCANNER_LITERAL_POOL_ASYNC; if (context_p->token.type == LEXER_MULTIPLY) { lexer_scan_identifier (context_p); literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR; } } else if (context_p->token.type == LEXER_MULTIPLY) { 
lexer_scan_identifier (context_p); literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR; } if (context_p->token.type == LEXER_LEFT_SQUARE) { parser_stack_push_uint8 (context_p, SCANNER_FROM_LITERAL_POOL_TO_COMPUTED (literal_pool_flags)); scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; break; } if (context_p->token.type != LEXER_LITERAL) { scanner_raise_error (context_p); } if (literal_pool_flags & SCANNER_LITERAL_POOL_GENERATOR) { context_p->status_flags |= PARSER_IS_GENERATOR_FUNCTION; } scanner_push_literal_pool (context_p, &scanner_context, literal_pool_flags); lexer_next_token (context_p); continue; } #endif /* ENABLED (JERRY_ES2015) */ case SCAN_MODE_POST_PRIMARY_EXPRESSION: { if (scanner_scan_post_primary_expression (context_p, &scanner_context, type, stack_top)) { break; } type = (lexer_token_type_t) context_p->token.type; /* FALLTHRU */ } case SCAN_MODE_PRIMARY_EXPRESSION_END: { if (scanner_scan_primary_expression_end (context_p, &scanner_context, type, stack_top) != SCAN_NEXT_TOKEN) { continue; } break; } case SCAN_MODE_STATEMENT_OR_TERMINATOR: { if (type == LEXER_RIGHT_BRACE || type == LEXER_EOS) { scanner_context.mode = SCAN_MODE_STATEMENT_END; continue; } /* FALLTHRU */ } case SCAN_MODE_STATEMENT: { if (scanner_scan_statement (context_p, &scanner_context, type, stack_top) != SCAN_NEXT_TOKEN) { continue; } break; } case SCAN_MODE_STATEMENT_END: { if (scanner_scan_statement_end (context_p, &scanner_context, type) != SCAN_NEXT_TOKEN) { continue; } if (context_p->token.type == LEXER_EOS) { goto scan_completed; } break; } case SCAN_MODE_VAR_STATEMENT: { #if ENABLED (JERRY_ES2015) if (type == LEXER_LEFT_SQUARE || type == LEXER_LEFT_BRACE) { uint8_t binding_type = SCANNER_BINDING_VAR; if (stack_top == SCAN_STACK_LET || stack_top == SCAN_STACK_FOR_LET_START) { binding_type = SCANNER_BINDING_LET; } else if (stack_top == SCAN_STACK_CONST || stack_top == SCAN_STACK_FOR_CONST_START) { binding_type = SCANNER_BINDING_CONST; } scanner_push_destructuring_pattern (context_p, &scanner_context, binding_type, false); if (type == LEXER_LEFT_SQUARE) { parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL); scanner_context.mode = SCAN_MODE_BINDING; break; } parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL); scanner_context.mode = SCAN_MODE_PROPERTY_NAME; continue; } #endif /* ENABLED (JERRY_ES2015) */ if (type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL) { scanner_raise_error (context_p); } lexer_lit_location_t *literal_p = scanner_add_literal (context_p, &scanner_context); #if ENABLED (JERRY_ES2015) if (stack_top != SCAN_STACK_VAR && stack_top != SCAN_STACK_FOR_VAR_START) { scanner_detect_invalid_let (context_p, literal_p); if (stack_top == SCAN_STACK_LET || stack_top == SCAN_STACK_FOR_LET_START) { literal_p->type |= SCANNER_LITERAL_IS_LET; } else { JERRY_ASSERT (stack_top == SCAN_STACK_CONST || stack_top == SCAN_STACK_FOR_CONST_START); literal_p->type |= SCANNER_LITERAL_IS_CONST; } lexer_next_token (context_p); if (literal_p->type & SCANNER_LITERAL_IS_USED) { literal_p->type |= SCANNER_LITERAL_EARLY_CREATE; } else if (context_p->token.type == LEXER_ASSIGN) { scanner_binding_literal_t binding_literal; binding_literal.literal_p = literal_p; parser_stack_push (context_p, &binding_literal, sizeof (scanner_binding_literal_t)); parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_INIT); } } else { if (!(literal_p->type & SCANNER_LITERAL_IS_VAR)) { scanner_detect_invalid_var (context_p, &scanner_context, literal_p); literal_p->type |= 
SCANNER_LITERAL_IS_VAR; if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH) { literal_p->type |= SCANNER_LITERAL_NO_REG; } } lexer_next_token (context_p); } #else /* !ENABLED (JERRY_ES2015) */ literal_p->type |= SCANNER_LITERAL_IS_VAR; if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH) { literal_p->type |= SCANNER_LITERAL_NO_REG; } lexer_next_token (context_p); #endif /* ENABLED (JERRY_ES2015) */ #if ENABLED (JERRY_ES2015_MODULE_SYSTEM) if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_EXPORT) { literal_p->type |= SCANNER_LITERAL_NO_REG; } #endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */ switch (context_p->token.type) { case LEXER_ASSIGN: { scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; /* FALLTHRU */ } case LEXER_COMMA: { lexer_next_token (context_p); continue; } } if (SCANNER_IS_FOR_START (stack_top)) { #if ENABLED (JERRY_ES2015_MODULE_SYSTEM) JERRY_ASSERT (!(scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_EXPORT)); #endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */ if (context_p->token.type != LEXER_SEMICOLON && context_p->token.type != LEXER_KEYW_IN && !SCANNER_IDENTIFIER_IS_OF ()) { scanner_raise_error (context_p); } scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION_END; continue; } #if ENABLED (JERRY_ES2015) JERRY_ASSERT (stack_top == SCAN_STACK_VAR || stack_top == SCAN_STACK_LET || stack_top == SCAN_STACK_CONST); #else /* !ENABLED (JERRY_ES2015) */ JERRY_ASSERT (stack_top == SCAN_STACK_VAR); #endif /* ENABLED (JERRY_ES2015) */ #if ENABLED (JERRY_ES2015_MODULE_SYSTEM) scanner_context.active_literal_pool_p->status_flags &= (uint16_t) ~SCANNER_LITERAL_POOL_IN_EXPORT; #endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */ scanner_context.mode = SCAN_MODE_STATEMENT_END; parser_stack_pop_uint8 (context_p); continue; } case SCAN_MODE_FUNCTION_ARGUMENTS: { JERRY_ASSERT (stack_top == SCAN_STACK_SCRIPT_FUNCTION || stack_top == SCAN_STACK_FUNCTION_STATEMENT || stack_top == SCAN_STACK_FUNCTION_EXPRESSION || stack_top == SCAN_STACK_FUNCTION_PROPERTY); scanner_literal_pool_t *literal_pool_p = scanner_context.active_literal_pool_p; JERRY_ASSERT (literal_pool_p != NULL && (literal_pool_p->status_flags & SCANNER_LITERAL_POOL_FUNCTION)); literal_pool_p->source_p = context_p->source_p; #if ENABLED (JERRY_ES2015) if (JERRY_UNLIKELY (scanner_context.async_source_p != NULL)) { literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ASYNC; literal_pool_p->source_p = scanner_context.async_source_p; scanner_context.async_source_p = NULL; } #endif /* ENABLED (JERRY_ES2015) */ if (type != LEXER_LEFT_PAREN) { scanner_raise_error (context_p); } lexer_next_token (context_p); #if ENABLED (JERRY_ES2015) /* FALLTHRU */ } case SCAN_MODE_CONTINUE_FUNCTION_ARGUMENTS: { #endif /* ENABLED (JERRY_ES2015) */ if (context_p->token.type != LEXER_RIGHT_PAREN && context_p->token.type != LEXER_EOS) { #if ENABLED (JERRY_ES2015) lexer_lit_location_t *argument_literal_p; #endif /* ENABLED (JERRY_ES2015) */ while (true) { #if ENABLED (JERRY_ES2015) if (context_p->token.type == LEXER_THREE_DOTS) { scanner_context.active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ARGUMENTS_UNMAPPED; lexer_next_token (context_p); } if (context_p->token.type == LEXER_LEFT_SQUARE || context_p->token.type == LEXER_LEFT_BRACE) { argument_literal_p = NULL; break; } #endif /* ENABLED (JERRY_ES2015) */ if (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL) { 
scanner_raise_error (context_p); } #if ENABLED (JERRY_ES2015) argument_literal_p = scanner_append_argument (context_p, &scanner_context); #else /* !ENABLED (JERRY_ES2015) */ scanner_append_argument (context_p, &scanner_context); #endif /* ENABLED (JERRY_ES2015) */ lexer_next_token (context_p); if (context_p->token.type != LEXER_COMMA) { break; } lexer_next_token (context_p); } #if ENABLED (JERRY_ES2015) if (argument_literal_p == NULL) { scanner_context.active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ARGUMENTS_UNMAPPED; parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PARAMETERS); scanner_append_hole (context_p, &scanner_context); scanner_push_destructuring_pattern (context_p, &scanner_context, SCANNER_BINDING_ARG, false); if (context_p->token.type == LEXER_LEFT_SQUARE) { parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL); scanner_context.mode = SCAN_MODE_BINDING; break; } parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL); scanner_context.mode = SCAN_MODE_PROPERTY_NAME; continue; } if (context_p->token.type == LEXER_ASSIGN) { scanner_context.active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ARGUMENTS_UNMAPPED; parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PARAMETERS); scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; if (argument_literal_p->type & SCANNER_LITERAL_IS_USED) { JERRY_ASSERT (argument_literal_p->type & SCANNER_LITERAL_EARLY_CREATE); break; } scanner_binding_literal_t binding_literal; binding_literal.literal_p = argument_literal_p; parser_stack_push (context_p, &binding_literal, sizeof (scanner_binding_literal_t)); parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_INIT); break; } #endif /* ENABLED (JERRY_ES2015) */ } if (context_p->token.type == LEXER_EOS && stack_top == SCAN_STACK_SCRIPT_FUNCTION) { /* End of argument parsing. 
*/ scanner_info_t *scanner_info_p = (scanner_info_t *) scanner_malloc (context_p, sizeof (scanner_info_t)); scanner_info_p->next_p = context_p->next_scanner_info_p; scanner_info_p->source_p = NULL; scanner_info_p->type = SCANNER_TYPE_END_ARGUMENTS; scanner_context.end_arguments_p = scanner_info_p; context_p->next_scanner_info_p = scanner_info_p; context_p->source_p = source_p; context_p->source_end_p = source_end_p; context_p->line = 1; context_p->column = 1; scanner_filter_arguments (context_p, &scanner_context); lexer_next_token (context_p); scanner_check_directives (context_p, &scanner_context); continue; } if (context_p->token.type != LEXER_RIGHT_PAREN) { scanner_raise_error (context_p); } lexer_next_token (context_p); if (context_p->token.type != LEXER_LEFT_BRACE) { scanner_raise_error (context_p); } scanner_filter_arguments (context_p, &scanner_context); lexer_next_token (context_p); scanner_check_directives (context_p, &scanner_context); continue; } case SCAN_MODE_PROPERTY_NAME: { JERRY_ASSERT (stack_top == SCAN_STACK_OBJECT_LITERAL); if (lexer_scan_identifier (context_p)) { lexer_check_property_modifier (context_p); } #if ENABLED (JERRY_ES2015) if (context_p->token.type == LEXER_LEFT_SQUARE) { parser_stack_push_uint8 (context_p, SCAN_STACK_COMPUTED_PROPERTY); scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; break; } #endif /* ENABLED (JERRY_ES2015) */ if (context_p->token.type == LEXER_RIGHT_BRACE) { scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION_END; continue; } if (context_p->token.type == LEXER_PROPERTY_GETTER #if ENABLED (JERRY_ES2015) || context_p->token.type == LEXER_KEYW_ASYNC || context_p->token.type == LEXER_MULTIPLY #endif /* ENABLED (JERRY_ES2015) */ || context_p->token.type == LEXER_PROPERTY_SETTER) { uint16_t literal_pool_flags = SCANNER_LITERAL_POOL_FUNCTION; #if ENABLED (JERRY_ES2015) if (context_p->token.type == LEXER_MULTIPLY) { literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR; } else if (context_p->token.type == LEXER_KEYW_ASYNC) { literal_pool_flags |= SCANNER_LITERAL_POOL_ASYNC; if (lexer_consume_generator (context_p)) { literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR; } } #endif /* ENABLED (JERRY_ES2015) */ parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY); lexer_scan_identifier (context_p); #if ENABLED (JERRY_ES2015) if (context_p->token.type == LEXER_LEFT_SQUARE) { parser_stack_push_uint8 (context_p, SCANNER_FROM_LITERAL_POOL_TO_COMPUTED (literal_pool_flags)); scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; break; } #endif /* ENABLED (JERRY_ES2015) */ if (context_p->token.type != LEXER_LITERAL) { scanner_raise_error (context_p); } scanner_push_literal_pool (context_p, &scanner_context, literal_pool_flags); scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS; break; } if (context_p->token.type != LEXER_LITERAL) { scanner_raise_error (context_p); } #if ENABLED (JERRY_ES2015) parser_line_counter_t start_line = context_p->token.line; parser_line_counter_t start_column = context_p->token.column; bool is_ident = (context_p->token.lit_location.type == LEXER_IDENT_LITERAL); #endif /* ENABLED (JERRY_ES2015) */ lexer_next_token (context_p); #if ENABLED (JERRY_ES2015) if (context_p->token.type == LEXER_LEFT_PAREN) { scanner_push_literal_pool (context_p, &scanner_context, SCANNER_LITERAL_POOL_FUNCTION); parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY); scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS; continue; } if (is_ident && (context_p->token.type == LEXER_COMMA || context_p->token.type == LEXER_RIGHT_BRACE 
|| context_p->token.type == LEXER_ASSIGN)) { context_p->source_p = context_p->token.lit_location.char_p; context_p->line = start_line; context_p->column = start_column; lexer_next_token (context_p); JERRY_ASSERT (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type == LEXER_IDENT_LITERAL); if (context_p->token.type != LEXER_LITERAL) { scanner_raise_error (context_p); } if (scanner_context.binding_type != SCANNER_BINDING_NONE) { scanner_context.mode = SCAN_MODE_BINDING; continue; } scanner_add_reference (context_p, &scanner_context); lexer_next_token (context_p); if (context_p->token.type == LEXER_ASSIGN) { scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; break; } scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION_END; continue; } #endif /* ENABLED (JERRY_ES2015) */ if (context_p->token.type != LEXER_COLON) { scanner_raise_error (context_p); } scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; #if ENABLED (JERRY_ES2015) if (scanner_context.binding_type != SCANNER_BINDING_NONE) { scanner_context.mode = SCAN_MODE_BINDING; } #endif /* ENABLED (JERRY_ES2015) */ break; } #if ENABLED (JERRY_ES2015) case SCAN_MODE_BINDING: { JERRY_ASSERT (scanner_context.binding_type == SCANNER_BINDING_VAR || scanner_context.binding_type == SCANNER_BINDING_LET || scanner_context.binding_type == SCANNER_BINDING_CATCH || scanner_context.binding_type == SCANNER_BINDING_CONST || scanner_context.binding_type == SCANNER_BINDING_ARG || scanner_context.binding_type == SCANNER_BINDING_ARROW_ARG); if (type == LEXER_THREE_DOTS) { lexer_next_token (context_p); type = (lexer_token_type_t) context_p->token.type; } if (type == LEXER_LEFT_SQUARE || type == LEXER_LEFT_BRACE) { scanner_push_destructuring_pattern (context_p, &scanner_context, scanner_context.binding_type, true); if (type == LEXER_LEFT_SQUARE) { parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL); break; } parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL); scanner_context.mode = SCAN_MODE_PROPERTY_NAME; continue; } if (type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL) { scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; continue; } lexer_lit_location_t *literal_p = scanner_add_literal (context_p, &scanner_context); scanner_context.mode = SCAN_MODE_POST_PRIMARY_EXPRESSION; if (scanner_context.binding_type == SCANNER_BINDING_VAR) { if (!(literal_p->type & SCANNER_LITERAL_IS_VAR)) { scanner_detect_invalid_var (context_p, &scanner_context, literal_p); literal_p->type |= SCANNER_LITERAL_IS_VAR; if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH) { literal_p->type |= SCANNER_LITERAL_NO_REG; } } break; } if (scanner_context.binding_type == SCANNER_BINDING_ARROW_ARG) { literal_p->type |= SCANNER_LITERAL_IS_ARG | SCANNER_LITERAL_IS_ARROW_DESTRUCTURED_ARG; if (literal_p->type & SCANNER_LITERAL_IS_USED) { literal_p->type |= SCANNER_LITERAL_EARLY_CREATE; break; } } else { scanner_detect_invalid_let (context_p, literal_p); if (scanner_context.binding_type <= SCANNER_BINDING_CATCH) { JERRY_ASSERT ((scanner_context.binding_type == SCANNER_BINDING_LET) || (scanner_context.binding_type == SCANNER_BINDING_CATCH)); literal_p->type |= SCANNER_LITERAL_IS_LET; } else { literal_p->type |= SCANNER_LITERAL_IS_CONST; if (scanner_context.binding_type == SCANNER_BINDING_ARG) { literal_p->type |= SCANNER_LITERAL_IS_ARG; if (literal_p->type & SCANNER_LITERAL_IS_USED) { literal_p->type |= SCANNER_LITERAL_EARLY_CREATE; break; } } } if (literal_p->type & 
SCANNER_LITERAL_IS_USED) { literal_p->type |= SCANNER_LITERAL_EARLY_CREATE; break; } } scanner_binding_item_t *binding_item_p; binding_item_p = (scanner_binding_item_t *) scanner_malloc (context_p, sizeof (scanner_binding_item_t)); binding_item_p->next_p = scanner_context.active_binding_list_p->items_p; binding_item_p->literal_p = literal_p; scanner_context.active_binding_list_p->items_p = binding_item_p; lexer_next_token (context_p); if (context_p->token.type != LEXER_ASSIGN) { continue; } scanner_binding_literal_t binding_literal; binding_literal.literal_p = literal_p; parser_stack_push (context_p, &binding_literal, sizeof (scanner_binding_literal_t)); parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_INIT); scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION; break; } #endif /* ENABLED (JERRY_ES2015) */ } lexer_next_token (context_p); } scan_completed: if (context_p->stack_top_uint8 != SCAN_STACK_SCRIPT && context_p->stack_top_uint8 != SCAN_STACK_SCRIPT_FUNCTION) { scanner_raise_error (context_p); } scanner_pop_literal_pool (context_p, &scanner_context); #if ENABLED (JERRY_ES2015) JERRY_ASSERT (scanner_context.active_binding_list_p == NULL); #endif /* ENABLED (JERRY_ES2015) */ JERRY_ASSERT (scanner_context.active_literal_pool_p == NULL); #ifndef JERRY_NDEBUG scanner_context.context_status_flags |= PARSER_SCANNING_SUCCESSFUL; #endif /* !JERRY_NDEBUG */ } PARSER_CATCH { #if ENABLED (JERRY_ES2015) while (scanner_context.active_binding_list_p != NULL) { scanner_pop_binding_list (&scanner_context); } #endif /* ENABLED (JERRY_ES2015) */ if (JERRY_UNLIKELY (context_p->error != PARSER_ERR_OUT_OF_MEMORY)) { /* Ignore the errors thrown by the lexer. */ context_p->error = PARSER_ERR_NO_ERROR; /* The following code may allocate memory, so it is enclosed in a try/catch. */ PARSER_TRY (context_p->try_buffer) { #if ENABLED (JERRY_ES2015) if (scanner_context.status_flags & SCANNER_CONTEXT_THROW_ERR_ASYNC_FUNCTION) { JERRY_ASSERT (scanner_context.async_source_p != NULL); scanner_info_t *info_p; info_p = scanner_insert_info (context_p, scanner_context.async_source_p, sizeof (scanner_info_t)); info_p->type = SCANNER_TYPE_ERR_ASYNC_FUNCTION; } #endif /* ENABLED (JERRY_ES2015) */ while (scanner_context.active_literal_pool_p != NULL) { scanner_pop_literal_pool (context_p, &scanner_context); } } PARSER_CATCH { JERRY_ASSERT (context_p->error == PARSER_ERR_OUT_OF_MEMORY); } PARSER_TRY_END } JERRY_ASSERT (context_p->error == PARSER_ERR_NO_ERROR || context_p->error == PARSER_ERR_OUT_OF_MEMORY); if (context_p->error == PARSER_ERR_OUT_OF_MEMORY) { while (scanner_context.active_literal_pool_p != NULL) { scanner_literal_pool_t *literal_pool_p = scanner_context.active_literal_pool_p; scanner_context.active_literal_pool_p = literal_pool_p->prev_p; parser_list_free (&literal_pool_p->literal_pool); scanner_free (literal_pool_p, sizeof (scanner_literal_pool_t)); } parser_stack_free (context_p); return; } } PARSER_TRY_END context_p->status_flags = scanner_context.context_status_flags; scanner_reverse_info_list (context_p); #if ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) if (context_p->is_show_opcodes) { scanner_info_t *info_p = context_p->next_scanner_info_p; const uint8_t *source_start_p = (arg_list_p == NULL) ? 
source_p : arg_list_p; while (info_p->type != SCANNER_TYPE_END) { const char *name_p = NULL; bool print_location = false; switch (info_p->type) { case SCANNER_TYPE_END_ARGUMENTS: { JERRY_DEBUG_MSG (" END_ARGUMENTS\n"); source_start_p = source_p; break; } case SCANNER_TYPE_FUNCTION: case SCANNER_TYPE_BLOCK: { const uint8_t *prev_source_p = info_p->source_p - 1; const uint8_t *data_p; if (info_p->type == SCANNER_TYPE_FUNCTION) { data_p = (const uint8_t *) (info_p + 1); JERRY_DEBUG_MSG (" FUNCTION: flags: 0x%x declarations: %d", (int) info_p->u8_arg, (int) info_p->u16_arg); } else { data_p = (const uint8_t *) (info_p + 1); JERRY_DEBUG_MSG (" BLOCK:"); } JERRY_DEBUG_MSG (" source:%d\n", (int) (info_p->source_p - source_start_p)); while (data_p[0] != SCANNER_STREAM_TYPE_END) { switch (data_p[0] & SCANNER_STREAM_TYPE_MASK) { case SCANNER_STREAM_TYPE_VAR: { JERRY_DEBUG_MSG (" VAR "); break; } #if ENABLED (JERRY_ES2015) case SCANNER_STREAM_TYPE_LET: { JERRY_DEBUG_MSG (" LET "); break; } case SCANNER_STREAM_TYPE_CONST: { JERRY_DEBUG_MSG (" CONST "); break; } case SCANNER_STREAM_TYPE_LOCAL: { JERRY_DEBUG_MSG (" LOCAL "); break; } #endif /* ENABLED (JERRY_ES2015) */ #if ENABLED (JERRY_ES2015_MODULE_SYSTEM) case SCANNER_STREAM_TYPE_IMPORT: { JERRY_DEBUG_MSG (" IMPORT "); break; } #endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */ case SCANNER_STREAM_TYPE_ARG: { JERRY_DEBUG_MSG (" ARG "); break; } #if ENABLED (JERRY_ES2015) case SCANNER_STREAM_TYPE_DESTRUCTURED_ARG: { JERRY_DEBUG_MSG (" DESTRUCTURED_ARG "); break; } #endif /* ENABLED (JERRY_ES2015) */ case SCANNER_STREAM_TYPE_ARG_FUNC: { JERRY_DEBUG_MSG (" ARG_FUNC "); break; } #if ENABLED (JERRY_ES2015) case SCANNER_STREAM_TYPE_DESTRUCTURED_ARG_FUNC: { JERRY_DEBUG_MSG (" DESTRUCTURED_ARG_FUNC "); break; } #endif /* ENABLED (JERRY_ES2015) */ case SCANNER_STREAM_TYPE_FUNC: { JERRY_DEBUG_MSG (" FUNC "); break; } default: { JERRY_ASSERT ((data_p[0] & SCANNER_STREAM_TYPE_MASK) == SCANNER_STREAM_TYPE_HOLE); JERRY_DEBUG_MSG (" HOLE\n"); data_p++; continue; } } size_t length; if (!(data_p[0] & SCANNER_STREAM_UINT16_DIFF)) { if (data_p[2] != 0) { prev_source_p += data_p[2]; length = 2 + 1; } else { memcpy (&prev_source_p, data_p + 2 + 1, sizeof (const uint8_t *)); length = 2 + 1 + sizeof (const uint8_t *); } } else { int32_t diff = ((int32_t) data_p[2]) | ((int32_t) data_p[3]) << 8; if (diff <= UINT8_MAX) { diff = -diff; } prev_source_p += diff; length = 2 + 2; } #if ENABLED (JERRY_ES2015) if (data_p[0] & SCANNER_STREAM_EARLY_CREATE) { JERRY_ASSERT (data_p[0] & SCANNER_STREAM_NO_REG); JERRY_DEBUG_MSG ("*"); } #endif /* ENABLED (JERRY_ES2015) */ if (data_p[0] & SCANNER_STREAM_NO_REG) { JERRY_DEBUG_MSG ("* "); } JERRY_DEBUG_MSG ("'%.*s'\n", data_p[1], (char *) prev_source_p); prev_source_p += data_p[1]; data_p += length; } break; } case SCANNER_TYPE_WHILE: { name_p = "WHILE"; print_location = true; break; } case SCANNER_TYPE_FOR: { scanner_for_info_t *for_info_p = (scanner_for_info_t *) info_p; JERRY_DEBUG_MSG (" FOR: source:%d expression:%d[%d:%d] end:%d[%d:%d]\n", (int) (for_info_p->info.source_p - source_start_p), (int) (for_info_p->expression_location.source_p - source_start_p), (int) for_info_p->expression_location.line, (int) for_info_p->expression_location.column, (int) (for_info_p->end_location.source_p - source_start_p), (int) for_info_p->end_location.line, (int) for_info_p->end_location.column); break; } case SCANNER_TYPE_FOR_IN: { name_p = "FOR-IN"; print_location = true; break; } #if ENABLED (JERRY_ES2015) case SCANNER_TYPE_FOR_OF: { name_p = 
"FOR-OF"; print_location = true; break; } #endif /* ENABLED (JERRY_ES2015) */ case SCANNER_TYPE_SWITCH: { JERRY_DEBUG_MSG (" SWITCH: source:%d\n", (int) (info_p->source_p - source_start_p)); scanner_case_info_t *current_case_p = ((scanner_switch_info_t *) info_p)->case_p; while (current_case_p != NULL) { JERRY_DEBUG_MSG (" CASE: location:%d[%d:%d]\n", (int) (current_case_p->location.source_p - source_start_p), (int) current_case_p->location.line, (int) current_case_p->location.column); current_case_p = current_case_p->next_p; } break; } case SCANNER_TYPE_CASE: { name_p = "CASE"; print_location = true; break; } #if ENABLED (JERRY_ES2015) case SCANNER_TYPE_INITIALIZER: { name_p = "INITIALIZER"; print_location = true; break; } case SCANNER_TYPE_CLASS_CONSTRUCTOR: { JERRY_DEBUG_MSG (" CLASS-CONSTRUCTOR: source:%d\n", (int) (info_p->source_p - source_start_p)); print_location = false; break; } case SCANNER_TYPE_LET_EXPRESSION: { JERRY_DEBUG_MSG (" LET_EXPRESSION: source:%d\n", (int) (info_p->source_p - source_start_p)); break; } case SCANNER_TYPE_ERR_REDECLARED: { JERRY_DEBUG_MSG (" ERR_REDECLARED: source:%d\n", (int) (info_p->source_p - source_start_p)); break; } case SCANNER_TYPE_ERR_ASYNC_FUNCTION: { JERRY_DEBUG_MSG (" ERR_ASYNC_FUNCTION: source:%d\n", (int) (info_p->source_p - source_start_p)); break; } #endif /* ENABLED (JERRY_ES2015) */ } if (print_location) { scanner_location_info_t *location_info_p = (scanner_location_info_t *) info_p; JERRY_DEBUG_MSG (" %s: source:%d location:%d[%d:%d]\n", name_p, (int) (location_info_p->info.source_p - source_start_p), (int) (location_info_p->location.source_p - source_start_p), (int) location_info_p->location.line, (int) location_info_p->location.column); } info_p = info_p->next_p; } JERRY_DEBUG_MSG ("\n--- Scanning end ---\n\n"); } #endif /* ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) */ parser_stack_free (context_p); } /* scanner_scan_all */
0
[ "CWE-476", "CWE-703", "CWE-754" ]
jerryscript
69f8e78c2f8d562bd6d8002b5488f1662ac30d24
279,987,808,649,331,360,000,000,000,000,000,000,000
1,238
Fix error handling in scanner in case of OOM (#3793) This patch fixes #3786 and fixes #3788. JerryScript-DCO-1.0-Signed-off-by: Robert Fancsik [email protected]
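The catch path in the function above nests a second PARSER_TRY so that cleanup which may itself allocate (such as inserting an error-marker info node) cannot crash on a second OOM. A minimal standalone sketch of that pattern, using plain setjmp/longjmp in place of JerryScript's parser macros; all names here are illustrative:

```c
#include <setjmp.h>
#include <stdlib.h>

static jmp_buf oom_recovery;

/* Stand-in for cleanup bookkeeping that may itself allocate: on
 * allocation failure it unwinds instead of dereferencing NULL. */
static void cleanup_may_allocate(void)
{
    void *marker = malloc(64);
    if (marker == NULL)
        longjmp(oom_recovery, 1);   /* report OOM to the outer handler */
    free(marker);
}

/* Error path: attempt best-effort bookkeeping first; if that also hits
 * OOM, fall through to unconditional resource release. */
static void scan_failed(void)
{
    if (setjmp(oom_recovery) == 0) {
        cleanup_may_allocate();
    }
    /* unconditional teardown of pools and stacks would go here */
}
```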
write_one_private_string_value (CopyOneSettingValueInfo *info, const char *tag) { const char *value; g_return_if_fail (info != NULL); g_return_if_fail (tag != NULL); value = g_object_get_data (G_OBJECT (info->connection), tag); nm_gconf_set_string_helper (info->client, info->dir, tag, NM_SETTING_802_1X_SETTING_NAME, value); }
0
[ "CWE-310" ]
network-manager-applet
4020594dfbf566f1852f0acb36ad631a9e73a82b
75,566,037,402,925,690,000,000,000,000,000,000,000
12
core: fix CA cert mishandling after cert file deletion (deb #560067) (rh #546793) If a connection was created with a CA certificate, but the user later moved or deleted that CA certificate, the applet would simply provide the connection to NetworkManager without any CA certificate. This could cause NM to connect to the original network (or a network spoofing the original network) without verifying the identity of the network as the user expects. In the future we can/should do better here by (1) alerting the user that some connection is no longer complete by flagging it in the connection editor or notifying the user somehow, and (2) using a freakin' cert store already (not that Linux has one yet).
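A sketch of the guard this description implies: a configured CA path that no longer resolves to a readable file should render the connection incomplete rather than silently dropping verification. The function name and policy are assumptions, not the applet's code:

```c
#include <stdbool.h>
#include <stdio.h>

/* The failure mode described above: a configured-but-missing CA file
 * must make the connection unusable, not fall back to no verification. */
static bool ca_cert_usable(const char *ca_path)
{
    FILE *f;

    if (ca_path == NULL)
        return false;              /* never configured */
    f = fopen(ca_path, "r");
    if (f == NULL)
        return false;              /* configured but missing: refuse */
    fclose(f);
    return true;
}
```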
int usb_device_attach(USBDevice *dev) { USBBus *bus = usb_bus_from_device(dev); USBPort *port = dev->port; char devspeed[32], portspeed[32]; assert(port != NULL); assert(!dev->attached); usb_mask_to_str(devspeed, sizeof(devspeed), dev->speedmask); usb_mask_to_str(portspeed, sizeof(portspeed), port->speedmask); trace_usb_port_attach(bus->busnr, port->path, devspeed, portspeed); if (!(port->speedmask & dev->speedmask)) { error_report("Warning: speed mismatch trying to attach" " usb device \"%s\" (%s speed)" " to bus \"%s\", port \"%s\" (%s speed)", dev->product_desc, devspeed, bus->qbus.name, port->path, portspeed); return -1; } dev->attached++; usb_attach(port); return 0; }
0
[ "CWE-119" ]
qemu
9f8e9895c504149d7048e9fc5eb5cbb34b16e49a
178,515,210,152,084,100,000,000,000,000,000,000,000
27
usb: sanity check setup_index+setup_len in post_load CVE-2013-4541 s->setup_len and s->setup_index are fed into usb_packet_copy as size/offset into s->data_buf, it's possible for invalid state to exploit this to load arbitrary data. setup_len and setup_index should be checked to make sure they are not negative. Cc: Gerd Hoffmann <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Gerd Hoffmann <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
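A hedged sketch of the post_load validation this message calls for; the struct layout mirrors the fields named in the message but is an assumption, not the actual QEMU patch:

```c
#include <errno.h>
#include <stdint.h>

typedef struct {
    int32_t setup_len;
    int32_t setup_index;
    uint8_t data_buf[4096];
} USBDeviceState;

/* Reject migration state whose offset/size could index outside data_buf
 * when later fed to usb_packet_copy(): negative values and out-of-range
 * combinations are both refused. */
static int usb_device_post_load_check(const USBDeviceState *s)
{
    if (s->setup_len < 0 || s->setup_index < 0 ||
        s->setup_len > (int32_t) sizeof(s->data_buf) ||
        s->setup_index > (int32_t) sizeof(s->data_buf) - s->setup_len)
        return -EINVAL;
    return 0;
}
```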
static int php_date_timezone_initialize_from_hash(zval **return_value, php_timezone_obj **tzobj, HashTable *myht TSRMLS_DC)
{
	zval **z_timezone = NULL;
	zval **z_timezone_type = NULL;

	if (zend_hash_find(myht, "timezone_type", 14, (void**) &z_timezone_type) == SUCCESS && Z_TYPE_PP(z_timezone_type) == IS_LONG) {
		if (zend_hash_find(myht, "timezone", 9, (void**) &z_timezone) == SUCCESS && Z_TYPE_PP(z_timezone) == IS_STRING) {
			if (SUCCESS == timezone_initialize(*tzobj, Z_STRVAL_PP(z_timezone) TSRMLS_CC)) {
				return SUCCESS;
			}
		}
	}
	return FAILURE;
}
1
[]
php-src
bb057498f7457e8b2eba98332a3bad434de4cf12
196,402,432,583,036,450,000,000,000,000,000,000,000
14
Fix #70277: new DateTimeZone($foo) is ignoring text after null byte The DateTimeZone constructors are not binary safe. They're parsing the timezone as string, but discard the length when calling timezone_initialize(). This patch adds a tz_len parameter and a respective check to timezone_initialize().
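A minimal, library-agnostic sketch of the binary-safety check the fix adds via its new tz_len parameter:

```c
#include <string.h>

/* Returns nonzero when the buffer's declared length disagrees with its
 * C-string length, i.e. an embedded NUL would silently truncate parsing. */
static int has_embedded_nul(const char *buf, size_t len)
{
    return strlen(buf) != len;     /* strlen stops at the first NUL */
}
```

A parser receiving (buf, len) can reject such input up front instead of parsing only the prefix before the embedded NUL.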
OJPEGPrintDir(TIFF* tif, FILE* fd, long flags) { OJPEGState* sp=(OJPEGState*)tif->tif_data; uint8 m; (void)flags; assert(sp!=NULL); if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGINTERCHANGEFORMAT)) fprintf(fd," JpegInterchangeFormat: " TIFF_UINT64_FORMAT "\n",(TIFF_UINT64_T)sp->jpeg_interchange_format); if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGINTERCHANGEFORMATLENGTH)) fprintf(fd," JpegInterchangeFormatLength: " TIFF_UINT64_FORMAT "\n",(TIFF_UINT64_T)sp->jpeg_interchange_format_length); if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGQTABLES)) { fprintf(fd," JpegQTables:"); for (m=0; m<sp->qtable_offset_count; m++) fprintf(fd," " TIFF_UINT64_FORMAT,(TIFF_UINT64_T)sp->qtable_offset[m]); fprintf(fd,"\n"); } if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGDCTABLES)) { fprintf(fd," JpegDcTables:"); for (m=0; m<sp->dctable_offset_count; m++) fprintf(fd," " TIFF_UINT64_FORMAT,(TIFF_UINT64_T)sp->dctable_offset[m]); fprintf(fd,"\n"); } if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGACTABLES)) { fprintf(fd," JpegAcTables:"); for (m=0; m<sp->actable_offset_count; m++) fprintf(fd," " TIFF_UINT64_FORMAT,(TIFF_UINT64_T)sp->actable_offset[m]); fprintf(fd,"\n"); } if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGPROC)) fprintf(fd," JpegProc: %u\n",(unsigned int)sp->jpeg_proc); if (TIFFFieldSet(tif,FIELD_OJPEG_JPEGRESTARTINTERVAL)) fprintf(fd," JpegRestartInterval: %u\n",(unsigned int)sp->restart_interval); if (sp->printdir) (*sp->printdir)(tif, fd, flags); }
0
[ "CWE-369" ]
libtiff
43bc256d8ae44b92d2734a3c5bc73957a4d7c1ec
165,793,200,369,715,600,000,000,000,000,000,000,000
38
* libtiff/tif_ojpeg.c: make OJPEGDecode() early exit in case of failure in OJPEGPreDecode(). This will avoid a divide by zero, and potential other issues. Reported by Agostino Sarubbo. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2611
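A schematic of the early-exit-before-divide pattern the changelog describes; the names are hypothetical, not the actual tif_ojpeg.c code:

```c
#include <stdint.h>

/* If the predecode step failed, derived state such as bytes_per_line may
 * still be zero; bail out before it becomes a divisor. */
static int decode_rows(uint32_t total_bytes, uint32_t bytes_per_line)
{
    if (bytes_per_line == 0)
        return -1;                 /* early exit: predecode failed */
    return (int) (total_bytes / bytes_per_line);
}
```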
static int www_body(int s, int stype, unsigned char *context) { char *buf = NULL; int ret = 1; int i, j, k, dot; SSL *con; const SSL_CIPHER *c; BIO *io, *ssl_bio, *sbio; #ifdef RENEG int total_bytes = 0; #endif int width; fd_set readfds; /* Set width for a select call if needed */ width = s + 1; buf = app_malloc(bufsize, "server www buffer"); io = BIO_new(BIO_f_buffer()); ssl_bio = BIO_new(BIO_f_ssl()); if ((io == NULL) || (ssl_bio == NULL)) goto err; #ifdef FIONBIO if (s_nbio) { unsigned long sl = 1; if (!s_quiet) BIO_printf(bio_err, "turning on non blocking io\n"); if (BIO_socket_ioctl(s, FIONBIO, &sl) < 0) ERR_print_errors(bio_err); } #endif /* lets make the output buffer a reasonable size */ if (!BIO_set_write_buffer_size(io, bufsize)) goto err; if ((con = SSL_new(ctx)) == NULL) goto err; if (s_tlsextdebug) { SSL_set_tlsext_debug_callback(con, tlsext_cb); SSL_set_tlsext_debug_arg(con, bio_s_out); } if (context && !SSL_set_session_id_context(con, context, strlen((char *)context))) goto err; sbio = BIO_new_socket(s, BIO_NOCLOSE); if (s_nbio_test) { BIO *test; test = BIO_new(BIO_f_nbio_test()); sbio = BIO_push(test, sbio); } SSL_set_bio(con, sbio, sbio); SSL_set_accept_state(con); /* SSL_set_fd(con,s); */ BIO_set_ssl(ssl_bio, con, BIO_CLOSE); BIO_push(io, ssl_bio); #ifdef CHARSET_EBCDIC io = BIO_push(BIO_new(BIO_f_ebcdic_filter()), io); #endif if (s_debug) { BIO_set_callback(SSL_get_rbio(con), bio_dump_callback); BIO_set_callback_arg(SSL_get_rbio(con), (char *)bio_s_out); } if (s_msg) { #ifndef OPENSSL_NO_SSL_TRACE if (s_msg == 2) SSL_set_msg_callback(con, SSL_trace); else #endif SSL_set_msg_callback(con, msg_cb); SSL_set_msg_callback_arg(con, bio_s_msg ? bio_s_msg : bio_s_out); } for (;;) { i = BIO_gets(io, buf, bufsize - 1); if (i < 0) { /* error */ if (!BIO_should_retry(io) && !SSL_waiting_for_async(con)) { if (!s_quiet) ERR_print_errors(bio_err); goto err; } else { BIO_printf(bio_s_out, "read R BLOCK\n"); #ifndef OPENSSL_NO_SRP if (BIO_should_io_special(io) && BIO_get_retry_reason(io) == BIO_RR_SSL_X509_LOOKUP) { BIO_printf(bio_s_out, "LOOKUP renego during read\n"); SRP_user_pwd_free(srp_callback_parm.user); srp_callback_parm.user = SRP_VBASE_get1_by_user(srp_callback_parm.vb, srp_callback_parm.login); if (srp_callback_parm.user) BIO_printf(bio_s_out, "LOOKUP done %s\n", srp_callback_parm.user->info); else BIO_printf(bio_s_out, "LOOKUP not successful\n"); continue; } #endif #if defined(OPENSSL_SYS_NETWARE) delay(1000); #elif !defined(OPENSSL_SYS_MSDOS) sleep(1); #endif continue; } } else if (i == 0) { /* end of input */ ret = 1; goto end; } /* else we have data */ if (((www == 1) && (strncmp("GET ", buf, 4) == 0)) || ((www == 2) && (strncmp("GET /stats ", buf, 11) == 0))) { char *p; X509 *peer; STACK_OF(SSL_CIPHER) *sk; static const char *space = " "; if (www == 1 && strncmp("GET /reneg", buf, 10) == 0) { if (strncmp("GET /renegcert", buf, 14) == 0) SSL_set_verify(con, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, NULL); i = SSL_renegotiate(con); BIO_printf(bio_s_out, "SSL_renegotiate -> %d\n", i); /* Send the HelloRequest */ i = SSL_do_handshake(con); if (i <= 0) { BIO_printf(bio_s_out, "SSL_do_handshake() Retval %d\n", SSL_get_error(con, i)); ERR_print_errors(bio_err); goto err; } /* Wait for a ClientHello to come back */ FD_ZERO(&readfds); openssl_fdset(s, &readfds); i = select(width, (void *)&readfds, NULL, NULL, NULL); if (i <= 0 || !FD_ISSET(s, &readfds)) { BIO_printf(bio_s_out, "Error waiting for client response\n"); ERR_print_errors(bio_err); goto err; } /* * We're not acutally 
expecting any data here and we ignore * any that is sent. This is just to force the handshake that * we're expecting to come from the client. If they haven't * sent one there's not much we can do. */ BIO_gets(io, buf, bufsize - 1); } BIO_puts(io, "HTTP/1.0 200 ok\r\nContent-type: text/html\r\n\r\n"); BIO_puts(io, "<HTML><BODY BGCOLOR=\"#ffffff\">\n"); BIO_puts(io, "<pre>\n"); /* BIO_puts(io,OpenSSL_version(OPENSSL_VERSION));*/ BIO_puts(io, "\n"); for (i = 0; i < local_argc; i++) { const char *myp; for (myp = local_argv[i]; *myp; myp++) switch (*myp) { case '<': BIO_puts(io, "&lt;"); break; case '>': BIO_puts(io, "&gt;"); break; case '&': BIO_puts(io, "&amp;"); break; default: BIO_write(io, myp, 1); break; } BIO_write(io, " ", 1); } BIO_puts(io, "\n"); BIO_printf(io, "Secure Renegotiation IS%s supported\n", SSL_get_secure_renegotiation_support(con) ? "" : " NOT"); /* * The following is evil and should not really be done */ BIO_printf(io, "Ciphers supported in s_server binary\n"); sk = SSL_get_ciphers(con); j = sk_SSL_CIPHER_num(sk); for (i = 0; i < j; i++) { c = sk_SSL_CIPHER_value(sk, i); BIO_printf(io, "%-11s:%-25s ", SSL_CIPHER_get_version(c), SSL_CIPHER_get_name(c)); if ((((i + 1) % 2) == 0) && (i + 1 != j)) BIO_puts(io, "\n"); } BIO_puts(io, "\n"); p = SSL_get_shared_ciphers(con, buf, bufsize); if (p != NULL) { BIO_printf(io, "---\nCiphers common between both SSL end points:\n"); j = i = 0; while (*p) { if (*p == ':') { BIO_write(io, space, 26 - j); i++; j = 0; BIO_write(io, ((i % 3) ? " " : "\n"), 1); } else { BIO_write(io, p, 1); j++; } p++; } BIO_puts(io, "\n"); } ssl_print_sigalgs(io, con); #ifndef OPENSSL_NO_EC ssl_print_curves(io, con, 0); #endif BIO_printf(io, (SSL_session_reused(con) ? "---\nReused, " : "---\nNew, ")); c = SSL_get_current_cipher(con); BIO_printf(io, "%s, Cipher is %s\n", SSL_CIPHER_get_version(c), SSL_CIPHER_get_name(c)); SSL_SESSION_print(io, SSL_get_session(con)); BIO_printf(io, "---\n"); print_stats(io, SSL_get_SSL_CTX(con)); BIO_printf(io, "---\n"); peer = SSL_get_peer_certificate(con); if (peer != NULL) { BIO_printf(io, "Client certificate\n"); X509_print(io, peer); PEM_write_bio_X509(io, peer); } else BIO_puts(io, "no client certificate available\n"); BIO_puts(io, "</BODY></HTML>\r\n\r\n"); break; } else if ((www == 2 || www == 3) && (strncmp("GET /", buf, 5) == 0)) { BIO *file; char *p, *e; static const char *text = "HTTP/1.0 200 ok\r\nContent-type: text/plain\r\n\r\n"; /* skip the '/' */ p = &(buf[5]); dot = 1; for (e = p; *e != '\0'; e++) { if (e[0] == ' ') break; switch (dot) { case 1: dot = (e[0] == '.') ? 2 : 0; break; case 2: dot = (e[0] == '.') ? 3 : 0; break; case 3: dot = (e[0] == '/') ? -1 : 0; break; } if (dot == 0) dot = (e[0] == '/') ? 1 : 0; } dot = (dot == 3) || (dot == -1); /* filename contains ".." * component */ if (*e == '\0') { BIO_puts(io, text); BIO_printf(io, "'%s' is an invalid file name\r\n", p); break; } *e = '\0'; if (dot) { BIO_puts(io, text); BIO_printf(io, "'%s' contains '..' 
reference\r\n", p); break; } if (*p == '/') { BIO_puts(io, text); BIO_printf(io, "'%s' is an invalid path\r\n", p); break; } /* if a directory, do the index thang */ if (app_isdir(p) > 0) { BIO_puts(io, text); BIO_printf(io, "'%s' is a directory\r\n", p); break; } if ((file = BIO_new_file(p, "r")) == NULL) { BIO_puts(io, text); BIO_printf(io, "Error opening '%s'\r\n", p); ERR_print_errors(io); break; } if (!s_quiet) BIO_printf(bio_err, "FILE:%s\n", p); if (www == 2) { i = strlen(p); if (((i > 5) && (strcmp(&(p[i - 5]), ".html") == 0)) || ((i > 4) && (strcmp(&(p[i - 4]), ".php") == 0)) || ((i > 4) && (strcmp(&(p[i - 4]), ".htm") == 0))) BIO_puts(io, "HTTP/1.0 200 ok\r\nContent-type: text/html\r\n\r\n"); else BIO_puts(io, "HTTP/1.0 200 ok\r\nContent-type: text/plain\r\n\r\n"); } /* send the file */ for (;;) { i = BIO_read(file, buf, bufsize); if (i <= 0) break; #ifdef RENEG total_bytes += i; BIO_printf(bio_err, "%d\n", i); if (total_bytes > 3 * 1024) { total_bytes = 0; BIO_printf(bio_err, "RENEGOTIATE\n"); SSL_renegotiate(con); } #endif for (j = 0; j < i;) { #ifdef RENEG { static count = 0; if (++count == 13) { SSL_renegotiate(con); } } #endif k = BIO_write(io, &(buf[j]), i - j); if (k <= 0) { if (!BIO_should_retry(io) && !SSL_waiting_for_async(con)) goto write_error; else { BIO_printf(bio_s_out, "rwrite W BLOCK\n"); } } else { j += k; } } } write_error: BIO_free(file); break; } } for (;;) { i = (int)BIO_flush(io); if (i <= 0) { if (!BIO_should_retry(io)) break; } else break; } end: /* make sure we re-use sessions */ SSL_set_shutdown(con, SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN); err: if (ret >= 0) BIO_printf(bio_s_out, "ACCEPT\n"); OPENSSL_free(buf); BIO_free_all(io); return (ret); }
0
[ "CWE-399" ]
openssl
380f18ed5f140e0ae1b68f3ab8f4f7c395658d9e
338,627,872,191,166,760,000,000,000,000,000,000,000
387
CVE-2016-0798: avoid memory leak in SRP The SRP user database lookup method SRP_VBASE_get_by_user had confusing memory management semantics; the returned pointer was sometimes newly allocated, and sometimes owned by the callee. The calling code has no way of distinguishing these two cases. Specifically, SRP servers that configure a secret seed to hide valid login information are vulnerable to a memory leak: an attacker connecting with an invalid username can cause a memory leak of around 300 bytes per connection. Servers that do not configure SRP, or configure SRP but do not configure a seed are not vulnerable. In Apache, the seed directive is known as SSLSRPUnknownUserSeed. To mitigate the memory leak, the seed handling in SRP_VBASE_get_by_user is now disabled even if the user has configured a seed. Applications are advised to migrate to SRP_VBASE_get1_by_user. However, note that OpenSSL makes no strong guarantees about the indistinguishability of valid and invalid logins. In particular, computations are currently not carried out in constant time. Reviewed-by: Rich Salz <[email protected]>
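The advisory's remedy is migrating to SRP_VBASE_get1_by_user, whose result is unambiguously caller-owned. A hedged usage sketch; the two SRP calls are real OpenSSL APIs from this release, while the surrounding function is illustrative:

```c
#include <openssl/srp.h>

/* Every path that receives a non-NULL record releases it, closing the
 * per-connection leak described above. */
static int lookup_and_use(SRP_VBASE *vb, char *login)
{
    SRP_user_pwd *user = SRP_VBASE_get1_by_user(vb, login);

    if (user == NULL)
        return 0;                 /* invalid user: nothing was allocated */

    /* ... feed user->s, user->v, user->g, user->N into the handshake ... */

    SRP_user_pwd_free(user);      /* caller owns the copy: always free */
    return 1;
}
```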
void CServer::ConchainRconPasswordSet(IConsole::IResult *pResult, void *pUserData, IConsole::FCommandCallback pfnCallback, void *pCallbackUserData) { pfnCallback(pResult, pCallbackUserData); if(pResult->NumArguments() >= 1) { static_cast<CServer *>(pUserData)->m_RconPasswordSet = 1; } }
0
[ "CWE-20", "CWE-703", "CWE-400" ]
teeworlds
c68402fa7e279d42886d5951d1ea8ac2facc1ea5
101,789,145,618,719,030,000,000,000,000,000,000,000
8
changed a check
template<typename tf> static CImg<floatT> cylinder3d(CImgList<tf>& primitives, const float radius=50, const float size_z=100, const unsigned int subdivisions=24) { primitives.assign(); if (!subdivisions) return CImg<floatT>(); CImgList<floatT> vertices(2,1,3,1,1, 0.,0.,0., 0.,0.,size_z); for (float delta = 360.f/subdivisions, angle = 0; angle<360; angle+=delta) { const float a = (float)(angle*cimg::PI/180); CImg<floatT>::vector((float)(radius*std::cos(a)),(float)(radius*std::sin(a)),0.f).move_to(vertices); CImg<floatT>::vector((float)(radius*std::cos(a)),(float)(radius*std::sin(a)),size_z).move_to(vertices); } const unsigned int nbr = (vertices._width - 2)/2; for (unsigned int p = 0; p<nbr; ++p) { const unsigned int curr = 2 + 2*p, next = 2 + (2*((p + 1)%nbr)); CImg<tf>::vector(0,next,curr).move_to(primitives); CImg<tf>::vector(1,curr + 1,next + 1).move_to(primitives); CImg<tf>::vector(curr,next,next + 1,curr + 1).move_to(primitives); } return vertices>'x';
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
244,683,798,556,196,730,000,000,000,000,000,000,000
21
.
void qdisc_put_stab(struct qdisc_size_table *tab) { if (!tab) return; spin_lock(&qdisc_stab_lock); if (--tab->refcnt == 0) { list_del(&tab->list); call_rcu_bh(&tab->rcu, stab_kfree_rcu); } spin_unlock(&qdisc_stab_lock); }
0
[ "CWE-264" ]
net
90f62cf30a78721641e08737bda787552428061e
85,376,375,696,980,600,000,000,000,000,000,000,000
14
net: Use netlink_ns_capable to verify the permissions of netlink messages It is possible, by passing a netlink socket to a more privileged executable, to fool that executable into writing to the socket data that happens to be a valid netlink message, and so do something that the privileged executable did not intend to do. To keep this from happening, replace bare capable and ns_capable calls with netlink_capable, netlink_net_capable and netlink_ns_capable calls, which act the same as the previous calls except they verify that the opener of the socket had the desired permissions as well. Reported-by: Andy Lutomirski <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]> Signed-off-by: David S. Miller <[email protected]>
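A kernel-style, schematic sketch of the substitution described (not standalone-runnable; the handler around the check is hypothetical, while netlink_ns_capable() is the helper this series introduces):

```c
/* Before: capable(CAP_NET_ADMIN) checked the *current* task, which may be
 * a privileged helper.  After: check whoever originally opened the socket. */
static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
    if (!netlink_ns_capable(skb, &init_user_ns, CAP_NET_ADMIN))
        return -EPERM;
    /* ... act on the message ... */
    return 0;
}
```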
input_csi_dispatch_sgr_256(struct input_ctx *ictx, int fgbg, u_int *i) { struct grid_cell *gc = &ictx->cell.cell; int c; (*i)++; c = input_get(ictx, *i, 0, -1); if (c == -1) { if (fgbg == 38) { gc->flags &= ~GRID_FLAG_FG256; gc->fg = 8; } else if (fgbg == 48) { gc->flags &= ~GRID_FLAG_BG256; gc->bg = 8; } } else { if (fgbg == 38) { gc->flags |= GRID_FLAG_FG256; gc->fg = c; } else if (fgbg == 48) { gc->flags |= GRID_FLAG_BG256; gc->bg = c; } } }
0
[]
tmux
2ffbd5b5f05dded1564ba32a6a00b0b417439b2f
292,302,408,580,249,650,000,000,000,000,000,000,000
25
When searching for tabs, start from the screen width; fixes an out-of-bounds read found by Kuang-che Wu.
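A minimal sketch of the clamp described in the message, assuming a per-column tab bitmap; the function and parameter names are invented:

```c
/* Walk left from the cursor to the previous tab stop, but never start the
 * walk beyond the last valid column (the cursor may sit at x == width). */
static unsigned int prev_tab_stop(const unsigned char *tabs,
                                  unsigned int width, unsigned int x)
{
    if (width == 0)
        return 0;
    if (x > width - 1)
        x = width - 1;            /* clamp: start from screen width - 1 */
    while (x > 0 && !tabs[--x])
        ;
    return x;
}
```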
static SLJIT_INLINE void free_stack(compiler_common *common, int size) { DEFINE_COMPILER; SLJIT_ASSERT(size > 0); OP2(SLJIT_ADD, STACK_TOP, 0, STACK_TOP, 0, SLJIT_IMM, size * sizeof(sljit_sw)); }
0
[ "CWE-125" ]
php-src
8947fd9e9fdce87cd6c59817b1db58e789538fe9
113,433,684,695,915,620,000,000,000,000,000,000,000
7
Fix #78338: Array cross-border reading in PCRE We backport r1092 from pcre2.
GIOStream *vdagent_socket_connect(const gchar *path, GError **err) { GSocketConnection *conn; GSocketAddress *addr; GSocketClient *client; addr = g_unix_socket_address_new(path); client = g_object_new(G_TYPE_SOCKET_CLIENT, "family", G_SOCKET_FAMILY_UNIX, "type", G_SOCKET_TYPE_STREAM, NULL); conn = g_socket_client_connect(client, G_SOCKET_CONNECTABLE(addr), NULL, err); g_object_unref(client); g_object_unref(addr); return G_IO_STREAM(conn); }
0
[ "CWE-362" ]
spice-vd_agent
51c415df82a52e9ec033225783c77df95f387891
38,454,491,036,966,795,000,000,000,000,000,000,000
16
Avoids user session hijacking Avoids a user hijacking sessions by reusing a PID. In theory an attacker could: - open a connection to the daemon; - fork and exit the process but keep the file descriptor open (inheriting or duplicating it in the forked process); - force the OS to recycle the initial PID by creating many short-lived processes. The daemon would detect the old PID as having the new session. Check the user to avoid such replacements. This issue was reported by the SUSE security team. Signed-off-by: Frediano Ziglio <[email protected]> Acked-by: Uri Lublin <[email protected]>
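On Linux the opener's credentials can be read from the socket itself; a hedged sketch of the uid-plus-pid comparison the message describes (SO_PEERCRED is a real socket option, while the session bookkeeping around it is assumed):

```c
#define _GNU_SOURCE               /* for struct ucred */
#include <stdbool.h>
#include <sys/socket.h>
#include <sys/types.h>

/* A recycled PID alone must not be enough to inherit a session: the uid
 * of the connecting process has to match as well. */
static bool peer_matches_session(int fd, pid_t session_pid, uid_t session_uid)
{
    struct ucred cred;
    socklen_t len = sizeof(cred);

    if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) != 0)
        return false;
    return cred.pid == session_pid && cred.uid == session_uid;
}
```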
process_set_cookie_header (SoupMessage *msg, gpointer user_data) { SoupCookieJar *jar = user_data; SoupCookieJarPrivate *priv = soup_cookie_jar_get_instance_private (jar); GSList *new_cookies, *nc; if (priv->accept_policy == SOUP_COOKIE_JAR_ACCEPT_NEVER) return; new_cookies = soup_cookies_from_response (msg); for (nc = new_cookies; nc; nc = nc->next) { SoupURI *first_party = soup_message_get_first_party (msg); if ((priv->accept_policy == SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY && !incoming_cookie_is_third_party (nc->data, first_party)) || priv->accept_policy == SOUP_COOKIE_JAR_ACCEPT_ALWAYS) soup_cookie_jar_add_cookie (jar, nc->data); else soup_cookie_free (nc->data); } g_slist_free (new_cookies); }
0
[ "CWE-125" ]
libsoup
db2b0d5809d5f8226d47312b40992cadbcde439f
129,260,560,196,138,980,000,000,000,000,000,000,000
22
cookie-jar: bail if hostname is an empty string There are several other ways to fix the problem with this function, but skipping over all of the code is probably the simplest. Fixes #3
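The one-line nature of the fix ("bail if hostname is an empty string") suggests a guard at the top of the third-party check; a generic sketch with invented names:

```c
#include <stdbool.h>
#include <stddef.h>

/* Skip all suffix/offset arithmetic when there is no usable hostname;
 * an empty string would otherwise push the pointer math past the buffer. */
static bool hostname_is_usable(const char *host)
{
    return host != NULL && host[0] != '\0';
}
```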
SPL_METHOD(SplFileObject, seek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long line_pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &line_pos) == FAILURE) { return; } if (line_pos < 0) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't seek file %s to negative line %ld", intern->file_name, line_pos); RETURN_FALSE; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); while(intern->u.file.current_line_num < line_pos) { if (spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC) == FAILURE) { break; } } } /* }}} */
1
[ "CWE-190" ]
php-src
7245bff300d3fa8bacbef7897ff080a6f1c23eba
274,084,651,794,578,100,000,000,000,000,000,000,000
21
Fix bug #72262 - do not overflow int
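The title says only "do not overflow int"; a generic sketch of the guarded-increment idiom that keeps a long line counter from wrapping while chasing a large seek target (not the literal PHP patch):

```c
#include <limits.h>

/* Advance a line counter toward target without ever wrapping; returns 0
 * on progress, -1 when the target is reached or the counter saturates. */
static int advance_line(long *line_num, long target)
{
    if (*line_num >= target || *line_num == LONG_MAX)
        return -1;
    (*line_num)++;
    return 0;
}
```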
GF_Err stsz_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, estSize; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return GF_BAD_PARAM; //support for CompactSizes if (s->type == GF_ISOM_BOX_TYPE_STSZ) { ISOM_DECREASE_SIZE(ptr, 8); ptr->sampleSize = gf_bs_read_u32(bs); ptr->sampleCount = gf_bs_read_u32(bs); } else { //24-reserved ISOM_DECREASE_SIZE(ptr, 8); gf_bs_read_int(bs, 24); i = gf_bs_read_u8(bs); ptr->sampleCount = gf_bs_read_u32(bs); switch (i) { case 4: case 8: case 16: ptr->sampleSize = i; break; default: //try to fix the file //no samples, no parsing pb if (!ptr->sampleCount) { ptr->sampleSize = 16; return GF_OK; } estSize = (u32) (ptr->size) / ptr->sampleCount; if (!estSize && ((ptr->sampleCount+1)/2 == (ptr->size)) ) { ptr->sampleSize = 4; break; } else if (estSize == 1 || estSize == 2) { ptr->sampleSize = 8 * estSize; } else { return GF_ISOM_INVALID_FILE; } } } if (s->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize && ptr->sampleCount) { if (ptr->sampleCount > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; i++) { ptr->sizes[i] = gf_bs_read_u32(bs); if (ptr->max_size < ptr->sizes[i]) ptr->max_size = ptr->sizes[i]; ptr->total_size += ptr->sizes[i]; ptr->total_samples++; } } } else { if (ptr->sampleSize==4) { if (ptr->sampleCount / 2 > ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->sampleCount > ptr->size / (ptr->sampleSize/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } //note we could optimize the mem usage by keeping the table compact //in memory. But that would complicate both caching and editing //we therefore keep all sizes as u32 and uncompress the table ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; ) { switch (ptr->sampleSize) { case 4: ptr->sizes[i] = gf_bs_read_int(bs, 4); if (i+1 < ptr->sampleCount) { ptr->sizes[i+1] = gf_bs_read_int(bs, 4); } else { //0 padding in odd sample count gf_bs_read_int(bs, 4); } i += 2; break; default: ptr->sizes[i] = gf_bs_read_int(bs, ptr->sampleSize); i += 1; break; } if (ptr->max_size < ptr->sizes[i]) ptr->max_size = ptr->sizes[i]; ptr->total_size += ptr->sizes[i]; ptr->total_samples++; } } return GF_OK; }
0
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
318,332,903,719,333,970,000,000,000,000,000,000,000
102
fixed #1587
Http::FilterFactoryCb createFilter(const std::string&, Server::Configuration::FactoryContext&) override { return [](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared<::Envoy::BufferContinueStreamFilter>()); }; }
0
[ "CWE-416" ]
envoy
148de954ed3585d8b4298b424aa24916d0de6136
44,379,241,917,185,700,000,000,000,000,000,000,000
6
CVE-2021-43825 Response filter manager crash Signed-off-by: Yan Avlasov <[email protected]>
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){ long i; for(i=0; i<=w-sizeof(long); i+=sizeof(long)){ long a = *(long*)(src+i); long b = *(long*)(dst+i); *(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80); } for(; i<w; i++) dst[i+0] += src[i+0]; }
1
[ "CWE-703", "CWE-189" ]
FFmpeg
454a11a1c9c686c78aa97954306fb63453299760
283,311,963,725,008,500,000,000,000,000,000,000,000
10
avcodec/dsputil: fix signedness in sizeof() comparisons Signed-off-by: Michael Niedermayer <[email protected]>
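In the loop above, `w - sizeof(long)` is computed as size_t, so a small `w` wraps to a huge unsigned value and the fast path runs far out of bounds. A sketch of the signed-comparison fix the commit title names:

```c
#include <stddef.h>

static void add_bytes_safe(unsigned char *dst, const unsigned char *src, int w)
{
    long i;

    /* The cast forces a signed comparison: for w < sizeof(long) the fast
     * path is skipped instead of iterating ~2^64 times. */
    for (i = 0; i <= w - (long) sizeof(long); i += (long) sizeof(long)) {
        /* word-at-a-time body omitted in this sketch */
    }
    for (; i < w; i++)
        dst[i] += src[i];
}
```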
static double mp_asin(_cimg_math_parser& mp) {
  return std::asin(_mp_arg(2));
}
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
131,715,367,442,866,400,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
static void imap_hcache_namer (const char *path, BUFFER *dest) { mutt_buffer_printf (dest, "%s.hcache", path); }
0
[ "CWE-125" ]
mutt
7c4779ac24d2fb68a2a47b58c7904118f40965d5
130,221,371,673,700,440,000,000,000,000,000,000,000
4
Fix seqset iterator when it ends in a comma. If the seqset ended with a comma, the substr_end marker would be just before the trailing nul. In the next call, the loop to skip the marker would iterate right past the end of string too. The fix is simple: place the substr_end marker and skip past it immediately.
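A minimal sketch of the safe skip described: never step past the terminating NUL even when the set ends in a comma (names invented):

```c
/* Advance past one comma-terminated token; a trailing comma leaves the
 * cursor on the NUL instead of one byte beyond it. */
static const char *skip_seqset_token(const char *p)
{
    while (*p != '\0' && *p != ',')
        p++;
    if (*p == ',')
        p++;            /* step over the separator only, never the NUL */
    return p;
}
```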
void badLength(bool v) { this->bad_length = v; }
0
[ "CWE-125" ]
qpdf
1868a10f8b06631362618bfc85ca8646da4b4b71
312,761,792,274,680,900,000,000,000,000,000,000,000
4
Replace all atoi calls with QUtil::string_to_int The latter catches underflow/overflow.
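QUtil::string_to_int itself isn't shown here, but the message names the property that matters: unlike atoi, it catches underflow and overflow. A C sketch of such a checked conversion:

```c
#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* Convert decimal text to int, reporting range errors and trailing
 * garbage instead of silently wrapping like atoi(). */
static int string_to_int_checked(const char *s, int *out)
{
    char *end;
    long long v;

    errno = 0;
    v = strtoll(s, &end, 10);
    if (errno == ERANGE || end == s || *end != '\0' ||
        v < INT_MIN || v > INT_MAX)
        return -1;
    *out = (int) v;
    return 0;
}
```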
sfe_apply_metadata_changes (const char * filenames [2], const METADATA_INFO * info) { SNDFILE *infile = NULL, *outfile = NULL ; SF_INFO sfinfo ; METADATA_INFO tmpinfo ; int error_code = 0 ; memset (&sfinfo, 0, sizeof (sfinfo)) ; memset (&tmpinfo, 0, sizeof (tmpinfo)) ; if (filenames [1] == NULL) infile = outfile = sf_open (filenames [0], SFM_RDWR, &sfinfo) ; else { infile = sf_open (filenames [0], SFM_READ, &sfinfo) ; /* Output must be WAV. */ sfinfo.format = SF_FORMAT_WAV | (SF_FORMAT_SUBMASK & sfinfo.format) ; outfile = sf_open (filenames [1], SFM_WRITE, &sfinfo) ; } ; if (infile == NULL) { printf ("Error : Not able to open input file '%s' : %s\n", filenames [0], sf_strerror (infile)) ; error_code = 1 ; goto cleanup_exit ; } ; if (outfile == NULL) { printf ("Error : Not able to open output file '%s' : %s\n", filenames [1], sf_strerror (outfile)) ; error_code = 1 ; goto cleanup_exit ; } ; if (info->has_bext_fields && merge_broadcast_info (infile, outfile, sfinfo.format, info)) { error_code = 1 ; goto cleanup_exit ; } ; if (infile != outfile) { int infileminor = SF_FORMAT_SUBMASK & sfinfo.format ; /* If the input file is not the same as the output file, copy the data. */ if ((infileminor == SF_FORMAT_DOUBLE) || (infileminor == SF_FORMAT_FLOAT)) sfe_copy_data_fp (outfile, infile, sfinfo.channels, SF_FALSE) ; else sfe_copy_data_int (outfile, infile, sfinfo.channels) ; } ; update_strings (outfile, info) ; cleanup_exit : if (outfile != NULL && outfile != infile) sf_close (outfile) ; if (infile != NULL) sf_close (infile) ; if (error_code) exit (error_code) ; return ; } /* sfe_apply_metadata_changes */
1
[ "CWE-125" ]
libsndfile
2d54514a4f6437b67829717c05472d2e3300a258
185,070,937,204,213,700,000,000,000,000,000,000,000
61
sfe_copy_data_fp: check value of "max" variable for being normal and check elements of the data[] array for being finite. Both checks use functions provided by the <math.h> header as declared by the C99 standard. Fixes #317 CVE-2017-14245 CVE-2017-14246
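A hedged sketch of the two <math.h> checks the message names, applied to a normalization loop; the surrounding function is invented, not libsndfile's sfe_copy_data_fp:

```c
#include <math.h>
#include <stddef.h>

/* Normalize samples by max, but only when max is a normal number and
 * each sample is finite; NaN/Inf input no longer propagates. */
static void normalize(double *data, size_t count, double max)
{
    size_t i;

    if (!isnormal(max))            /* rejects 0, NaN, Inf, subnormals */
        return;
    for (i = 0; i < count; i++)
        if (isfinite(data[i]))
            data[i] /= max;
}
```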
static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; return args->location->objectid == BTRFS_I(inode)->location.objectid && args->root == BTRFS_I(inode)->root; }
0
[ "CWE-200" ]
linux
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
187,066,343,438,877,170,000,000,000,000,000,000,000
6
Btrfs: fix truncation of compressed and inlined extents When truncating a file to a smaller size which consists of an inline extent that is compressed, we did not discard (or made unusable) the data between the new file size and the old file size, wasting metadata space and allowing for the truncated data to be leaked and the data corruption/loss mentioned below. We were also not correctly decrementing the number of bytes used by the inode, we were setting it to zero, giving a wrong report for callers of the stat(2) syscall. The fsck tool also reported an error about a mismatch between the nbytes of the file versus the real space used by the file. Now because we weren't discarding the truncated region of the file, it was possible for a caller of the clone ioctl to actually read the data that was truncated, allowing for a security breach without requiring root access to the system, using only standard filesystem operations. The scenario is the following: 1) User A creates a file which consists of an inline and compressed extent with a size of 2000 bytes - the file is not accessible to any other users (no read, write or execution permission for anyone else); 2) The user truncates the file to a size of 1000 bytes; 3) User A makes the file world readable; 4) User B creates a file consisting of an inline extent of 2000 bytes; 5) User B issues a clone operation from user A's file into its own file (using a length argument of 0, clone the whole range); 6) User B now gets to see the 1000 bytes that user A truncated from its file before it made its file world readbale. User B also lost the bytes in the range [1000, 2000[ bytes from its own file, but that might be ok if his/her intention was reading stale data from user A that was never supposed to be public. Note that this contrasts with the case where we truncate a file from 2000 bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In this case reading any byte from the range [1000, 2000[ will return a value of 0x00, instead of the original data. This problem exists since the clone ioctl was added and happens both with and without my recent data loss and file corruption fixes for the clone ioctl (patch "Btrfs: fix file corruption and data loss after cloning inline extents"). So fix this by truncating the compressed inline extents as we do for the non-compressed case, which involves decompressing, if the data isn't already in the page cache, compressing the truncated version of the extent, writing the compressed content into the inline extent and then truncate it. The following test case for fstests reproduces the problem. In order for the test to pass both this fix and my previous fix for the clone ioctl that forbids cloning a smaller inline extent into a larger one, which is titled "Btrfs: fix file corruption and data loss after cloning inline extents", are needed. Without that other fix the test fails in a different way that does not leak the truncated data, instead part of destination file gets replaced with zeroes (because the destination file has a larger inline extent than the source). seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" tmp=/tmp/$$ status=1 # failure is the default! trap "_cleanup; exit \$status" 0 1 2 3 15 _cleanup() { rm -f $tmp.* } # get standard environment, filters and checks . ./common/rc . 
./common/filter # real QA test starts here _need_to_be_root _supported_fs btrfs _supported_os Linux _require_scratch _require_cloner rm -f $seqres.full _scratch_mkfs >>$seqres.full 2>&1 _scratch_mount "-o compress" # Create our test files. File foo is going to be the source of a clone operation # and consists of a single inline extent with an uncompressed size of 512 bytes, # while file bar consists of a single inline extent with an uncompressed size of # 256 bytes. For our test's purpose, it's important that file bar has an inline # extent with a size smaller than foo's inline extent. $XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \ -c "pwrite -S 0x2a 128 384" \ $SCRATCH_MNT/foo | _filter_xfs_io $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io # Now durably persist all metadata and data. We do this to make sure that we get # on disk an inline extent with a size of 512 bytes for file foo. sync # Now truncate our file foo to a smaller size. Because it consists of a # compressed and inline extent, btrfs did not shrink the inline extent to the # new size (if the extent was not compressed, btrfs would shrink it to 128 # bytes), it only updates the inode's i_size to 128 bytes. $XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo # Now clone foo's inline extent into bar. # This clone operation should fail with errno EOPNOTSUPP because the source # file consists only of an inline extent and the file's size is smaller than # the inline extent of the destination (128 bytes < 256 bytes). However the # clone ioctl was not prepared to deal with a file that has a size smaller # than the size of its inline extent (something that happens only for compressed # inline extents), resulting in copying the full inline extent from the source # file into the destination file. # # Note that btrfs' clone operation for inline extents consists of removing the # inline extent from the destination inode and copy the inline extent from the # source inode into the destination inode, meaning that if the destination # inode's inline extent is larger (N bytes) than the source inode's inline # extent (M bytes), some bytes (N - M bytes) will be lost from the destination # file. Btrfs could copy the source inline extent's data into the destination's # inline extent so that we would not lose any data, but that's currently not # done due to the complexity that would be needed to deal with such cases # (specially when one or both extents are compressed), returning EOPNOTSUPP, as # it's normally not a very common case to clone very small files (only case # where we get inline extents) and copying inline extents does not save any # space (unlike for normal, non-inlined extents). $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar # Now because the above clone operation used to succeed, and due to foo's inline # extent not being shinked by the truncate operation, our file bar got the whole # inline extent copied from foo, making us lose the last 128 bytes from bar # which got replaced by the bytes in range [128, 256[ from foo before foo was # truncated - in other words, data loss from bar and being able to read old and # stale data from foo that should not be possible to read anymore through normal # filesystem operations. Contrast with the case where we truncate a file from a # size N to a smaller size M, truncate it back to size N and then read the range # [M, N[, we should always get the value 0x00 for all the bytes in that range. 
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore # not modify our file's bar data/metadata. So its content should be 256 bytes # long with all bytes having the value 0xbb. # # Without the btrfs bug fix, the clone operation succeeded and resulted in # leaking truncated data from foo, the bytes that belonged to its range # [128, 256[, and losing data from bar in that same range. So reading the # file gave us the following content: # # 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 # * # 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a # * # 0000400 echo "File bar's content after the clone operation:" od -t x1 $SCRATCH_MNT/bar # Also because the foo's inline extent was not shrunk by the truncate # operation, btrfs' fsck, which is run by the fstests framework everytime a # test completes, failed reporting the following error: # # root 5 inode 257 errors 400, nbytes wrong status=0 exit Cc: [email protected] Signed-off-by: Filipe Manana <[email protected]>
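A hedged userspace sketch of the clone step (step 5 of the scenario above), not part of the original commit. The mount point and file names are illustrative assumptions; BTRFS_IOC_CLONE_RANGE and its argument struct are the standard btrfs ioctl interface.

    /* Minimal sketch of user B cloning user A's truncated file.
     * Paths and error handling are illustrative. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>

    int main(void)
    {
        int src = open("/mnt/foo", O_RDONLY);  /* user A's truncated file */
        int dst = open("/mnt/bar", O_RDWR);    /* user B's own file */
        if (src < 0 || dst < 0)
            return 1;

        struct btrfs_ioctl_clone_range_args args = {
            .src_fd = src,
            .src_offset = 0,
            .src_length = 0,    /* 0 = clone the whole source range */
            .dest_offset = 0,
        };
        /* Before the fix this succeeded and copied the full (stale)
         * inline extent; after the fix the truncated data no longer
         * leaks into the destination. */
        if (ioctl(dst, BTRFS_IOC_CLONE_RANGE, &args) < 0)
            perror("clone");
        close(src);
        close(dst);
        return 0;
    }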
static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */
{
    spl_array_it *iterator = (spl_array_it *)iter;
    spl_array_object *object = iterator->object;
    HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

    if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) {
        zend_user_it_get_current_data(iter, data TSRMLS_CC);
    } else {
        if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) {
            *data = NULL;
        }
    }
}
0
[]
php-src
a374dfab567ff7f0ab0dc150f14cc891b0340b47
211,857,085,270,447,600,000,000,000,000,000,000,000
14
Fix bug #67492: unserialize() SPL ArrayObject / SPLObjectStorage Type Confusion
_add_starting_step(uint16_t type, void *req)
{
    starting_step_t *starting_step;
    int rc = SLURM_SUCCESS;

    /* Add the step info to a list of starting processes that
       cannot reliably be contacted. */
    slurm_mutex_lock(&conf->starting_steps_lock);

    starting_step = xmalloc(sizeof(starting_step_t));
    if (!starting_step) {
        error("%s failed to allocate memory", __func__);
        rc = SLURM_FAILURE;
        goto fail;
    }

    switch (type) {
    case LAUNCH_BATCH_JOB:
        starting_step->job_id = ((batch_job_launch_msg_t *)req)->job_id;
        starting_step->step_id = ((batch_job_launch_msg_t *)req)->step_id;
        break;
    case LAUNCH_TASKS:
        starting_step->job_id = ((launch_tasks_request_msg_t *)req)->job_id;
        starting_step->step_id = ((launch_tasks_request_msg_t *)req)->job_step_id;
        break;
    case REQUEST_LAUNCH_PROLOG:
        starting_step->job_id = ((prolog_launch_msg_t *)req)->job_id;
        starting_step->step_id = SLURM_EXTERN_CONT;
        break;
    default:
        error("%s called with an invalid type: %u", __func__, type);
        rc = SLURM_FAILURE;
        xfree(starting_step);
        goto fail;
    }

    if (!list_append(conf->starting_steps, starting_step)) {
        error("%s failed to allocate memory for list", __func__);
        rc = SLURM_FAILURE;
        xfree(starting_step);
        goto fail;
    }

fail:
    slurm_mutex_unlock(&conf->starting_steps_lock);
    return rc;
}
0
[ "CWE-284" ]
slurm
92362a92fffe60187df61f99ab11c249d44120ee
119,867,455,732,729,000,000,000,000,000,000,000,000
49
Fix security issue in _prolog_error(). Fix security issue caused by insecure file path handling triggered by the failure of a Prolog script. To exploit this a user needs to anticipate or cause the Prolog to fail for their job. (This commit is slightly different from the fix to the 15.08 branch.) CVE-2016-10030.
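A hedged sketch of the defensive pattern for this class of bug, not slurmd's actual fix (the commit message does not show the patch): creating per-job files with O_CREAT|O_EXCL makes the open fail if anything, including a symlink, already sits at a predictable path.

    /* Illustrative helper; the name 'open_job_file' is an assumption. */
    #include <fcntl.h>

    static int open_job_file(const char *path)
    {
        /* Fails with EEXIST if anything - including a user-planted
         * symlink - already exists at 'path', so an attacker cannot
         * redirect the write through a chosen link. */
        return open(path, O_WRONLY | O_CREAT | O_EXCL, 0600);
    }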
bool str_is_correct_filename(const char *str)
{
#define NOT_PRINTABLE(c) (c < ' ' || c == 0x7f)
    if (NOT_PRINTABLE(*str) || *str == '/' || *str == '\0')
        return false;

    ++str;
    if (NOT_PRINTABLE(*str) || *str == '/' || (*str == '\0' && *(str-1) == '.'))
        return false;

    ++str;
    if (NOT_PRINTABLE(*str) || *str == '/' ||
        (*str == '\0' && *(str-1) == '.' && *(str-2) == '.'))
        return false;

    ++str;
    for (unsigned i = 0; *str != '\0' && i < 61; ++str, ++i)
        if (NOT_PRINTABLE(*str) || *str == '/')
            return false;

    return *str == '\0';
#undef NOT_PRINTABLE
}
0
[ "CWE-22" ]
libreport
54ecf8d017580b495d6501e53ca54e453a73a364
50,001,664,201,696,030,000,000,000,000,000,000,000
24
lib: add a function checking file names Move the code from ABRT and extend it a bit: * allow only 64 characters * allow '.' in names (vmcore_dmesg.txt) * forbid '/' * forbid "." * forbid ".." Related: #1214451 Signed-off-by: Jakub Filak <[email protected]>
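A quick usage sketch for the checker above; the prototype is repeated for self-containment and the sample names are illustrative, not from the libreport test suite.

    #include <stdbool.h>
    #include <stdio.h>

    bool str_is_correct_filename(const char *str);  /* from the code above */

    int main(void)
    {
        const char *names[] = { "vmcore_dmesg.txt", ".", "..", "a/b" };
        for (unsigned i = 0; i < sizeof(names)/sizeof(names[0]); ++i)
            printf("%-18s -> %s\n", names[i],
                   str_is_correct_filename(names[i]) ? "accepted" : "rejected");
        return 0;
    }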
\param mode Output type, can be primary { 0=files only | 1=folders only | 2=files + folders }. \param include_path Tell if \c path must be included in resulting filenames. \return A list of filenames. **/ inline CImgList<char> files(const char *const path, const bool is_pattern=false, const unsigned int mode=2, const bool include_path=false) { if (!path || !*path) return files("*",true,mode,include_path); CImgList<char> res; // If path is a valid folder name, ignore argument 'is_pattern'. const bool _is_pattern = is_pattern && !cimg::is_directory(path); bool is_root = false, is_current = false; cimg::unused(is_root,is_current); // Clean format of input path. CImg<char> pattern, _path = CImg<char>::string(path); #if cimg_OS==2 for (char *ps = _path; *ps; ++ps) if (*ps=='\\') *ps='/'; #endif char *pd = _path; for (char *ps = pd; *ps; ++ps) { if (*ps!='/' || *ps!=*(ps+1)) *(pd++) = *ps; } *pd = 0; unsigned int lp = (unsigned int)std::strlen(_path); if (!_is_pattern && lp && _path[lp - 1]=='/') { _path[lp - 1] = 0; --lp; #if cimg_OS!=2 is_root = !*_path; #endif } // Separate folder path and matching pattern. if (_is_pattern) { const unsigned int bpos = (unsigned int)(cimg::basename(_path,'/') - _path.data()); CImg<char>::string(_path).move_to(pattern); if (bpos) { _path[bpos - 1] = 0; // End 'path' at last slash #if cimg_OS!=2 is_root = !*_path; #endif } else { // No path to folder specified, assuming current folder is_current = true; *_path = 0; } lp = (unsigned int)std::strlen(_path); } // Windows version. #if cimg_OS==2 if (!_is_pattern) { pattern.assign(lp + 3); std::memcpy(pattern,_path,lp); pattern[lp] = '/'; pattern[lp + 1] = '*'; pattern[lp + 2] = 0; } WIN32_FIND_DATAA file_data; const HANDLE dir = FindFirstFileA(pattern.data(),&file_data); if (dir==INVALID_HANDLE_VALUE) return CImgList<char>::const_empty(); do { const char *const filename = file_data.cFileName; if (*filename!='.' || (filename[1] && (filename[1]!='.' || filename[2]))) { const unsigned int lf = (unsigned int)std::strlen(filename); const bool is_directory = (file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)!=0; if ((!mode && !is_directory) || (mode==1 && is_directory) || mode>=2) { if (include_path) { CImg<char> full_filename((lp?lp+1:0) + lf + 1); if (lp) { std::memcpy(full_filename,_path,lp); full_filename[lp] = '/'; } std::memcpy(full_filename._data + (lp?lp + 1:0),filename,lf + 1); full_filename.move_to(res); } else CImg<char>(filename,lf + 1).move_to(res); } } } while (FindNextFileA(dir,&file_data)); FindClose(dir); // Unix version (posix). #elif cimg_OS == 1 DIR *const dir = opendir(is_root?"/":is_current?".":_path.data()); if (!dir) return CImgList<char>::const_empty(); struct dirent *ent; while ((ent=readdir(dir))!=0) { const char *const filename = ent->d_name; if (*filename!='.' || (filename[1] && (filename[1]!='.' 
|| filename[2]))) { const unsigned int lf = (unsigned int)std::strlen(filename); CImg<char> full_filename(lp + lf + 2); if (!is_current) { full_filename.assign(lp + lf + 2); if (lp) std::memcpy(full_filename,_path,lp); full_filename[lp] = '/'; std::memcpy(full_filename._data + lp + 1,filename,lf + 1); } else full_filename.assign(filename,lf + 1); struct stat st; if (stat(full_filename,&st)==-1) continue; const bool is_directory = (st.st_mode & S_IFDIR)!=0; if ((!mode && !is_directory) || (mode==1 && is_directory) || mode==2) { if (include_path) { if (!_is_pattern || (_is_pattern && !fnmatch(pattern,full_filename,0))) full_filename.move_to(res); } else { if (!_is_pattern || (_is_pattern && !fnmatch(pattern,full_filename,0))) CImg<char>(filename,lf + 1).move_to(res); } } } } closedir(dir); #endif // Sort resulting list by lexicographic order.
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
237,992,039,529,520,200,000,000,000,000,000,000,000
108
.
RZ_API void rz_analysis_var_add_constraint(RzAnalysisVar *var, RZ_BORROW RzTypeConstraint *constraint)
{
    rz_vector_push(&var->constraints, constraint);
}
0
[ "CWE-703" ]
rizin
6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939
276,969,815,222,320,770,000,000,000,000,000,000,000
3
Initialize retctx,ctx before freeing the inner elements In rz_core_analysis_type_match retctx structure was initialized on the stack only after a "goto out_function", where a field of that structure was freed. When the goto path is taken, the field is not properly initialized and it cause cause a crash of Rizin or have other effects. Fixes: CVE-2021-4022
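A hedged sketch of the bug pattern the commit describes; the type and field names are illustrative, not Rizin's actual structures. A stack object must be initialized before any error path that frees its members can be reached.

    #include <stdlib.h>
    #include <string.h>

    struct retctx { char *types; int n; };  /* illustrative stand-in */

    static int analyze(int fail_early)
    {
        struct retctx ctx;
        memset(&ctx, 0, sizeof(ctx));   /* the fix: zero before any goto */

        if (fail_early)
            goto out;   /* without the memset, ctx.types is stack junk here */

        ctx.types = strdup("int");
        ctx.n = 1;
    out:
        free(ctx.types);    /* safe: either NULL or a valid allocation */
        return 0;
    }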
static tmbstr ParseAttribute( TidyDocImpl* doc, Bool *isempty,
                              Node **asp, Node **php)
{
    Lexer* lexer = doc->lexer;
    int start, len = 0;
    tmbstr attr = NULL;
    uint c, lastc;

    *asp = NULL;  /* clear asp pointer */
    *php = NULL;  /* clear php pointer */

    /* skip white space before the attribute */
    for (;;)
    {
        c = TY_(ReadChar)( doc->docIn );

        if (c == '/')
        {
            c = TY_(ReadChar)( doc->docIn );

            if (c == '>')
            {
                *isempty = yes;
                return NULL;
            }

            TY_(UngetChar)(c, doc->docIn);
            c = '/';
            break;
        }

        if (c == '>')
            return NULL;

        if (c =='<')
        {
            c = TY_(ReadChar)(doc->docIn);

            if (c == '%')
            {
                *asp = ParseAsp( doc );
                return NULL;
            }
            else if (c == '?')
            {
                *php = ParsePhp( doc );
                return NULL;
            }

            TY_(UngetChar)(c, doc->docIn);
            TY_(UngetChar)('<', doc->docIn);
            TY_(ReportAttrError)( doc, lexer->token, NULL, UNEXPECTED_GT );
            return NULL;
        }

        if (c == '=')
        {
            TY_(ReportAttrError)( doc, lexer->token, NULL, UNEXPECTED_EQUALSIGN );
            continue;
        }

        if (c == '"' || c == '\'')
        {
            TY_(ReportAttrError)( doc, lexer->token, NULL, UNEXPECTED_QUOTEMARK );
            continue;
        }

        if (c == EndOfStream)
        {
            TY_(ReportAttrError)( doc, lexer->token, NULL, UNEXPECTED_END_OF_FILE_ATTR );
            TY_(UngetChar)(c, doc->docIn);
            return NULL;
        }

        if (!TY_(IsWhite)(c))
            break;
    }

    start = lexer->lexsize;
    lastc = c;

    for (;;)
    {
        /* but push back '=' for parseValue() */
        if (c == '=' || c == '>')
        {
            TY_(UngetChar)(c, doc->docIn);
            break;
        }

        if (c == '<' || c == EndOfStream)
        {
            TY_(UngetChar)(c, doc->docIn);
            break;
        }

        if (lastc == '-' && (c == '"' || c == '\''))
        {
            lexer->lexsize--;
            --len;
            TY_(UngetChar)(c, doc->docIn);
            break;
        }

        if (TY_(IsWhite)(c))
            break;

        /* what should be done about non-namechar characters? */
        /* currently these are incorporated into the attr name */
        if ( !cfgBool(doc, TidyXmlTags) && TY_(IsUpper)(c) )
            c = TY_(ToLower)(c);

        TY_(AddCharToLexer)( lexer, c );
        lastc = c;
        c = TY_(ReadChar)(doc->docIn);
    }

    /* handle attribute names with multibyte chars */
    len = lexer->lexsize - start;
    attr = (len > 0 ? TY_(tmbstrndup)(doc->allocator,
                                      lexer->lexbuf+start, len) : NULL);
    lexer->lexsize = start;

    return attr;
}
0
[ "CWE-119" ]
tidy-html5
c18f27a58792f7fbd0b30a0ff50d6b40a82f940d
126,014,115,163,909,220,000,000,000,000,000,000,000
128
Issue #217 - avoid len going negative, ever...
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
    /* Address WBINVD may be executed by guest */
    if (need_emulate_wbinvd(vcpu)) {
        if (kvm_x86_ops->has_wbinvd_exit())
            cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
        else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
            smp_call_function_single(vcpu->cpu,
                    wbinvd_ipi, NULL, 1);
    }

    kvm_x86_ops->vcpu_load(vcpu, cpu);

    /* Apply any externally detected TSC adjustments (due to suspend) */
    if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
        adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
        vcpu->arch.tsc_offset_adjustment = 0;
        set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
    }

    if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
        s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
                native_read_tsc() - vcpu->arch.last_host_tsc;
        if (tsc_delta < 0)
            mark_tsc_unstable("KVM discovered backwards TSC");
        if (check_tsc_unstable()) {
            u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
                        vcpu->arch.last_guest_tsc);
            kvm_x86_ops->write_tsc_offset(vcpu, offset);
            vcpu->arch.tsc_catchup = 1;
        }
        /*
         * On a host with synchronized TSC, there is no need to update
         * kvmclock on vcpu->cpu migration
         */
        if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
            kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
        if (vcpu->cpu != cpu)
            kvm_migrate_timers(vcpu);
        vcpu->cpu = cpu;
    }

    accumulate_steal_time(vcpu);
    kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
0
[ "CWE-119", "CWE-703", "CWE-120" ]
linux
a08d3b3b99efd509133946056531cdf8f3a0c09b
197,751,413,169,337,170,000,000,000,000,000,000,000
45
kvm: x86: fix emulator buffer overflow (CVE-2014-0049) The problem occurs when the guest performs a pusha with the stack address pointing to an mmio address (or an invalid guest physical address) to start with, but then extending into an ordinary guest physical address. When doing repeated emulated pushes emulator_read_write sets mmio_needed to 1 on the first one. On a later push when the stack points to regular memory, mmio_nr_fragments is set to 0, but mmio_is_needed is not set to 0. As a result, KVM exits to userspace, and then returns to complete_emulated_mmio. In complete_emulated_mmio vcpu->mmio_cur_fragment is incremented. The termination condition of vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments is never achieved. The code bounces back and forth to userspace incrementing mmio_cur_fragment past its buffer. If the guest does nothing else it eventually leads to a crash on a memcpy from an invalid memory address. However if guest code can cause the vm to be destroyed in another vcpu with excellent timing, then kvm_clear_async_pf_completion_queue can be used by the guest to control the data that's pointed to by the call to cancel_work_item, which can be used to gain execution. Fixes: f78146b0f9230765c6315b2e14f56112513389ad Signed-off-by: Andrew Honig <[email protected]> Cc: [email protected] (3.5+) Signed-off-by: Paolo Bonzini <[email protected]>
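An illustrative sketch of the loop invariant described above, not the actual KVM code: the completion cursor must be bounded by the fragment count, and stale "needed" state must be cleared so the loop can terminate even when the count was reset to zero.

    /* 'struct mmio_state' and 'complete_mmio' are invented names. */
    struct mmio_state {
        unsigned cur_fragment;
        unsigned nr_fragments;
        int needed;
    };

    static int complete_mmio(struct mmio_state *s)
    {
        /* Without this bound check, cur_fragment runs past the buffer
         * when nr_fragments was reset to 0 while needed stayed 1. */
        if (s->cur_fragment >= s->nr_fragments) {
            s->needed = 0;      /* terminate instead of looping forever */
            return 0;
        }
        /* ... copy one fragment here ... */
        s->cur_fragment++;
        return s->cur_fragment < s->nr_fragments;   /* more work pending? */
    }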
static void opj_pi_update_encode_poc_and_final(opj_cp_t *p_cp,
        OPJ_UINT32 p_tileno,
        OPJ_UINT32 p_tx0,
        OPJ_UINT32 p_tx1,
        OPJ_UINT32 p_ty0,
        OPJ_UINT32 p_ty1,
        OPJ_UINT32 p_max_prec,
        OPJ_UINT32 p_max_res,
        OPJ_UINT32 p_dx_min,
        OPJ_UINT32 p_dy_min)
{
    /* loop*/
    OPJ_UINT32 pino;
    /* tile coding parameter*/
    opj_tcp_t *l_tcp = 00;
    /* current poc being updated*/
    opj_poc_t * l_current_poc = 00;
    /* number of pocs*/
    OPJ_UINT32 l_poc_bound;

    OPJ_ARG_NOT_USED(p_max_res);

    /* preconditions in debug*/
    assert(p_cp != 00);
    assert(p_tileno < p_cp->tw * p_cp->th);

    /* initializations*/
    l_tcp = &p_cp->tcps [p_tileno];
    /* number of iterations in the loop */
    l_poc_bound = l_tcp->numpocs + 1;

    /* start at first element, and to make sure the compiler will not make
       a calculation each time in the loop store a pointer to the current
       element to modify rather than l_tcp->pocs[i]*/
    l_current_poc = l_tcp->pocs;

    l_current_poc->compS = l_current_poc->compno0;
    l_current_poc->compE = l_current_poc->compno1;
    l_current_poc->resS = l_current_poc->resno0;
    l_current_poc->resE = l_current_poc->resno1;
    l_current_poc->layE = l_current_poc->layno1;

    /* special treatment for the first element*/
    l_current_poc->layS = 0;
    l_current_poc->prg = l_current_poc->prg1;
    l_current_poc->prcS = 0;

    l_current_poc->prcE = p_max_prec;
    l_current_poc->txS = (OPJ_UINT32)p_tx0;
    l_current_poc->txE = (OPJ_UINT32)p_tx1;
    l_current_poc->tyS = (OPJ_UINT32)p_ty0;
    l_current_poc->tyE = (OPJ_UINT32)p_ty1;
    l_current_poc->dx = p_dx_min;
    l_current_poc->dy = p_dy_min;

    ++ l_current_poc;
    for (pino = 1; pino < l_poc_bound ; ++pino) {
        l_current_poc->compS = l_current_poc->compno0;
        l_current_poc->compE = l_current_poc->compno1;
        l_current_poc->resS = l_current_poc->resno0;
        l_current_poc->resE = l_current_poc->resno1;
        l_current_poc->layE = l_current_poc->layno1;
        l_current_poc->prg = l_current_poc->prg1;
        l_current_poc->prcS = 0;
        /* special treatment here different from the first element*/
        l_current_poc->layS = (l_current_poc->layE > (l_current_poc - 1)->layE) ?
                              l_current_poc->layE : 0;

        l_current_poc->prcE = p_max_prec;
        l_current_poc->txS = (OPJ_UINT32)p_tx0;
        l_current_poc->txE = (OPJ_UINT32)p_tx1;
        l_current_poc->tyS = (OPJ_UINT32)p_ty0;
        l_current_poc->tyE = (OPJ_UINT32)p_ty1;
        l_current_poc->dx = p_dx_min;
        l_current_poc->dy = p_dy_min;
        ++ l_current_poc;
    }
}
0
[ "CWE-122" ]
openjpeg
00383e162ae2f8fc951f5745bf1011771acb8dce
120,391,726,112,038,240,000,000,000,000,000,000,000
78
pi.c: avoid out of bounds access with POC (refs https://github.com/uclouvain/openjpeg/issues/1293#issuecomment-737122836)
template<typename t>
bool contains(const T& pixel, t& x) const {
  const T *const ppixel = &pixel;
  if (is_empty() || ppixel<_data || ppixel>=_data + size()) return false;
  x = (t)(((ulongT)(ppixel - _data))%_width);
  return true;
}
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
230,168,199,784,167,860,000,000,000,000,000,000,000
6
Fix other issues in 'CImg<T>::load_bmp()'.
static void vbg_input_close(struct input_dev *input)
{
    struct vbg_dev *gdev = input_get_drvdata(input);

    vbg_core_set_mouse_status(gdev, 0);
}
0
[ "CWE-362" ]
linux
bd23a7269834dc7c1f93e83535d16ebc44b75eba
64,295,121,663,714,690,000,000,000,000,000,000,000
6
virt: vbox: Only copy_from_user the request-header once In vbg_misc_device_ioctl(), the header of the ioctl argument is copied from the userspace pointer 'arg' and saved to the kernel object 'hdr'. Then the 'version', 'size_in', and 'size_out' fields of 'hdr' are verified. Before this commit, after the checks a buffer for the entire request would be allocated and then all data including the verified header would be copied from the userspace 'arg' pointer again. Given that the 'arg' pointer resides in userspace, a malicious userspace process can race to change the data pointed to by 'arg' between the two copies. By doing so, the user can bypass the verifications on the ioctl argument. This commit fixes this by using the already checked copy of the header to fill the header part of the allocated buffer and only copying the remainder of the data from userspace. Signed-off-by: Wenwen Wang <[email protected]> Reviewed-by: Hans de Goede <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
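A minimal sketch of the single-copy pattern this commit describes. Illustrative only: 'struct req_hdr', 'MAX_REQ', and 'fetch_request' are assumptions, not the driver's real names; the kernel APIs used (copy_from_user, kmalloc, ERR_PTR) are standard.

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    #define MAX_REQ 65536   /* assumed upper bound on request size */

    struct req_hdr { u32 version; u32 size_in; u32 size_out; };

    static void *fetch_request(void __user *arg)
    {
        struct req_hdr hdr;
        u8 *buf;

        /* copy and validate the header exactly once */
        if (copy_from_user(&hdr, arg, sizeof(hdr)))
            return ERR_PTR(-EFAULT);
        if (hdr.size_in < sizeof(hdr) || hdr.size_in > MAX_REQ)
            return ERR_PTR(-EINVAL);

        buf = kmalloc(hdr.size_in, GFP_KERNEL);
        if (!buf)
            return ERR_PTR(-ENOMEM);

        /* reuse the already-checked copy instead of re-reading userspace */
        memcpy(buf, &hdr, sizeof(hdr));

        /* copy only the remainder; a racing userspace write can no
         * longer swap in a different header behind the validation */
        if (copy_from_user(buf + sizeof(hdr),
                           (u8 __user *)arg + sizeof(hdr),
                           hdr.size_in - sizeof(hdr))) {
            kfree(buf);
            return ERR_PTR(-EFAULT);
        }
        return buf;
    }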
PackLinuxElf32mipseb::buildLoader(Filter const *ft)
{
    buildLinuxLoader(
        stub_mips_r3000_linux_elf_entry, sizeof(stub_mips_r3000_linux_elf_entry),
        stub_mips_r3000_linux_elf_fold,  sizeof(stub_mips_r3000_linux_elf_fold), ft);
}
0
[ "CWE-476" ]
upx
ef336dbcc6dc8344482f8cf6c909ae96c3286317
252,105,070,227,830,360,000,000,000,000,000,000,000
6
Protect against badly crafted input. https://github.com/upx/upx/issues/128 modified: p_lx_elf.cpp
f_filereadable(typval_T *argvars, typval_T *rettv)
{
    int     fd;
    char_u  *p;
    int     n;

#ifndef O_NONBLOCK
# define O_NONBLOCK 0
#endif
    p = tv_get_string(&argvars[0]);
    if (*p && !mch_isdir(p) && (fd = mch_open((char *)p,
                                  O_RDONLY | O_NONBLOCK, 0)) >= 0)
    {
        n = TRUE;
        close(fd);
    }
    else
        n = FALSE;

    rettv->vval.v_number = n;
}
0
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
276,945,930,716,734,130,000,000,000,000,000,000,000
21
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
void vcc_insert_socket(struct sock *sk)
{
    write_lock_irq(&vcc_sklist_lock);
    __vcc_insert_socket(sk);
    write_unlock_irq(&vcc_sklist_lock);
}
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
242,595,753,227,098,900,000,000,000,000,000,000,000
6
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
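A sketch of the convention this commit establishes, with an invented handler name and a simplified signature: msg_namelen now arrives as 0, and a handler sets it only when it actually fills in msg_name.

    #include <linux/in.h>
    #include <linux/net.h>
    #include <linux/socket.h>

    static int demo_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t len, int flags)
    {
        /* ... receive the payload ... */

        if (msg->msg_name) {    /* NULL when the caller used plain read() */
            struct sockaddr_in *sin = msg->msg_name;

            sin->sin_family = AF_INET;
            sin->sin_port = 0;              /* fill in the real peer here */
            sin->sin_addr.s_addr = 0;
            msg->msg_namelen = sizeof(*sin);  /* <= sizeof(sockaddr_storage) */
        }
        /* if msg_name stays NULL, msg_namelen stays 0, so no
         * uninitialized kernel stack leaks back to userspace */
        return 0;
    }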
static int __readpage_endio_check(struct inode *inode,
                                  struct btrfs_io_bio *io_bio,
                                  int icsum, struct page *page,
                                  int pgoff, u64 start, size_t len)
{
    char *kaddr;
    u32 csum_expected;
    u32 csum = ~(u32)0;

    csum_expected = *(((u32 *)io_bio->csum) + icsum);

    kaddr = kmap_atomic(page);
    csum = btrfs_csum_data(kaddr + pgoff, csum, len);
    btrfs_csum_final(csum, (char *)&csum);
    if (csum != csum_expected)
        goto zeroit;

    kunmap_atomic(kaddr);
    return 0;
zeroit:
    btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
        "csum failed ino %llu off %llu csum %u expected csum %u",
        btrfs_ino(inode), start, csum, csum_expected);
    memset(kaddr + pgoff, 1, len);
    flush_dcache_page(page);
    kunmap_atomic(kaddr);
    if (csum_expected == 0)
        return 0;
    return -EIO;
}
0
[ "CWE-200" ]
linux
0305cd5f7fca85dae392b9ba85b116896eb7c1c7
77,273,179,781,773,410,000,000,000,000,000,000,000
30
Btrfs: fix truncation of compressed and inlined extents When truncating a file to a smaller size which consists of an inline extent that is compressed, we did not discard (or made unusable) the data between the new file size and the old file size, wasting metadata space and allowing for the truncated data to be leaked and the data corruption/loss mentioned below. We were also not correctly decrementing the number of bytes used by the inode, we were setting it to zero, giving a wrong report for callers of the stat(2) syscall. The fsck tool also reported an error about a mismatch between the nbytes of the file versus the real space used by the file. Now because we weren't discarding the truncated region of the file, it was possible for a caller of the clone ioctl to actually read the data that was truncated, allowing for a security breach without requiring root access to the system, using only standard filesystem operations. The scenario is the following: 1) User A creates a file which consists of an inline and compressed extent with a size of 2000 bytes - the file is not accessible to any other users (no read, write or execution permission for anyone else); 2) The user truncates the file to a size of 1000 bytes; 3) User A makes the file world readable; 4) User B creates a file consisting of an inline extent of 2000 bytes; 5) User B issues a clone operation from user A's file into its own file (using a length argument of 0, clone the whole range); 6) User B now gets to see the 1000 bytes that user A truncated from its file before it made its file world readbale. User B also lost the bytes in the range [1000, 2000[ bytes from its own file, but that might be ok if his/her intention was reading stale data from user A that was never supposed to be public. Note that this contrasts with the case where we truncate a file from 2000 bytes to 1000 bytes and then truncate it back from 1000 to 2000 bytes. In this case reading any byte from the range [1000, 2000[ will return a value of 0x00, instead of the original data. This problem exists since the clone ioctl was added and happens both with and without my recent data loss and file corruption fixes for the clone ioctl (patch "Btrfs: fix file corruption and data loss after cloning inline extents"). So fix this by truncating the compressed inline extents as we do for the non-compressed case, which involves decompressing, if the data isn't already in the page cache, compressing the truncated version of the extent, writing the compressed content into the inline extent and then truncate it. The following test case for fstests reproduces the problem. In order for the test to pass both this fix and my previous fix for the clone ioctl that forbids cloning a smaller inline extent into a larger one, which is titled "Btrfs: fix file corruption and data loss after cloning inline extents", are needed. Without that other fix the test fails in a different way that does not leak the truncated data, instead part of destination file gets replaced with zeroes (because the destination file has a larger inline extent than the source). seq=`basename $0` seqres=$RESULT_DIR/$seq echo "QA output created by $seq" tmp=/tmp/$$ status=1 # failure is the default! trap "_cleanup; exit \$status" 0 1 2 3 15 _cleanup() { rm -f $tmp.* } # get standard environment, filters and checks . ./common/rc . 
./common/filter # real QA test starts here _need_to_be_root _supported_fs btrfs _supported_os Linux _require_scratch _require_cloner rm -f $seqres.full _scratch_mkfs >>$seqres.full 2>&1 _scratch_mount "-o compress" # Create our test files. File foo is going to be the source of a clone operation # and consists of a single inline extent with an uncompressed size of 512 bytes, # while file bar consists of a single inline extent with an uncompressed size of # 256 bytes. For our test's purpose, it's important that file bar has an inline # extent with a size smaller than foo's inline extent. $XFS_IO_PROG -f -c "pwrite -S 0xa1 0 128" \ -c "pwrite -S 0x2a 128 384" \ $SCRATCH_MNT/foo | _filter_xfs_io $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 256" $SCRATCH_MNT/bar | _filter_xfs_io # Now durably persist all metadata and data. We do this to make sure that we get # on disk an inline extent with a size of 512 bytes for file foo. sync # Now truncate our file foo to a smaller size. Because it consists of a # compressed and inline extent, btrfs did not shrink the inline extent to the # new size (if the extent was not compressed, btrfs would shrink it to 128 # bytes), it only updates the inode's i_size to 128 bytes. $XFS_IO_PROG -c "truncate 128" $SCRATCH_MNT/foo # Now clone foo's inline extent into bar. # This clone operation should fail with errno EOPNOTSUPP because the source # file consists only of an inline extent and the file's size is smaller than # the inline extent of the destination (128 bytes < 256 bytes). However the # clone ioctl was not prepared to deal with a file that has a size smaller # than the size of its inline extent (something that happens only for compressed # inline extents), resulting in copying the full inline extent from the source # file into the destination file. # # Note that btrfs' clone operation for inline extents consists of removing the # inline extent from the destination inode and copy the inline extent from the # source inode into the destination inode, meaning that if the destination # inode's inline extent is larger (N bytes) than the source inode's inline # extent (M bytes), some bytes (N - M bytes) will be lost from the destination # file. Btrfs could copy the source inline extent's data into the destination's # inline extent so that we would not lose any data, but that's currently not # done due to the complexity that would be needed to deal with such cases # (specially when one or both extents are compressed), returning EOPNOTSUPP, as # it's normally not a very common case to clone very small files (only case # where we get inline extents) and copying inline extents does not save any # space (unlike for normal, non-inlined extents). $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/foo $SCRATCH_MNT/bar # Now because the above clone operation used to succeed, and due to foo's inline # extent not being shinked by the truncate operation, our file bar got the whole # inline extent copied from foo, making us lose the last 128 bytes from bar # which got replaced by the bytes in range [128, 256[ from foo before foo was # truncated - in other words, data loss from bar and being able to read old and # stale data from foo that should not be possible to read anymore through normal # filesystem operations. Contrast with the case where we truncate a file from a # size N to a smaller size M, truncate it back to size N and then read the range # [M, N[, we should always get the value 0x00 for all the bytes in that range. 
# We expected the clone operation to fail with errno EOPNOTSUPP and therefore # not modify our file's bar data/metadata. So its content should be 256 bytes # long with all bytes having the value 0xbb. # # Without the btrfs bug fix, the clone operation succeeded and resulted in # leaking truncated data from foo, the bytes that belonged to its range # [128, 256[, and losing data from bar in that same range. So reading the # file gave us the following content: # # 0000000 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 a1 # * # 0000200 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a 2a # * # 0000400 echo "File bar's content after the clone operation:" od -t x1 $SCRATCH_MNT/bar # Also because the foo's inline extent was not shrunk by the truncate # operation, btrfs' fsck, which is run by the fstests framework everytime a # test completes, failed reporting the following error: # # root 5 inode 257 errors 400, nbytes wrong status=0 exit Cc: [email protected] Signed-off-by: Filipe Manana <[email protected]>
static double mp_find(_cimg_math_parser& mp) {
  const bool is_forward = (bool)_mp_arg(5);
  const ulongT siz = (ulongT)mp.opcode[3];
  longT ind = (longT)(mp.opcode[6]!=_cimg_mp_slot_nan?_mp_arg(6):is_forward?0:siz - 1);
  if (ind<0 || ind>=(longT)siz) return -1.;
  const double
    *const ptrb = &_mp_arg(2) + 1,
    *const ptre = ptrb + siz,
    val = _mp_arg(4),
    *ptr = ptrb + ind;

  // Forward search
  if (is_forward) {
    while (ptr<ptre && *ptr!=val) ++ptr;
    return ptr==ptre?-1.:(double)(ptr - ptrb);
  }

  // Backward search.
  while (ptr>=ptrb && *ptr!=val) --ptr;
  return ptr<ptrb?-1.:(double)(ptr - ptrb);
}
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
164,429,061,376,241,160,000,000,000,000,000,000,000
21
Fix other issues in 'CImg<T>::load_bmp()'.
init_global_keywords(bool global_active) { /* global definitions mapping */ install_keyword_root("linkbeat_use_polling", use_polling_handler, global_active); #if HAVE_DECL_CLONE_NEWNET install_keyword_root("net_namespace", &net_namespace_handler, global_active); install_keyword_root("namespace_with_ipsets", &namespace_ipsets_handler, global_active); #endif install_keyword_root("use_pid_dir", &use_pid_dir_handler, global_active); install_keyword_root("instance", &instance_handler, global_active); install_keyword_root("child_wait_time", &child_wait_handler, global_active); install_keyword_root("global_defs", NULL, global_active); install_keyword("router_id", &routerid_handler); install_keyword("notification_email_from", &emailfrom_handler); install_keyword("smtp_server", &smtpserver_handler); install_keyword("smtp_helo_name", &smtphelo_handler); install_keyword("smtp_connect_timeout", &smtpto_handler); install_keyword("notification_email", &email_handler); install_keyword("smtp_alert", &smtp_alert_handler); #ifdef _WITH_VRRP_ install_keyword("smtp_alert_vrrp", &smtp_alert_vrrp_handler); #endif #ifdef _WITH_LVS_ install_keyword("smtp_alert_checker", &smtp_alert_checker_handler); #endif #ifdef _WITH_VRRP_ install_keyword("dynamic_interfaces", &dynamic_interfaces_handler); install_keyword("no_email_faults", &no_email_faults_handler); install_keyword("default_interface", &default_interface_handler); #endif #ifdef _WITH_LVS_ install_keyword("lvs_timeouts", &lvs_timeouts); install_keyword("lvs_flush", &lvs_flush_handler); #ifdef _WITH_VRRP_ install_keyword("lvs_sync_daemon", &lvs_syncd_handler); #endif #endif #ifdef _WITH_VRRP_ install_keyword("vrrp_mcast_group4", &vrrp_mcast_group4_handler); install_keyword("vrrp_mcast_group6", &vrrp_mcast_group6_handler); install_keyword("vrrp_garp_master_delay", &vrrp_garp_delay_handler); install_keyword("vrrp_garp_master_repeat", &vrrp_garp_rep_handler); install_keyword("vrrp_garp_master_refresh", &vrrp_garp_refresh_handler); install_keyword("vrrp_garp_master_refresh_repeat", &vrrp_garp_refresh_rep_handler); install_keyword("vrrp_garp_lower_prio_delay", &vrrp_garp_lower_prio_delay_handler); install_keyword("vrrp_garp_lower_prio_repeat", &vrrp_garp_lower_prio_rep_handler); install_keyword("vrrp_garp_interval", &vrrp_garp_interval_handler); install_keyword("vrrp_gna_interval", &vrrp_gna_interval_handler); install_keyword("vrrp_lower_prio_no_advert", &vrrp_lower_prio_no_advert_handler); install_keyword("vrrp_higher_prio_send_advert", &vrrp_higher_prio_send_advert_handler); install_keyword("vrrp_version", &vrrp_version_handler); install_keyword("vrrp_iptables", &vrrp_iptables_handler); #ifdef _HAVE_LIBIPSET_ install_keyword("vrrp_ipsets", &vrrp_ipsets_handler); #endif install_keyword("vrrp_check_unicast_src", &vrrp_check_unicast_src_handler); install_keyword("vrrp_skip_check_adv_addr", &vrrp_check_adv_addr_handler); install_keyword("vrrp_strict", &vrrp_strict_handler); install_keyword("vrrp_priority", &vrrp_prio_handler); install_keyword("vrrp_no_swap", &vrrp_no_swap_handler); #ifdef _HAVE_SCHED_RT_ install_keyword("vrrp_rt_priority", &vrrp_rt_priority_handler); #if HAVE_DECL_RLIMIT_RTTIME == 1 install_keyword("vrrp_rlimit_rtime", &vrrp_rt_rlimit_handler); #endif #endif #endif install_keyword("notify_fifo", &global_notify_fifo); install_keyword("notify_fifo_script", &global_notify_fifo_script); #ifdef _WITH_VRRP_ install_keyword("vrrp_notify_fifo", &vrrp_notify_fifo); install_keyword("vrrp_notify_fifo_script", &vrrp_notify_fifo_script); #endif #ifdef _WITH_LVS_ 
install_keyword("lvs_notify_fifo", &lvs_notify_fifo); install_keyword("lvs_notify_fifo_script", &lvs_notify_fifo_script); install_keyword("checker_priority", &checker_prio_handler); install_keyword("checker_no_swap", &checker_no_swap_handler); #ifdef _HAVE_SCHED_RT_ install_keyword("checker_rt_priority", &checker_rt_priority_handler); #if HAVE_DECL_RLIMIT_RTTIME == 1 install_keyword("checker_rlimit_rtime", &checker_rt_rlimit_handler); #endif #endif #endif #ifdef _WITH_BFD_ install_keyword("bfd_priority", &bfd_prio_handler); install_keyword("bfd_no_swap", &bfd_no_swap_handler); #ifdef _HAVE_SCHED_RT_ install_keyword("bfd_rt_priority", &bfd_rt_priority_handler); #if HAVE_DECL_RLIMIT_RTTIME == 1 install_keyword("bfd_rlimit_rtime", &bfd_rt_rlimit_handler); #endif #endif #endif #ifdef _WITH_SNMP_ install_keyword("snmp_socket", &snmp_socket_handler); install_keyword("enable_traps", &trap_handler); #ifdef _WITH_SNMP_VRRP_ install_keyword("enable_snmp_vrrp", &snmp_vrrp_handler); install_keyword("enable_snmp_keepalived", &snmp_vrrp_handler); /* Deprecated v2.0.0 */ #endif #ifdef _WITH_SNMP_RFC_ install_keyword("enable_snmp_rfc", &snmp_rfc_handler); #endif #ifdef _WITH_SNMP_RFCV2_ install_keyword("enable_snmp_rfcv2", &snmp_rfcv2_handler); #endif #ifdef _WITH_SNMP_RFCV3_ install_keyword("enable_snmp_rfcv3", &snmp_rfcv3_handler); #endif #ifdef _WITH_SNMP_CHECKER_ install_keyword("enable_snmp_checker", &snmp_checker_handler); #endif #endif #ifdef _WITH_DBUS_ install_keyword("enable_dbus", &enable_dbus_handler); install_keyword("dbus_service_name", &dbus_service_name_handler); #endif install_keyword("script_user", &script_user_handler); install_keyword("enable_script_security", &script_security_handler); #ifdef _WITH_VRRP_ install_keyword("vrrp_netlink_cmd_rcv_bufs", &vrrp_netlink_cmd_rcv_bufs_handler); install_keyword("vrrp_netlink_cmd_rcv_bufs_force", &vrrp_netlink_cmd_rcv_bufs_force_handler); install_keyword("vrrp_netlink_monitor_rcv_bufs", &vrrp_netlink_monitor_rcv_bufs_handler); install_keyword("vrrp_netlink_monitor_rcv_bufs_force", &vrrp_netlink_monitor_rcv_bufs_force_handler); #endif #ifdef _WITH_LVS_ install_keyword("lvs_netlink_cmd_rcv_bufs", &lvs_netlink_cmd_rcv_bufs_handler); install_keyword("lvs_netlink_cmd_rcv_bufs_force", &lvs_netlink_cmd_rcv_bufs_force_handler); install_keyword("lvs_netlink_monitor_rcv_bufs", &lvs_netlink_monitor_rcv_bufs_handler); install_keyword("lvs_netlink_monitor_rcv_bufs_force", &lvs_netlink_monitor_rcv_bufs_force_handler); #endif #ifdef _WITH_LVS_ install_keyword("rs_init_notifies", &rs_init_notifies_handler); install_keyword("no_checker_emails", &no_checker_emails_handler); #endif #ifdef _WITH_VRRP_ install_keyword("vrrp_rx_bufs_policy", &vrrp_rx_bufs_policy_handler); install_keyword("vrrp_rx_bufs_multiplier", &vrrp_rx_bufs_multiplier_handler); #endif }
1
[ "CWE-200" ]
keepalived
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
135,813,184,844,637,680,000,000,000,000,000,000,000
142
Add command line and configuration option to set umask Issue #1048 identified that files created by keepalived are created with mode 0666. This commit changes the default to 0644, and also allows the umask to be specified in the configuration or as a command line option. Signed-off-by: Quentin Armitage <[email protected]>
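A minimal sketch of what the new option does; the mode value shown is an example, not necessarily keepalived's default. Applying the configured umask early means every file the daemon creates afterwards inherits it.

    #include <sys/stat.h>

    int main(void)
    {
        umask(S_IWGRP | S_IWOTH);   /* 022: new files default to 0644 */
        /* ... parse configuration, write pid files, create fifos ... */
        return 0;
    }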
Perl_croak(pTHX_ const char *pat, ...)
{
    va_list args;
    va_start(args, pat);
    vcroak(pat, &args);
    NOT_REACHED; /* NOTREACHED */
    va_end(args);
}
0
[ "CWE-119", "CWE-703", "CWE-787" ]
perl5
34716e2a6ee2af96078d62b065b7785c001194be
318,731,701,106,081,130,000,000,000,000,000,000,000
8
Perl_my_setenv(); handle integer wrap RT #133204 Wean this function off int/I32 and onto UV/Size_t. Also, replace all malloc-ish calls with a wrapper that does overflow checks, In particular, it was doing (nlen + vlen + 2) which could wrap when the combined length of the environment variable name and value exceeded around 0x7fffffff. The wrapper check function is probably overkill, but belt and braces... NB this function has several variant parts, #ifdef'ed by platform type; I have blindly changed the parts that aren't compiled under linux.
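A hedged sketch of the overflow check this commit describes; the wrapper name is illustrative, not perl's actual helper. The old code computed (nlen + vlen + 2) in a narrow integer type, which wraps once the combined length exceeds about 0x7fffffff.

    #include <stdint.h>
    #include <stdlib.h>

    static void *checked_alloc(size_t nlen, size_t vlen)
    {
        /* reject the sum before it can wrap, then allocate */
        if (nlen > SIZE_MAX - 2 || vlen > SIZE_MAX - 2 - nlen)
            return NULL;    /* nlen + vlen + 2 would overflow */
        return malloc(nlen + vlen + 2);
    }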
static int __find_bmc_guid(struct device *dev, void *data)
{
    guid_t *guid = data;
    struct bmc_device *bmc;
    int rv;

    if (dev->type != &bmc_device_type)
        return 0;

    bmc = to_bmc_device(dev);
    rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
    if (rv)
        rv = kref_get_unless_zero(&bmc->usecount);
    return rv;
}
0
[ "CWE-416", "CWE-284" ]
linux
77f8269606bf95fcb232ee86f6da80886f1dfae8
66,496,724,342,950,980,000,000,000,000,000,000,000
15
ipmi: fix use-after-free of user->release_barrier.rda When we do the following test, we got oops in ipmi_msghandler driver while((1)) do service ipmievd restart & service ipmievd restart done --------------------------------------------------------------- [ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008 [ 294.230188] Mem abort info: [ 294.230190] ESR = 0x96000004 [ 294.230191] Exception class = DABT (current EL), IL = 32 bits [ 294.230193] SET = 0, FnV = 0 [ 294.230194] EA = 0, S1PTW = 0 [ 294.230195] Data abort info: [ 294.230196] ISV = 0, ISS = 0x00000004 [ 294.230197] CM = 0, WnR = 0 [ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a [ 294.230201] [0000803fea6ea008] pgd=0000000000000000 [ 294.230204] Internal error: Oops: 96000004 [#1] SMP [ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio [ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113 [ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO) [ 294.297695] pc : __srcu_read_lock+0x38/0x58 [ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.307853] sp : ffff00001001bc80 [ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000 [ 294.316594] x27: 0000000000000000 x26: dead000000000100 [ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800 [ 294.327366] x23: 0000000000000000 x22: 0000000000000000 [ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018 [ 294.338137] x19: ffff80df33d19018 x18: 0000000000000000 [ 294.343523] x17: 0000000000000000 x16: 0000000000000000 [ 294.348908] x15: 0000000000000000 x14: 0000000000000002 [ 294.354293] x13: 0000000000000000 x12: 0000000000000000 [ 294.359679] x11: 0000000000000000 x10: 0000000000100000 [ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004 [ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678 [ 294.375836] x5 : 000000000000000c x4 : 0000000000000000 [ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000 [ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001 [ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293) [ 294.398791] Call trace: [ 294.401266] __srcu_read_lock+0x38/0x58 [ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler] [ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler] [ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler] [ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler] [ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler] [ 294.451618] tasklet_action_common.isra.5+0x88/0x138 [ 294.460661] tasklet_action+0x2c/0x38 [ 294.468191] __do_softirq+0x120/0x2f8 [ 294.475561] irq_exit+0x134/0x140 [ 294.482445] __handle_domain_irq+0x6c/0xc0 [ 294.489954] gic_handle_irq+0xb8/0x178 [ 294.497037] el1_irq+0xb0/0x140 [ 294.503381] arch_cpu_idle+0x34/0x1a8 [ 294.510096] do_idle+0x1d4/0x290 [ 294.516322] cpu_startup_entry+0x28/0x30 [ 294.523230] secondary_start_kernel+0x184/0x1d0 [ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 
(c85f7c25) [ 294.539746] ---[ end trace 8a7a880dee570b29 ]--- [ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt [ 294.556837] SMP: stopping secondary CPUs [ 294.563996] Kernel Offset: disabled [ 294.570515] CPU features: 0x002,21006008 [ 294.577638] Memory Limit: none [ 294.587178] Starting crashdump kernel... [ 294.594314] Bye! Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda in __srcu_read_lock(), it causes oops. Fix this by calling cleanup_srcu_struct() when the refcount is zero. Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove") Cc: [email protected] # 4.18 Signed-off-by: Yang Yingliang <[email protected]> Signed-off-by: Corey Minyard <[email protected]>
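An illustrative sketch of the release ordering the fix establishes, with names simplified from the ipmi code: the SRCU struct may only be cleaned up once the refcount has actually dropped to zero, that is, inside the kref release callback, never while readers can still enter it.

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/srcu.h>

    struct user {
        struct kref refcount;
        struct srcu_struct release_barrier;   /* init_srcu_struct()ed at create */
    };

    static void free_user(struct kref *ref)
    {
        struct user *u = container_of(ref, struct user, refcount);

        cleanup_srcu_struct(&u->release_barrier);   /* safe: no users left */
        kfree(u);
    }

    static void destroy_user(struct user *u)
    {
        /* dropping the last reference triggers free_user(); an early
         * cleanup_srcu_struct() here is what caused the use-after-free */
        kref_put(&u->refcount, free_user);
    }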
upnp_get_portmappings_in_range(unsigned short startport,
                               unsigned short endport,
                               const char * protocol,
                               unsigned int * number)
{
    int proto;
    proto = proto_atoi(protocol);
    if(!number)
        return NULL;
    return get_portmappings_in_range(startport, endport, proto, number);
}
0
[ "CWE-476" ]
miniupnp
f321c2066b96d18afa5158dfa2d2873a2957ef38
235,898,216,708,656,680,000,000,000,000,000,000,000
11
upnp_redirect(): accept NULL desc argument
gst_riff_wavext_get_default_channel_mask (guint nchannels)
{
  guint32 channel_mask = 0;

  /* Set the default channel mask for the given number of channels.
   * http://www.microsoft.com/whdc/device/audio/multichaud.mspx */
  switch (nchannels) {
    case 11:
      channel_mask |= 0x00400;
      channel_mask |= 0x00200;
    case 9:
      channel_mask |= 0x00100;
    case 8:
      channel_mask |= 0x00080;
      channel_mask |= 0x00040;
    case 6:
      channel_mask |= 0x00020;
      channel_mask |= 0x00010;
    case 4:
      channel_mask |= 0x00008;
    case 3:
      channel_mask |= 0x00004;
    case 2:
      channel_mask |= 0x00002;
      channel_mask |= 0x00001;
      break;
  }

  return channel_mask;
}
0
[ "CWE-369" ]
gst-plugins-base
81d3ba3fa212bb25fe2ac661993887c4b69af6f1
315,538,930,120,460,520,000,000,000,000,000,000,000
31
riff-media: Check for valid channels/rate before using the values Otherwise we might divide by zero or otherwise create invalid caps. https://bugzilla.gnome.org/show_bug.cgi?id=777262
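A small sketch of the guard this commit adds, following the riff-media shape but with invented names: reject zero channels/rate/blockalign before any division with them.

    #include <stdbool.h>
    #include <stdint.h>

    static bool riff_params_valid(uint32_t channels, uint32_t rate,
                                  uint32_t blockalign, uint32_t *width)
    {
        if (channels == 0 || rate == 0 || blockalign == 0)
            return false;                       /* would divide by zero below */
        *width = (blockalign * 8) / channels;   /* bits per sample, now safe */
        return true;
    }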
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
    struct rds_message *rm, *tmp;
    struct rds_connection *conn;
    unsigned long flags;
    LIST_HEAD(list);

    /* get all the messages we're dropping under the rs lock */
    spin_lock_irqsave(&rs->rs_lock, flags);

    list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
        if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                     dest->sin_port != rm->m_inc.i_hdr.h_dport))
            continue;

        list_move(&rm->m_sock_item, &list);
        rds_send_sndbuf_remove(rs, rm);
        clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
    }

    /* order flag updates with the rs lock */
    smp_mb__after_atomic();

    spin_unlock_irqrestore(&rs->rs_lock, flags);

    if (list_empty(&list))
        return;

    /* Remove the messages from the conn */
    list_for_each_entry(rm, &list, m_sock_item) {

        conn = rm->m_inc.i_conn;

        spin_lock_irqsave(&conn->c_lock, flags);
        /*
         * Maybe someone else beat us to removing rm from the conn.
         * If we race with their flag update we'll get the lock and
         * then really see that the flag has been cleared.
         */
        if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
            spin_unlock_irqrestore(&conn->c_lock, flags);
            spin_lock_irqsave(&rm->m_rs_lock, flags);
            rm->m_rs = NULL;
            spin_unlock_irqrestore(&rm->m_rs_lock, flags);
            continue;
        }
        list_del_init(&rm->m_conn_item);
        spin_unlock_irqrestore(&conn->c_lock, flags);

        /*
         * Couldn't grab m_rs_lock in top loop (lock ordering),
         * but we can now.
         */
        spin_lock_irqsave(&rm->m_rs_lock, flags);

        spin_lock(&rs->rs_lock);
        __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
        spin_unlock(&rs->rs_lock);

        rm->m_rs = NULL;
        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        rds_message_put(rm);
    }

    rds_wake_sk_sleep(rs);

    while (!list_empty(&list)) {
        rm = list_entry(list.next, struct rds_message, m_sock_item);
        list_del_init(&rm->m_sock_item);
        rds_message_wait(rm);

        /* just in case the code above skipped this message
         * because RDS_MSG_ON_CONN wasn't set, run it again here
         * taking m_rs_lock is the only thing that keeps us
         * from racing with ack processing.
         */
        spin_lock_irqsave(&rm->m_rs_lock, flags);

        spin_lock(&rs->rs_lock);
        __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
        spin_unlock(&rs->rs_lock);

        rm->m_rs = NULL;
        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        rds_message_put(rm);
    }
}
0
[ "CWE-362" ]
linux
8c7188b23474cca017b3ef354c4a58456f68303a
211,517,155,219,467,400,000,000,000,000,000,000,000
89
RDS: fix race condition when sending a message on unbound socket Sasha's found a NULL pointer dereference in the RDS connection code when sending a message to an apparently unbound socket. The problem is caused by the code checking if the socket is bound in rds_sendmsg(), which checks the rs_bound_addr field without taking a lock on the socket. This opens a race where rs_bound_addr is temporarily set but where the transport is not in rds_bind(), leading to a NULL pointer dereference when trying to dereference 'trans' in __rds_conn_create(). Vegard wrote a reproducer for this issue, so kindly ask him to share if you're interested. I cannot reproduce the NULL pointer dereference using Vegard's reproducer with this patch, whereas I could without. Complete earlier incomplete fix to CVE-2015-6937: 74e98eb08588 ("RDS: verify the underlying transport exists before creating a connection") Cc: David S. Miller <[email protected]> Cc: [email protected] Reviewed-by: Vegard Nossum <[email protected]> Reviewed-by: Sasha Levin <[email protected]> Acked-by: Santosh Shilimkar <[email protected]> Signed-off-by: Quentin Casasnovas <[email protected]> Signed-off-by: David S. Miller <[email protected]>
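A hedged sketch of the race the commit describes, with invented names and simplified types: the bound-address check and the use of the transport must happen under the same lock that the bind path holds while it publishes both fields.

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    struct sock_state {
        spinlock_t lock;
        unsigned long bound_addr;   /* set by bind first... */
        void *trans;                /* ...transport set afterwards */
    };

    static int send_one(struct sock_state *s)
    {
        int ret = 0;

        spin_lock(&s->lock);
        /* an unlocked check of bound_addr alone can observe a
         * half-finished bind where trans is still NULL */
        if (!s->bound_addr || !s->trans)
            ret = -ENOTCONN;
        /* ... otherwise queue the message while still holding the lock ... */
        spin_unlock(&s->lock);
        return ret;
    }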
TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
    // true means multikey
    addIndex(BSON("a.b.x" << 1 << "a.b.y" << 1), true);

    runQuery(fromjson("{'a.b': {$elemMatch: {x: 1, y: 1}}}"));

    assertNumSolutions(2U);
    assertSolutionExists("{cscan: {dir: 1}}");
    assertSolutionExists(
        "{fetch: {node: {ixscan: {pattern: {'a.b.x':1,'a.b.y':1}, bounds: "
        "{'a.b.x': [[1,1,true,true]], "
        " 'a.b.y': [[1,1,true,true]]}}}}}");
}
0
[ "CWE-834" ]
mongo
94d0e046baa64d1aa1a6af97e2d19bb466cc1ff5
316,393,075,312,563,500,000,000,000,000,000,000,000
12
SERVER-38164 $or pushdown optimization does not correctly handle $not within an $elemMatch
template<typename t> CImg<T>& blur_anisotropic(const CImg<t>& G, const float amplitude=60, const float dl=0.8f, const float da=30, const float gauss_prec=2, const unsigned int interpolation_type=0, const bool is_fast_approx=1) { // Check arguments and init variables if (!is_sameXYZ(G) || (G._spectrum!=3 && G._spectrum!=6)) throw CImgArgumentException(_cimg_instance "blur_anisotropic(): Invalid specified diffusion tensor field (%u,%u,%u,%u,%p).", cimg_instance, G._width,G._height,G._depth,G._spectrum,G._data); if (is_empty() || dl<0) return *this; const float namplitude = amplitude>=0?amplitude:-amplitude*cimg::max(_width,_height,_depth)/100; unsigned int iamplitude = cimg::round(namplitude); const bool is_3d = (G._spectrum==6); T val_min, val_max = max_min(val_min); _cimg_abort_init_omp; cimg_abort_init; if (da<=0) { // Iterated oriented Laplacians CImg<Tfloat> velocity(_width,_height,_depth,_spectrum); for (unsigned int iteration = 0; iteration<iamplitude; ++iteration) { Tfloat *ptrd = velocity._data, veloc_max = 0; if (is_3d) // 3D version cimg_forC(*this,c) { cimg_abort_test; CImg_3x3x3(I,Tfloat); cimg_for3x3x3(*this,x,y,z,c,I,Tfloat) { const Tfloat ixx = Incc + Ipcc - 2*Iccc, ixy = (Innc + Ippc - Inpc - Ipnc)/4, ixz = (Incn + Ipcp - Incp - Ipcn)/4, iyy = Icnc + Icpc - 2*Iccc, iyz = (Icnn + Icpp - Icnp - Icpn)/4, izz = Iccn + Iccp - 2*Iccc, veloc = (Tfloat)(G(x,y,z,0)*ixx + 2*G(x,y,z,1)*ixy + 2*G(x,y,z,2)*ixz + G(x,y,z,3)*iyy + 2*G(x,y,z,4)*iyz + G(x,y,z,5)*izz); *(ptrd++) = veloc; if (veloc>veloc_max) veloc_max = veloc; else if (-veloc>veloc_max) veloc_max = -veloc; } } else // 2D version cimg_forZC(*this,z,c) { cimg_abort_test; CImg_3x3(I,Tfloat); cimg_for3x3(*this,x,y,z,c,I,Tfloat) { const Tfloat ixx = Inc + Ipc - 2*Icc, ixy = (Inn + Ipp - Inp - Ipn)/4, iyy = Icn + Icp - 2*Icc, veloc = (Tfloat)(G(x,y,0,0)*ixx + 2*G(x,y,0,1)*ixy + G(x,y,0,2)*iyy); *(ptrd++) = veloc; if (veloc>veloc_max) veloc_max = veloc; else if (-veloc>veloc_max) veloc_max = -veloc; } } if (veloc_max>0) *this+=(velocity*=dl/veloc_max); } } else { // LIC-based smoothing const ulongT whd = (ulongT)_width*_height*_depth; const float sqrt2amplitude = (float)std::sqrt(2*namplitude); const int dx1 = width() - 1, dy1 = height() - 1, dz1 = depth() - 1; CImg<Tfloat> res(_width,_height,_depth,_spectrum,0), W(_width,_height,_depth,is_3d?4:3), val(_spectrum,1,1,1,0); int N = 0; if (is_3d) { // 3D version for (float phi = cimg::mod(180.f,da)/2.f; phi<=180; phi+=da) { const float phir = (float)(phi*cimg::PI/180), datmp = (float)(da/std::cos(phir)), da2 = datmp<1?360.f:datmp; for (float theta = 0; theta<360; (theta+=da2),++N) { const float thetar = (float)(theta*cimg::PI/180), vx = (float)(std::cos(thetar)*std::cos(phir)), vy = (float)(std::sin(thetar)*std::cos(phir)), vz = (float)std::sin(phir); const t *pa = G.data(0,0,0,0), *pb = G.data(0,0,0,1), *pc = G.data(0,0,0,2), *pd = G.data(0,0,0,3), *pe = G.data(0,0,0,4), *pf = G.data(0,0,0,5); Tfloat *pd0 = W.data(0,0,0,0), *pd1 = W.data(0,0,0,1), *pd2 = W.data(0,0,0,2), *pd3 = W.data(0,0,0,3); cimg_forXYZ(G,xg,yg,zg) { const t a = *(pa++), b = *(pb++), c = *(pc++), d = *(pd++), e = *(pe++), f = *(pf++); const float u = (float)(a*vx + b*vy + c*vz), v = (float)(b*vx + d*vy + e*vz), w = (float)(c*vx + e*vy + f*vz), n = 1e-5f + cimg::hypot(u,v,w), dln = dl/n; *(pd0++) = (Tfloat)(u*dln); *(pd1++) = (Tfloat)(v*dln); *(pd2++) = (Tfloat)(w*dln); *(pd3++) = (Tfloat)n; } cimg_abort_test; cimg_pragma_openmp(parallel for cimg_openmp_collapse(2) cimg_openmp_if(_width>=(cimg_openmp_sizefactor)*256 
&& _height*_depth>=2) firstprivate(val)) cimg_forYZ(*this,y,z) _cimg_abort_try_omp2 { cimg_abort_test2; cimg_forX(*this,x) { val.fill(0); const float n = (float)W(x,y,z,3), fsigma = (float)(n*sqrt2amplitude), fsigma2 = 2*fsigma*fsigma, length = gauss_prec*fsigma; float S = 0, X = (float)x, Y = (float)y, Z = (float)z; switch (interpolation_type) { case 0 : { // Nearest neighbor for (float l = 0; l<length && X>=0 && X<=dx1 && Y>=0 && Y<=dy1 && Z>=0 && Z<=dz1; l+=dl) { const int cx = (int)(X + 0.5f), cy = (int)(Y + 0.5f), cz = (int)(Z + 0.5f); const float u = (float)W(cx,cy,cz,0), v = (float)W(cx,cy,cz,1), w = (float)W(cx,cy,cz,2); if (is_fast_approx) { cimg_forC(*this,c) val[c]+=(Tfloat)(*this)(cx,cy,cz,c); ++S; } else { const float coef = (float)std::exp(-l*l/fsigma2); cimg_forC(*this,c) val[c]+=(Tfloat)(coef*(*this)(cx,cy,cz,c)); S+=coef; } X+=u; Y+=v; Z+=w; } } break; case 1 : { // Linear interpolation for (float l = 0; l<length && X>=0 && X<=dx1 && Y>=0 && Y<=dy1 && Z>=0 && Z<=dz1; l+=dl) { const float u = (float)(W._linear_atXYZ(X,Y,Z,0)), v = (float)(W._linear_atXYZ(X,Y,Z,1)), w = (float)(W._linear_atXYZ(X,Y,Z,2)); if (is_fast_approx) { cimg_forC(*this,c) val[c]+=(Tfloat)_linear_atXYZ(X,Y,Z,c); ++S; } else { const float coef = (float)std::exp(-l*l/fsigma2); cimg_forC(*this,c) val[c]+=(Tfloat)(coef*_linear_atXYZ(X,Y,Z,c)); S+=coef; } X+=u; Y+=v; Z+=w; } } break; default : { // 2nd order Runge Kutta for (float l = 0; l<length && X>=0 && X<=dx1 && Y>=0 && Y<=dy1 && Z>=0 && Z<=dz1; l+=dl) { const float u0 = (float)(0.5f*W._linear_atXYZ(X,Y,Z,0)), v0 = (float)(0.5f*W._linear_atXYZ(X,Y,Z,1)), w0 = (float)(0.5f*W._linear_atXYZ(X,Y,Z,2)), u = (float)(W._linear_atXYZ(X + u0,Y + v0,Z + w0,0)), v = (float)(W._linear_atXYZ(X + u0,Y + v0,Z + w0,1)), w = (float)(W._linear_atXYZ(X + u0,Y + v0,Z + w0,2)); if (is_fast_approx) { cimg_forC(*this,c) val[c]+=(Tfloat)_linear_atXYZ(X,Y,Z,c); ++S; } else { const float coef = (float)std::exp(-l*l/fsigma2); cimg_forC(*this,c) val[c]+=(Tfloat)(coef*_linear_atXYZ(X,Y,Z,c)); S+=coef; } X+=u; Y+=v; Z+=w; } } break; } Tfloat *ptrd = res.data(x,y,z); if (S>0) cimg_forC(res,c) { *ptrd+=val[c]/S; ptrd+=whd; } else cimg_forC(res,c) { *ptrd+=(Tfloat)((*this)(x,y,z,c)); ptrd+=whd; } } } _cimg_abort_catch_omp2 } } } else { // 2D LIC algorithm for (float theta = cimg::mod(360.f,da)/2.f; theta<360; (theta+=da),++N) { const float thetar = (float)(theta*cimg::PI/180), vx = (float)(std::cos(thetar)), vy = (float)(std::sin(thetar)); const t *pa = G.data(0,0,0,0), *pb = G.data(0,0,0,1), *pc = G.data(0,0,0,2); Tfloat *pd0 = W.data(0,0,0,0), *pd1 = W.data(0,0,0,1), *pd2 = W.data(0,0,0,2); cimg_forXY(G,xg,yg) { const t a = *(pa++), b = *(pb++), c = *(pc++); const float u = (float)(a*vx + b*vy), v = (float)(b*vx + c*vy), n = std::max(1e-5f,cimg::hypot(u,v)), dln = dl/n; *(pd0++) = (Tfloat)(u*dln); *(pd1++) = (Tfloat)(v*dln); *(pd2++) = (Tfloat)n; } cimg_abort_test; cimg_pragma_openmp(parallel for cimg_openmp_if(_width>=(cimg_openmp_sizefactor)*256 && _height>=2) firstprivate(val)) cimg_forY(*this,y) _cimg_abort_try_omp2 { cimg_abort_test2; cimg_forX(*this,x) { val.fill(0); const float n = (float)W(x,y,0,2), fsigma = (float)(n*sqrt2amplitude), fsigma2 = 2*fsigma*fsigma, length = gauss_prec*fsigma; float S = 0, X = (float)x, Y = (float)y; switch (interpolation_type) { case 0 : { // Nearest-neighbor for (float l = 0; l<length && X>=0 && X<=dx1 && Y>=0 && Y<=dy1; l+=dl) { const int cx = (int)(X + 0.5f), cy = (int)(Y + 0.5f); const float u = (float)W(cx,cy,0,0), v = (float)W(cx,cy,0,1); 
if (is_fast_approx) { cimg_forC(*this,c) val[c]+=(Tfloat)(*this)(cx,cy,0,c); ++S; } else { const float coef = (float)std::exp(-l*l/fsigma2); cimg_forC(*this,c) val[c]+=(Tfloat)(coef*(*this)(cx,cy,0,c)); S+=coef; } X+=u; Y+=v; } } break; case 1 : { // Linear interpolation for (float l = 0; l<length && X>=0 && X<=dx1 && Y>=0 && Y<=dy1; l+=dl) { const float u = (float)(W._linear_atXY(X,Y,0,0)), v = (float)(W._linear_atXY(X,Y,0,1)); if (is_fast_approx) { cimg_forC(*this,c) val[c]+=(Tfloat)_linear_atXY(X,Y,0,c); ++S; } else { const float coef = (float)std::exp(-l*l/fsigma2); cimg_forC(*this,c) val[c]+=(Tfloat)(coef*_linear_atXY(X,Y,0,c)); S+=coef; } X+=u; Y+=v; } } break; default : { // 2nd-order Runge-kutta interpolation for (float l = 0; l<length && X>=0 && X<=dx1 && Y>=0 && Y<=dy1; l+=dl) { const float u0 = (float)(0.5f*W._linear_atXY(X,Y,0,0)), v0 = (float)(0.5f*W._linear_atXY(X,Y,0,1)), u = (float)(W._linear_atXY(X + u0,Y + v0,0,0)), v = (float)(W._linear_atXY(X + u0,Y + v0,0,1)); if (is_fast_approx) { cimg_forC(*this,c) val[c]+=(Tfloat)_linear_atXY(X,Y,0,c); ++S; } else { const float coef = (float)std::exp(-l*l/fsigma2); cimg_forC(*this,c) val[c]+=(Tfloat)(coef*_linear_atXY(X,Y,0,c)); S+=coef; } X+=u; Y+=v; } } } Tfloat *ptrd = res.data(x,y); if (S>0) cimg_forC(res,c) { *ptrd+=val[c]/S; ptrd+=whd; } else cimg_forC(res,c) { *ptrd+=(Tfloat)((*this)(x,y,0,c)); ptrd+=whd; } } } _cimg_abort_catch_omp2 } } const Tfloat *ptrs = res._data; cimg_for(*this,ptrd,T) { const Tfloat _val = *(ptrs++)/N; *ptrd = _val<val_min?val_min:(_val>val_max?val_max:(T)_val); } } cimg_abort_test; return *this;
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
81,101,287,126,837,420,000,000,000,000,000,000,000
269
.
void free_contig_range(unsigned long pfn, unsigned nr_pages) { unsigned int count = 0; for (; nr_pages--; pfn++) { struct page *page = pfn_to_page(pfn); count += page_count(page) != 1; __free_page(page); } WARN(count != 0, "%d pages are still in use!\n", count); }
0
[]
linux
400e22499dd92613821374c8c6c88c7225359980
68,002,967,260,678,850,000,000,000,000,000,000,000
12
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step toward reducing the possibility of silent hang-up problems caused by memory allocation stalls. But this commit reverts it, because it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently call warn_alloc() (in order to warn about memory allocation stalls) due to the current implementation of printk(), and it is difficult to obtain useful information due to the limitations of the synchronous warning approach. The current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to the printk() buffer. Since warn_alloc() started appending to the printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() to loop inside printk(). As a result, warn_alloc() significantly increased the possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, an unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep spinning in an almost-busy loop, the thread doing print_one_log() gets little CPU time. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to the printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help with obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here.
mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we are unlikely to be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). A synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider an asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask the libvirt daemon to take a memory dump of a stalling KVM guest for diagnostic purposes). This commit temporarily loses the ability to report e.g. OOM lockup due to being unable to invoke the OOM killer for a !__GFP_FS allocation request. But the asynchronous approach will be able to detect such a situation and emit a warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever") Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <[email protected]> Reported-by: Cong Wang <[email protected]> Reported-by: yuwang.yuwang <[email protected]> Reported-by: Johannes Weiner <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Steven Rostedt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
bool DL_Dxf::handleHatchData(DL_CreationInterface* creationInterface) { // New polyline loop, group code 92 // or new loop with individual edges, group code 93 if (groupCode==92 || groupCode==93) { if (firstHatchLoop) { hatchEdges.clear(); firstHatchLoop = false; } if (groupCode==92 && (toInt(groupValue)&2)==2) { addHatchLoop(); } if (groupCode==93) { addHatchLoop(); } return true; } // New hatch edge or new section / entity: add last hatch edge: if (groupCode==72 || groupCode==0 || groupCode==78 || groupCode==98) { // polyline boundaries use code 72 for bulge flag: if (groupCode!=72 || (getIntValue(92, 0)&2)==0) { addHatchEdge(); } if (groupCode==0 /*|| groupCode==78*/) { addHatch(creationInterface); } else { hatchEdge.type = toInt(groupValue); } return true; } // polyline boundary: if ((getIntValue(92, 0)&2)==2) { switch (groupCode) { case 10: hatchEdge.type = 0; hatchEdge.vertices.push_back(std::vector<double>()); hatchEdge.vertices.back().push_back(toReal(groupValue)); return true; case 20: if (!hatchEdge.vertices.empty()) { hatchEdge.vertices.back().push_back(toReal(groupValue)); hatchEdge.defined = true; } return true; case 42: if (!hatchEdge.vertices.empty()) { hatchEdge.vertices.back().push_back(toReal(groupValue)); hatchEdge.defined = true; } return true; } } else { // Line edge: if (hatchEdge.type==1) { switch (groupCode) { case 10: hatchEdge.x1 = toReal(groupValue); return true; case 20: hatchEdge.y1 = toReal(groupValue); return true; case 11: hatchEdge.x2 = toReal(groupValue); return true; case 21: hatchEdge.y2 = toReal(groupValue); hatchEdge.defined = true; return true; } } // Arc edge: if (hatchEdge.type==2) { switch(groupCode) { case 10: hatchEdge.cx = toReal(groupValue); return true; case 20: hatchEdge.cy = toReal(groupValue); return true; case 40: hatchEdge.radius = toReal(groupValue); return true; case 50: hatchEdge.angle1 = toReal(groupValue)/360.0*2*M_PI; return true; case 51: hatchEdge.angle2 = toReal(groupValue)/360.0*2*M_PI; return true; case 73: hatchEdge.ccw = (bool)toInt(groupValue); hatchEdge.defined = true; return true; } } // Ellipse arc edge: if (hatchEdge.type==3) { switch (groupCode) { case 10: hatchEdge.cx = toReal(groupValue); return true; case 20: hatchEdge.cy = toReal(groupValue); return true; case 11: hatchEdge.mx = toReal(groupValue); return true; case 21: hatchEdge.my = toReal(groupValue); return true; case 40: hatchEdge.ratio = toReal(groupValue); return true; case 50: hatchEdge.angle1 = toReal(groupValue)/360.0*2*M_PI; return true; case 51: hatchEdge.angle2 = toReal(groupValue)/360.0*2*M_PI; return true; case 73: hatchEdge.ccw = (bool)toInt(groupValue); hatchEdge.defined = true; return true; } } // Spline edge: if (hatchEdge.type==4) { switch (groupCode) { case 94: hatchEdge.degree = toInt(groupValue); return true; case 73: hatchEdge.rational = toBool(groupValue); return true; case 74: hatchEdge.periodic = toBool(groupValue); return true; case 95: hatchEdge.nKnots = toInt(groupValue); return true; case 96: hatchEdge.nControl = toInt(groupValue); return true; case 97: hatchEdge.nFit = toInt(groupValue); return true; case 40: if (hatchEdge.knots.size() < hatchEdge.nKnots) { hatchEdge.knots.push_back(toReal(groupValue)); } return true; case 10: if (hatchEdge.controlPoints.size() < hatchEdge.nControl) { std::vector<double> v; v.push_back(toReal(groupValue)); hatchEdge.controlPoints.push_back(v); } return true; case 20: if (!hatchEdge.controlPoints.empty() && hatchEdge.controlPoints.back().size()==1) { 
hatchEdge.controlPoints.back().push_back(toReal(groupValue)); } hatchEdge.defined = true; return true; case 42: if (hatchEdge.weights.size() < hatchEdge.nControl) { hatchEdge.weights.push_back(toReal(groupValue)); } return true; case 11: if (hatchEdge.fitPoints.size() < hatchEdge.nFit) { std::vector<double> v; v.push_back(toReal(groupValue)); hatchEdge.fitPoints.push_back(v); } return true; case 21: if (!hatchEdge.fitPoints.empty() && hatchEdge.fitPoints.back().size()==1) { hatchEdge.fitPoints.back().push_back(toReal(groupValue)); } hatchEdge.defined = true; return true; case 12: hatchEdge.startTangentX = toReal(groupValue); return true; case 22: hatchEdge.startTangentY = toReal(groupValue); return true; case 13: hatchEdge.endTangentX = toReal(groupValue); return true; case 23: hatchEdge.endTangentY = toReal(groupValue); return true; } } } return false; }
0
[ "CWE-191" ]
qcad
1eeffc5daf5a06cf6213ffc19e95923cdebb2eb8
314,045,846,827,213,330,000,000,000,000,000,000,000
206
check vertexIndex which might be -1 for broken DXF
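The fix message above ("check vertexIndex which might be -1 for broken DXF") reduces to validating a possibly-negative parsed index before using it to address a container. A minimal C sketch of that guard; the names vertex_x and vertexIndex are illustrative, not qcad's actual API:

#include <stdio.h>

/* Hypothetical analogue of the qcad fix: an index parsed from a broken
 * DXF file may be -1, so validate it before using it as an array index
 * (CWE-191-style underflow guard). */
static double vertex_x(const double *vertices, int count, int vertexIndex)
{
    if (vertexIndex < 0 || vertexIndex >= count)
        return 0.0; /* caller treats 0.0 as "no such vertex" in this sketch */
    return vertices[vertexIndex];
}

int main(void)
{
    double xs[3] = {1.0, 2.0, 3.0};
    printf("%f\n", vertex_x(xs, 3, -1)); /* safely returns 0.0 */
    return 0;
}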
DEFUN (neighbor_maximum_prefix_restart, neighbor_maximum_prefix_restart_cmd, NEIGHBOR_CMD2 "maximum-prefix <1-4294967295> restart <1-65535>", NEIGHBOR_STR NEIGHBOR_ADDR_STR2 "Maximum number of prefix accept from this peer\n" "maximum no. of prefix limit\n" "Restart bgp connection after limit is exceeded\n" "Restart interval in minutes") { return peer_maximum_prefix_set_vty (vty, argv[0], bgp_node_afi (vty), bgp_node_safi (vty), argv[1], NULL, 0, argv[2]); }
0
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
146,614,424,177,955,150,000,000,000,000,000,000,000
13
[bgpd] cleanup, compact and consolidate capability parsing code 2007-07-26 Paul Jakma <[email protected]> * (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap-specific code (not always present or correct). * bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability. * bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..). (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of the same, at least one of which appeared to be incomplete. (bgp_capability_mp) Much condensed. (bgp_capability_orf_entry) New, process one ORF entry (bgp_capability_orf) Condensed. Fixed to process all ORF entries. (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp. (struct message capcode_str) added to aid generic logging. (size_t cap_minsizes[]) added to aid generic validation of capability length field. (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible. * bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp. (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices. (bgp_capability_receive) Exported for use by test harness. * bgp_vty.c: (bgp_show_summary) fix conversion warning (bgp_show_peer) ditto * bgp_debug.h: Fix storage 'extern' after type 'const'. * lib/log.c: (mes_lookup) warning about code not being in the same-number array slot should be debug, not warning. E.g. BGP has several discontiguous number spaces; allocating from different parts of a space is not uncommon (e.g. IANA-assigned versus vendor-assigned code points in some number space).
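The changelog's central safety point is validating the capability length field before any memcpy() that depends on it. A hedged, self-contained sketch of that ordering; struct cap_tlv and parse_capability are hypothetical names, not frr's actual structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical TLV header, loosely modeled on the generic capability
 * header described in the changelog above. */
struct cap_tlv { uint8_t code; uint8_t length; };

/* Validate the advertised length against both the remaining input and
 * the per-capability minimum size *before* copying any data. */
static int parse_capability(const uint8_t *buf, size_t remaining,
                            size_t min_size, uint8_t *out, size_t out_size)
{
    struct cap_tlv tlv;
    if (remaining < sizeof(tlv))
        return -1;                     /* truncated header */
    memcpy(&tlv, buf, sizeof(tlv));
    if (tlv.length < min_size)
        return -1;                     /* below the cap-specific minimum */
    if ((size_t)tlv.length > remaining - sizeof(tlv) || tlv.length > out_size)
        return -1;                     /* would read or write out of bounds */
    memcpy(out, buf + sizeof(tlv), tlv.length); /* only now is memcpy safe */
    return tlv.length;
}

int main(void)
{
    uint8_t input[] = {2, 4, 0xde, 0xad, 0xbe, 0xef};
    uint8_t out[8];
    printf("parsed %d bytes\n",
           parse_capability(input, sizeof(input), 4, out, sizeof(out)));
    return 0;
}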
dev_t mnt_fs_get_devno(struct libmnt_fs *fs) { return fs ? fs->devno : 0; }
0
[ "CWE-552", "CWE-703" ]
util-linux
166e87368ae88bf31112a30e078cceae637f4cdb
204,210,933,368,632,940,000,000,000,000,000,000,000
4
libmount: remove support for deleted mount table entries The "(deleted)" suffix was originally used by the kernel for deleted mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3 (Dec 2014) the kernel has not used this suffix for mount entries in /proc at all. Let's remove this support from libmount too. Signed-off-by: Karel Zak <[email protected]>
static void uas_zap_pending(struct uas_dev_info *devinfo, int result) { struct uas_cmd_info *cmdinfo; struct scsi_cmnd *cmnd; unsigned long flags; int i, err; spin_lock_irqsave(&devinfo->lock, flags); for (i = 0; i < devinfo->qdepth; i++) { if (!devinfo->cmnd[i]) continue; cmnd = devinfo->cmnd[i]; cmdinfo = (void *)&cmnd->SCp; uas_log_cmd_state(cmnd, __func__, 0); /* Sense urbs were killed, clear COMMAND_INFLIGHT manually */ cmdinfo->state &= ~COMMAND_INFLIGHT; cmnd->result = result << 16; err = uas_try_complete(cmnd, __func__); WARN_ON(err != 0); } spin_unlock_irqrestore(&devinfo->lock, flags); }
0
[ "CWE-125" ]
linux
786de92b3cb26012d3d0f00ee37adf14527f35c4
172,395,188,211,292,000,000,000,000,000,000,000,000
23
USB: uas: fix bug in handling of alternate settings The uas driver has a subtle bug in the way it handles alternate settings. The uas_find_uas_alt_setting() routine returns an altsetting value (the bAlternateSetting number in the descriptor), but uas_use_uas_driver() then treats that value as an index to the intf->altsetting array, which it isn't. Normally this doesn't cause any problems because the various alternate settings have bAlternateSetting values 0, 1, 2, ..., so the value is equal to the index in the array. But this is not guaranteed, and Andrey Konovalov used the syzkaller fuzzer with KASAN to get a slab-out-of-bounds error by violating this assumption. This patch fixes the bug by making uas_find_uas_alt_setting() return a pointer to the altsetting entry rather than either the value or the index. Pointers are less subject to misinterpretation. Signed-off-by: Alan Stern <[email protected]> Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> CC: Oliver Neukum <[email protected]> CC: <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
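The commit message pins the bug on a value-versus-index confusion: a bAlternateSetting number was later used as a subscript into intf->altsetting. A sketch of the fix's idea, returning a pointer to the entry instead of a number, with stubbed-out stand-ins for the kernel's USB structures:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins, not the kernel's actual types. */
struct altsetting { int bAlternateSetting; int is_uas; };
struct usb_interface { struct altsetting altsetting[3]; int num_altsetting; };

/* Returning a pointer removes the ambiguity: the caller can no longer
 * misuse the result as an array index. */
static struct altsetting *find_uas_alt_setting(struct usb_interface *intf)
{
    for (int i = 0; i < intf->num_altsetting; i++)
        if (intf->altsetting[i].is_uas)
            return &intf->altsetting[i];
    return NULL;
}

int main(void)
{
    /* bAlternateSetting values need not equal array indices (here 0, 5, 7),
     * which is exactly the assumption the original code made. */
    struct usb_interface intf = { { {0, 0}, {5, 1}, {7, 0} }, 3 };
    struct altsetting *alt = find_uas_alt_setting(&intf);
    if (alt)
        printf("UAS alt setting %d\n", alt->bAlternateSetting);
    return 0;
}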
rend_service_rendezvous_has_opened(origin_circuit_t *circuit) { rend_service_t *service; char buf[RELAY_PAYLOAD_SIZE]; crypt_path_t *hop; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; char hexcookie[9]; int reason; const char *rend_cookie, *rend_pk_digest; tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND); tor_assert(circuit->cpath); tor_assert(circuit->build_state); assert_circ_anonymity_ok(circuit, get_options()); tor_assert(circuit->rend_data); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL); rend_cookie = circuit->rend_data->rend_cookie; /* Declare the circuit dirty to avoid reuse, and for path-bias */ if (!circuit->base_.timestamp_dirty) circuit->base_.timestamp_dirty = time(NULL); /* This may be redundant */ pathbias_count_use_attempt(circuit); hop = circuit->build_state->service_pending_final_cpath_ref->cpath; base16_encode(hexcookie,9, rend_cookie,4); base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); log_info(LD_REND, "Done building circuit %u to rendezvous with " "cookie %s for service %s", (unsigned)circuit->base_.n_circ_id, hexcookie, serviceid); circuit_log_path(LOG_INFO, LD_REND, circuit); /* Clear the 'in-progress HS circ has timed out' flag for * consistency with what happens on the client side; this line has * no effect on Tor's behaviour. */ circuit->hs_circ_has_timed_out = 0; /* If hop is NULL, another rend circ has already connected to this * rend point. Close this circ. */ if (hop == NULL) { log_info(LD_REND, "Another rend circ has already reached this rend point; " "closing this rend circ."); reason = END_CIRC_REASON_NONE; goto err; } /* Remove our final cpath element from the reference, so that no * other circuit will try to use it. Store it in * pending_final_cpath for now to ensure that it will be freed if * our rendezvous attempt fails. */ circuit->build_state->pending_final_cpath = hop; circuit->build_state->service_pending_final_cpath_ref->cpath = NULL; service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_GENERAL, "Internal error: unrecognized service ID on " "rendezvous circuit."); reason = END_CIRC_REASON_INTERNAL; goto err; } /* All we need to do is send a RELAY_RENDEZVOUS1 cell... */ memcpy(buf, rend_cookie, REND_COOKIE_LEN); if (crypto_dh_get_public(hop->rend_dh_handshake_state, buf+REND_COOKIE_LEN, DH_KEY_LEN)<0) { log_warn(LD_GENERAL,"Couldn't get DH public key."); reason = END_CIRC_REASON_INTERNAL; goto err; } memcpy(buf+REND_COOKIE_LEN+DH_KEY_LEN, hop->rend_circ_nonce, DIGEST_LEN); /* Send the cell */ if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit), RELAY_COMMAND_RENDEZVOUS1, buf, REND_COOKIE_LEN+DH_KEY_LEN+DIGEST_LEN, circuit->cpath->prev)<0) { log_warn(LD_GENERAL, "Couldn't send RENDEZVOUS1 cell."); goto done; } crypto_dh_free(hop->rend_dh_handshake_state); hop->rend_dh_handshake_state = NULL; /* Append the cpath entry. */ hop->state = CPATH_STATE_OPEN; /* set the windows to default. these are the windows * that the service thinks the client has. */ hop->package_window = circuit_initial_package_window(); hop->deliver_window = CIRCWINDOW_START; onion_append_to_cpath(&circuit->cpath, hop); circuit->build_state->pending_final_cpath = NULL; /* prevent double-free */ /* Change the circuit purpose. 
*/ circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_REND_JOINED); goto done; err: circuit_mark_for_close(TO_CIRCUIT(circuit), reason); done: memwipe(buf, 0, sizeof(buf)); memwipe(serviceid, 0, sizeof(serviceid)); memwipe(hexcookie, 0, sizeof(hexcookie)); return; }
0
[ "CWE-532" ]
tor
09ea89764a4d3a907808ed7d4fe42abfe64bd486
340,169,305,699,082,870,000,000,000,000,000,000,000
117
Fix log-uninitialized-stack bug in rend_service_intro_established. Fixes bug 23490; bugfix on 0.2.7.2-alpha. TROVE-2017-008 CVE-2017-0380
void vcpu_load(struct kvm_vcpu *vcpu) { int cpu = get_cpu(); preempt_notifier_register(&vcpu->preempt_notifier); kvm_arch_vcpu_load(vcpu, cpu); put_cpu(); }
0
[ "CWE-416", "CWE-362" ]
linux
cfa39381173d5f969daf43582c95ad679189cbc9
76,890,445,790,477,950,000,000,000,000,000,000,000
7
kvm: fix kvm_ioctl_create_device() reference counting (CVE-2019-6974) kvm_ioctl_create_device() does the following: 1. creates a device that holds a reference to the VM object (with a borrowed reference, the VM's refcount has not been bumped yet) 2. initializes the device 3. transfers the reference to the device to the caller's file descriptor table 4. calls kvm_get_kvm() to turn the borrowed reference to the VM into a real reference The ownership transfer in step 3 must not happen before the reference to the VM becomes a proper, non-borrowed reference, which only happens in step 4. After step 3, an attacker can close the file descriptor and drop the borrowed reference, which can cause the refcount of the kvm object to drop to zero. This means that we need to grab a reference for the device before anon_inode_getfd(), otherwise the VM can disappear from under us. Fixes: 852b6d57dc7f ("kvm: add device control API") Cc: [email protected] Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
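The message describes an ordering bug: the file descriptor (and thus ownership) was handed out in step 3 before the borrowed VM reference was made real in step 4. A userspace-flavoured sketch of the corrected ordering; vm_get/vm_put/install_fd are hypothetical stand-ins for kvm_get_kvm()/kvm_put_kvm()/anon_inode_getfd():

#include <stdio.h>

struct vm { int refcount; };
static void vm_get(struct vm *vm) { vm->refcount++; }
static void vm_put(struct vm *vm) { vm->refcount--; }

/* Pretend fd installation; after this call another thread may close the
 * fd and drop whatever references the fd's object holds. */
static int install_fd(struct vm *vm) { (void)vm; return 3; }

static int create_device(struct vm *vm)
{
    vm_get(vm);                 /* take the real reference first... */
    int fd = install_fd(vm);    /* ...then transfer ownership */
    if (fd < 0) {
        vm_put(vm);             /* undo on failure */
        return fd;
    }
    /* A concurrent close() can no longer drop the refcount to zero
     * while the VM object is still in use. */
    return fd;
}

int main(void)
{
    struct vm vm = { 1 };
    printf("fd=%d refcount=%d\n", create_device(&vm), vm.refcount);
    return 0;
}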
XSetStandardProperties ( Display *dpy, Window w, /* window to decorate */ _Xconst char *name, /* name of application */ _Xconst char *icon_string,/* name string for icon */ Pixmap icon_pixmap, /* pixmap to use as icon, or None */ char **argv, /* command to be used to restart application */ int argc, /* count of arguments */ XSizeHints *hints) /* size hints for window in its normal state */ { XWMHints phints; phints.flags = 0; if (name != NULL) XStoreName (dpy, w, name); if (safestrlen(icon_string) >= USHRT_MAX) return 1; if (icon_string != NULL) { XChangeProperty (dpy, w, XA_WM_ICON_NAME, XA_STRING, 8, PropModeReplace, (_Xconst unsigned char *)icon_string, (int)safestrlen(icon_string)); } if (icon_pixmap != None) { phints.icon_pixmap = icon_pixmap; phints.flags |= IconPixmapHint; } if (argv != NULL) XSetCommand(dpy, w, argv, argc); if (hints != NULL) XSetNormalHints(dpy, w, hints); if (phints.flags != 0) XSetWMHints(dpy, w, &phints); return 1; }
0
[ "CWE-120" ]
libx11
8d2e02ae650f00c4a53deb625211a0527126c605
181,863,330,640,895,300,000,000,000,000,000,000,000
36
Reject string longer than USHRT_MAX before sending them on the wire The X protocol uses CARD16 values to represent the length so this would overflow. CVE-2021-31535 Signed-off-by: Matthieu Herrb <[email protected]>
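The fix rejects any string whose length cannot be represented in the protocol's 16-bit (CARD16) length field before it reaches the wire. The same guard, reduced to a standalone sketch around a hypothetical set_icon_name() helper:

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* As in the function above: a strlen() that tolerates NULL. */
static size_t safestrlen(const char *s) { return s ? strlen(s) : 0; }

/* The X protocol carries lengths as CARD16, so anything at or above
 * USHRT_MAX would wrap when encoded into the request. */
static int set_icon_name(const char *icon_string)
{
    if (safestrlen(icon_string) >= USHRT_MAX)
        return 1;      /* refuse instead of overflowing the length field */
    /* ... the XChangeProperty() call would go here ... */
    return 0;
}

int main(void)
{
    printf("%d\n", set_icon_name("short name")); /* 0: accepted */
    return 0;
}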
double Magick::Image::compare(const Image &reference_,const MetricType metric_) { double distortion=0.0; GetPPException; GetImageDistortion(image(),reference_.constImage(),metric_,&distortion, exceptionInfo); ThrowImageException; return(distortion); }
0
[ "CWE-416" ]
ImageMagick
8c35502217c1879cb8257c617007282eee3fe1cc
171,999,628,673,637,200,000,000,000,000,000,000,000
11
Added missing return to avoid use after free.
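"Added missing return to avoid use after free" names a common cleanup-path bug shape: an error path frees state, but without a return, control falls through into code that still uses it. A generic C sketch of that shape; struct image and compare_images are hypothetical, not ImageMagick's API:

#include <stdio.h>
#include <stdlib.h>

struct image { double *pixels; };

static double compare_images(struct image *img, int fail)
{
    if (fail) {
        free(img->pixels);
        img->pixels = NULL;
        /* The missing piece: without this return, execution would fall
         * through into code that reads the freed buffer (use-after-free). */
        return -1.0;
    }
    return img->pixels[0];   /* only reached while pixels is still live */
}

int main(void)
{
    struct image img = { malloc(sizeof(double)) };
    if (!img.pixels)
        return 1;
    img.pixels[0] = 0.25;
    printf("%f\n", compare_images(&img, 0));
    free(img.pixels);
    return 0;
}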
static void cli_session_setup_gensec_remote_done(struct tevent_req *subreq) { struct tevent_req *req = tevent_req_callback_data(subreq, struct tevent_req); struct cli_session_setup_gensec_state *state = tevent_req_data(req, struct cli_session_setup_gensec_state); NTSTATUS status; TALLOC_FREE(state->inbuf); TALLOC_FREE(state->recv_iov); status = cli_sesssetup_blob_recv(subreq, state, &state->blob_in, &state->inbuf, &state->recv_iov); TALLOC_FREE(subreq); data_blob_free(&state->blob_out); if (!NT_STATUS_IS_OK(status) && !NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) { tevent_req_nterror(req, status); return; } if (NT_STATUS_IS_OK(status)) { struct smbXcli_session *session = NULL; bool is_guest = false; if (smbXcli_conn_protocol(state->cli->conn) >= PROTOCOL_SMB2_02) { session = state->cli->smb2.session; } else { session = state->cli->smb1.session; } is_guest = smbXcli_session_is_guest(session); if (is_guest) { /* * We can't finish the gensec handshake, we don't * have a negotiated session key. * * So just pretend we are completely done. */ state->blob_in = data_blob_null; state->local_ready = true; } state->remote_ready = true; } if (state->local_ready && state->remote_ready) { cli_session_setup_gensec_ready(req); return; } cli_session_setup_gensec_local_next(req); }
1
[ "CWE-94" ]
samba
94295b7aa22d2544af5323bca70d3dcb97fd7c64
202,220,836,302,688,500,000,000,000,000,000,000,000
56
CVE-2016-2019: s3:libsmb: add comment regarding smbXcli_session_is_guest() with mandatory signing BUG: https://bugzilla.samba.org/show_bug.cgi?id=11860 Signed-off-by: Stefan Metzmacher <[email protected]>
static void urlParsePostBody(struct URL *url, const struct HttpConnection *http, const char *buf, int len) { struct HashMap contentType; initHashMap(&contentType, urlDestroyHashMapEntry, NULL); const char *ctHeader = getFromHashMap(&http->header, "content-type"); urlParseHeaderLine(&contentType, ctHeader, ctHeader ? strlen(ctHeader) : 0); if (getRefFromHashMap(&contentType, "application/x-www-form-urlencoded")) { urlParseQueryString(&url->args, buf, len); } else if (getRefFromHashMap(&contentType, "multipart/form-data")) { const char *boundary = getFromHashMap(&contentType, "boundary"); if (boundary && *boundary) { const char *lastPart = NULL; for (const char *part = buf; len > 0; ) { const char *ptr; if ((part == buf && (ptr = urlMemstr(part, len, "--")) != NULL) || (ptr = urlMemstr(part, len, "\r\n--")) != NULL) { len -= ptr - part + (part == buf ? 2 : 4); part = ptr + (part == buf ? 2 : 4); if (!urlMemcmp(part, len, boundary)) { int i = strlen(boundary); len -= i; part += i; if (!urlMemcmp(part, len, "\r\n")) { len -= 2; part += 2; if (lastPart) { urlParsePart(url, lastPart, ptr - lastPart); } else { if (ptr != buf) { info("[http] Ignoring prologue before \"multipart/form-data\"!"); } } lastPart = part; } else if (!urlMemcmp(part, len, "--\r\n")) { len -= 4; part += 4; urlParsePart(url, lastPart, ptr - lastPart); lastPart = NULL; if (len > 0) { info("[http] Ignoring epilogue past end of \"multipart/" "form-data\"!"); } } } } } if (lastPart) { warn("[http] Missing final \"boundary\" for \"multipart/form-data\"!"); } } else { warn("[http] Missing \"boundary\" information for \"multipart/form-data\"!"); } } destroyHashMap(&contentType); }
1
[ "CWE-400", "CWE-703", "CWE-835" ]
shellinabox
4f0ecc31ac6f985e0dd3f5a52cbfc0e9251f6361
83,023,610,874,575,330,000,000,000,000,000,000,000
56
Rolling code for version 2.21
CImg<Tfloat> get_RGBtoxyY(const bool use_D65=true) const { return CImg<Tfloat>(*this,false).RGBtoxyY(use_D65); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
11,893,928,485,123,136,000,000,000,000,000,000,000
3
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that the dimensions encoded in the file do not exceed the file size.
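The one-line message states the whole defence: dimensions read from a file header must be sanity-checked against the file's actual size before they drive an allocation (the CWE-770 tag above). A standalone C sketch with hypothetical field names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical header as parsed from a BMP-like file. */
struct img_header { uint32_t width, height, bpp; };

/* Refuse headers whose implied pixel data would be larger than the file
 * that claims to contain it. */
static int header_is_plausible(const struct img_header *h, uint64_t file_size)
{
    if (h->width == 0 || h->height == 0 || h->bpp < 8)
        return 0;
    uint64_t bytes_per_row = (uint64_t)h->width * (h->bpp / 8);
    /* Divide instead of multiplying so the check itself cannot overflow. */
    if (bytes_per_row == 0 || file_size / bytes_per_row < h->height)
        return 0;
    return 1;
}

int main(void)
{
    struct img_header bogus = { 0xFFFFFFFFu, 0xFFFFFFFFu, 24 };
    printf("plausible: %d\n", header_is_plausible(&bogus, 1024)); /* 0 */
    return 0;
}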
SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, struct sockaddr __user *addrs, int addrs_size) { sctp_assoc_t assoc_id = 0; int err = 0; err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); if (err) return err; else return assoc_id; }
0
[]
linux-2.6
5e739d1752aca4e8f3e794d431503bfca3162df4
71,179,100,039,829,800,000,000,000,000,000,000,000
14
sctp: fix potential panics in the SCTP-AUTH API. All of the SCTP-AUTH socket options could cause a panic if the extension is disabled and the API is invoked. Additionally, there were assumptions that certain pointers would always be valid, which may not always be the case. This patch hardens the API and addresses all of the crash scenarios. Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
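The hardening described, an optional extension whose API can be entered while the extension is off, comes down to checking the feature flag and pointer validity at every entry point. A sketch with hypothetical names (struct endpoint, auth_set_key), not the kernel's SCTP types:

#include <errno.h>
#include <stdio.h>

struct key_list { int n; };
struct endpoint { int auth_enable; struct key_list *keys; };

/* Every SCTP-AUTH-style entry point first checks that the extension is
 * enabled and that the state it is about to touch actually exists. */
static int auth_set_key(struct endpoint *ep)
{
    if (!ep->auth_enable)
        return -EACCES;      /* extension disabled: refuse, don't crash */
    if (!ep->keys)
        return -EINVAL;      /* never assume the pointer is valid */
    ep->keys->n++;
    return 0;
}

int main(void)
{
    struct endpoint ep = { 0, NULL };
    printf("%d\n", auth_set_key(&ep)); /* -EACCES instead of a panic */
    return 0;
}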
static bool pb_release_union_field(pb_istream_t *stream, pb_field_iter_t *iter) { pb_size_t old_tag = *(pb_size_t*)iter->pSize; /* Previous which_ value */ pb_size_t new_tag = iter->pos->tag; /* New which_ value */ if (old_tag == 0) return true; /* Ok, no old data in union */ if (old_tag == new_tag) return true; /* Ok, old data is of same type => merge */ /* Release old data. The find can fail if the message struct contains * invalid data. */ if (!pb_field_iter_find(iter, old_tag)) PB_RETURN_ERROR(stream, "invalid union tag"); pb_release_single_field(iter); /* Restore iterator to where it should be. * This shouldn't fail unless the pb_field_t structure is corrupted. */ if (!pb_field_iter_find(iter, new_tag)) PB_RETURN_ERROR(stream, "iterator error"); return true; }
0
[ "CWE-20", "CWE-119" ]
nanopb
4fe23595732b6f1254cfc11a9b8d6da900b55b0c
4,097,575,955,936,835,000,000,000,000,000,000,000
25
Fix memory leak with oneofs and PB_ENABLE_MALLOC (#615) Nanopb would leak memory when all of the following conditions were true: - PB_ENABLE_MALLOC is defined at compile time - The message definition contains a oneof field, the oneof contains a static submessage, and the static submessage contains a pointer field. - The data being decoded contains two values for the submessage. The logic in pb_release_union_field would detect that the same submessage occurs twice, and wouldn't release it because keeping the old values is necessary to match the C++ library behavior regarding message merges. But then decode_static_field() would go on to memset() the whole submessage to zero, because it unconditionally assumed it to be uninitialized memory. This would normally happen when the contents of the union field are switched to a different oneof item, instead of merging with the same one. This commit changes it so that the field is memset() only when `which_field` contains a different tag.
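The message is precise about the fix: the submessage may only be zero-initialized when the oneof's which_ tag actually changes; when the same tag repeats, the old contents must survive for the merge. A sketch of that decision; struct msg, begin_oneof_field and friends are hypothetical, not nanopb's internals:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct submsg { char *heap_field; };
struct msg { int which_field; struct submsg sub; };

static void release_submsg(struct submsg *s)
{
    free(s->heap_field);
    s->heap_field = NULL;
}

/* Prepare the union member for decoding tag `new_tag`. */
static void begin_oneof_field(struct msg *m, int new_tag)
{
    if (m->which_field == new_tag)
        return;  /* same member again: merge, keep pointers, no memset */
    if (m->which_field != 0)
        release_submsg(&m->sub);          /* release the previous member */
    memset(&m->sub, 0, sizeof(m->sub));   /* only now is wiping safe */
    m->which_field = new_tag;
}

int main(void)
{
    struct msg m = { 0, { NULL } };
    begin_oneof_field(&m, 5);
    m.sub.heap_field = strdup("payload");
    begin_oneof_field(&m, 5);             /* repeat: pointer survives, no leak */
    printf("%s\n", m.sub.heap_field);
    release_submsg(&m.sub);
    return 0;
}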
void wc_ecc_del_point(ecc_point* p) { wc_ecc_del_point_h(p, NULL); }
0
[ "CWE-326", "CWE-203" ]
wolfssl
1de07da61f0c8e9926dcbd68119f73230dae283f
219,143,109,831,040,160,000,000,000,000,000,000,000
4
Constant time EC map to affine for private operations For fast math, use a constant-time modular inverse when mapping to affine when the operation involves a private key - key gen, calc shared secret, sign.
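"Constant time" here means the sequence of branches and memory accesses must not depend on secret data (the CWE-203 timing-side-channel tag above). The commit swaps in a constant-time modular inverse for private-key paths; the underlying building block is branch-free selection, sketched generically below (not wolfSSL's actual implementation):

#include <stdint.h>
#include <stdio.h>

/* Branch-free select: returns a if flag==1, b if flag==0, without a
 * secret-dependent branch. This is the kind of primitive constant-time
 * field operations (such as the modular inverse above) are built from. */
static uint32_t ct_select(uint32_t flag, uint32_t a, uint32_t b)
{
    uint32_t mask = (uint32_t)0 - flag;   /* 0xFFFFFFFF or 0x00000000 */
    return (a & mask) | (b & ~mask);
}

int main(void)
{
    printf("%u\n", ct_select(1, 7, 9));   /* 7 */
    printf("%u\n", ct_select(0, 7, 9));   /* 9 */
    return 0;
}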
void PSOutputDev::writeTrailer() { PSOutCustomColor *cc; if (mode == psModeForm) { writePS("/Foo exch /Form defineresource pop\n"); } else { writePS("end\n"); writePS("%%DocumentSuppliedResources:\n"); writePS(embFontList->getCString()); if (level == psLevel1Sep || level == psLevel2Sep || level == psLevel3Sep) { writePS("%%DocumentProcessColors:"); if (processColors & psProcessCyan) { writePS(" Cyan"); } if (processColors & psProcessMagenta) { writePS(" Magenta"); } if (processColors & psProcessYellow) { writePS(" Yellow"); } if (processColors & psProcessBlack) { writePS(" Black"); } writePS("\n"); writePS("%%DocumentCustomColors:"); for (cc = customColors; cc; cc = cc->next) { writePSFmt(" ({0:s})", cc->name->getCString()); } writePS("\n"); writePS("%%CMYKCustomColor:\n"); for (cc = customColors; cc; cc = cc->next) { writePSFmt("%%+ {0:.4g} {1:.4g} {2:.4g} {3:.4g} ({4:t})\n", cc->c, cc->m, cc->y, cc->k, cc->name); } } } }
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
262,828,739,998,871,650,000,000,000,000,000,000,000
38
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
enum block_op find_operand(const char *start) { char first= *start; char next= *(start+1); if (first == '=' && next == '=') return EQ_OP; if (first == '!' && next == '=') return NE_OP; if (first == '>' && next == '=') return GE_OP; if (first == '>') return GT_OP; if (first == '<' && next == '=') return LE_OP; if (first == '<') return LT_OP; return ILLEG_OP; }
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
212,023,507,068,780,100,000,000,000,000,000,000,000
20
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to unencrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory, assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
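The user-visible change in this worklog is that --ssl=1 now enforces encryption instead of opportunistically trying it. On the C API side this surfaced as an option set before connecting; a hedged sketch assuming the 5.7-era client API (MYSQL_OPT_SSL_ENFORCE existed in that era and was later superseded by MYSQL_OPT_SSL_MODE):

#include <mysql.h>
#include <stdio.h>

int main(void)
{
    MYSQL *conn = mysql_init(NULL);
    if (!conn)
        return 1;

    /* Require an encrypted connection; refuse to fall back to plaintext. */
    my_bool enforce = 1;
    if (mysql_options(conn, MYSQL_OPT_SSL_ENFORCE, &enforce) != 0)
        fprintf(stderr, "option not supported by this client library\n");

    if (!mysql_real_connect(conn, "localhost", "user", "pass",
                            NULL, 0, NULL, 0))
        fprintf(stderr, "connect failed: %s\n", mysql_error(conn));

    mysql_close(conn);
    return 0;
}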
const char *gnu_basename(const char *path) { const char *last_slash = strrchr(path, '/'); if (!last_slash) return path; return last_slash+1; }
0
[ "CWE-284", "CWE-269" ]
firejail
903fd8a0789ca3cc3c21d84cd0282481515592ef
247,823,365,663,663,300,000,000,000,000,000,000,000
7
security fix
static inline bool context_copied(struct context_entry *context) { return !!(context->hi & (1ULL << 3)); }
0
[]
linux
d8b8591054575f33237556c32762d54e30774d28
294,322,411,369,158,700,000,000,000,000,000,000,000
4
iommu/vt-d: Disable ATS support on untrusted devices Commit fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted devices") disables ATS support on the devices which have been marked as untrusted. Unfortunately this is not enough to fix the DMA attack vulnerabilities, because the IOMMU driver allows translated requests as long as a device advertises the ATS capability. Hence a malicious peripheral device could use this to bypass the IOMMU. This disables ATS support on untrusted devices by clearing the internal per-device ATS mark. As a result, the IOMMU driver will block any translated requests from any device marked as untrusted. Cc: Jacob Pan <[email protected]> Cc: Mika Westerberg <[email protected]> Suggested-by: Kevin Tian <[email protected]> Suggested-by: Ashok Raj <[email protected]> Fixes: fb58fdcd295b9 ("iommu/vt-d: Do not enable ATS for untrusted devices") Signed-off-by: Lu Baolu <[email protected]> Signed-off-by: Joerg Roedel <[email protected]>
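The fix's mechanism is small: clear the per-device "ATS supported" mark for untrusted devices so the IOMMU path never honours their pre-translated requests, regardless of what the hardware advertises. A stubbed-out sketch of the pattern, not the kernel's actual structures:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's device/IOMMU state. */
struct device_info { int untrusted; int ats_supported; };

static void configure_ats(struct device_info *info, int hw_advertises_ats)
{
    /* Advertising the capability is not enough: an untrusted device must
     * never have its translated requests accepted. */
    if (hw_advertises_ats && !info->untrusted)
        info->ats_supported = 1;
    else
        info->ats_supported = 0;
}

int main(void)
{
    struct device_info evil = { 1, 0 };
    configure_ats(&evil, 1);
    printf("ats_supported=%d\n", evil.ats_supported); /* 0: blocked */
    return 0;
}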
Item_cache_wrapper::Item_cache_wrapper(THD *thd, Item *item_arg): Item_result_field(thd), orig_item(item_arg), expr_cache(NULL), expr_value(NULL) { DBUG_ASSERT(orig_item->fixed); Type_std_attributes::set(orig_item); maybe_null= orig_item->maybe_null; with_sum_func= orig_item->with_sum_func; with_param= orig_item->with_param; with_field= orig_item->with_field; name= item_arg->name; name_length= item_arg->name_length; with_subselect= orig_item->with_subselect; with_window_func= orig_item->with_window_func; if ((expr_value= Item_cache::get_cache(thd, orig_item))) expr_value->setup(thd, orig_item); fixed= 1; }
0
[ "CWE-89" ]
server
b5e16a6e0381b28b598da80b414168ce9a5016e5
24,579,469,167,937,654,000,000,000,000,000,000,000
19
MDEV-26061 MariaDB server crash at Field::set_default * Item_default_value::fix_fields creates a copy of its argument's field. * Field::default_value is changed when its expression is prepared in unpack_vcol_info_from_frm() This means we must unpack any vcol expression that includes DEFAULT(x) strictly after unpacking x->default_value. To avoid building and solving this dependency graph on every table open, we update Item_default_value::field->default_value after all vcols are unpacked and fixed.
void do_machine_check(struct pt_regs *regs, long error_code) { struct mca_config *cfg = &mca_cfg; struct mce m, *final; int i; int worst = 0; int severity; /* * Establish sequential order between the CPUs entering the machine * check handler. */ int order = -1; /* * If no_way_out gets set, there is no safe way to recover from this * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. */ int no_way_out = 0; /* * If kill_it gets set, there might be a way to recover from this * error. */ int kill_it = 0; DECLARE_BITMAP(toclear, MAX_NR_BANKS); DECLARE_BITMAP(valid_banks, MAX_NR_BANKS); char *msg = "Unknown"; /* * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES * on Intel. */ int lmce = 1; int cpu = smp_processor_id(); /* * Cases where we avoid rendezvous handler timeout: * 1) If this CPU is offline. * * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to * skip those CPUs which remain looping in the 1st kernel - see * crash_nmi_callback(). * * Note: there still is a small window between kexec-ing and the new, * kdump kernel establishing a new #MC handler where a broadcasted MCE * might not get handled properly. */ if (cpu_is_offline(cpu) || (crashing_cpu != -1 && crashing_cpu != cpu)) { u64 mcgstatus; mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); if (mcgstatus & MCG_STATUS_RIPV) { mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); return; } } ist_enter(regs); this_cpu_inc(mce_exception_count); if (!cfg->banks) goto out; mce_gather_info(&m, regs); m.tsc = rdtsc(); final = this_cpu_ptr(&mces_seen); *final = m; memset(valid_banks, 0, sizeof(valid_banks)); no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); barrier(); /* * When no restart IP might need to kill or panic. * Assume the worst for now, but if we find the * severity is MCE_AR_SEVERITY we have other options. */ if (!(m.mcgstatus & MCG_STATUS_RIPV)) kill_it = 1; /* * Check if this MCE is signaled to only this logical processor, * on Intel only. */ if (m.cpuvendor == X86_VENDOR_INTEL) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* * Go through all banks in exclusion of the other CPUs. This way we * don't report duplicated events on shared banks because the first one * to see it will clear it. If this is a Local MCE, then no need to * perform rendezvous. */ if (!lmce) order = mce_start(&no_way_out); for (i = 0; i < cfg->banks; i++) { __clear_bit(i, toclear); if (!test_bit(i, valid_banks)) continue; if (!mce_banks[i].ctl) continue; m.misc = 0; m.addr = 0; m.bank = i; m.status = mce_rdmsrl(msr_ops.status(i)); if ((m.status & MCI_STATUS_VAL) == 0) continue; /* * Non uncorrected or non signaled errors are handled by * machine_check_poll. Leave them alone, unless this panics. */ if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) && !no_way_out) continue; /* * Set taint even when machine check was not enabled. */ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); severity = mce_severity(&m, cfg->tolerant, NULL, true); /* * When machine check was for corrected/deferred handler don't * touch, unless we're panicing. */ if ((severity == MCE_KEEP_SEVERITY || severity == MCE_UCNA_SEVERITY) && !no_way_out) continue; __set_bit(i, toclear); if (severity == MCE_NO_SEVERITY) { /* * Machine check event was not enabled. Clear, but * ignore. 
*/ continue; } mce_read_aux(&m, i); /* assuming valid severity level != 0 */ m.severity = severity; mce_log(&m); if (severity > worst) { *final = m; worst = severity; } } /* mce_clear_state will clear *final, save locally for use later */ m = *final; if (!no_way_out) mce_clear_state(toclear); /* * Do most of the synchronization with other CPUs. * When there's any problem use only local no_way_out state. */ if (!lmce) { if (mce_end(order) < 0) no_way_out = worst >= MCE_PANIC_SEVERITY; } else { /* * Local MCE skipped calling mce_reign() * If we found a fatal error, we need to panic here. */ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) mce_panic("Machine check from unknown source", NULL, NULL); } /* * If tolerant is at an insane level we drop requests to kill * processes and continue even when there is no way out. */ if (cfg->tolerant == 3) kill_it = 0; else if (no_way_out) mce_panic("Fatal machine check on current CPU", &m, msg); if (worst > 0) mce_report_event(regs); mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); out: sync_core(); if (worst != MCE_AR_SEVERITY && !kill_it) goto out_ist; /* Fault was in user mode and we need to take some action */ if ((m.cs & 3) == 3) { ist_begin_non_atomic(regs); local_irq_enable(); if (kill_it || do_memory_failure(&m)) force_sig(SIGBUS, current); local_irq_disable(); ist_end_non_atomic(); } else { if (!fixup_exception(regs, X86_TRAP_MC)) mce_panic("Failed kernel mode recovery", &m, NULL); } out_ist: ist_exit(regs); }
0
[ "CWE-362" ]
linux
b3b7c4795ccab5be71f080774c45bbbcc75c2aaf
93,880,939,364,004,160,000,000,000,000,000,000,000
216
x86/MCE: Serialize sysfs changes The check_interval file in the /sys/devices/system/machinecheck/machinecheck<cpu number> directory is a global timer value for MCE polling. If it is changed by one CPU, mce_restart() broadcasts the event to other CPUs to delete and restart the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the mce_timer variable. If more than one CPU writes a specific value to the check_interval file concurrently, mce_timer is not protected from such concurrent accesses and all kinds of explosions happen. Since only root can write to those sysfs variables, the issue is not a big deal security-wise. However, concurrent writes to these configuration variables are devoid of reason, so the proper thing to do is to serialize the access with a mutex. Boris: - Make store_int_with_restart() use device_store_ulong() to filter out negative intervals - Limit min interval to 1 second - Correct locking - Massage commit message Signed-off-by: Seunghun Han <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Cc: Greg Kroah-Hartman <[email protected]> Cc: Tony Luck <[email protected]> Cc: linux-edac <[email protected]> Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected]
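The fix serializes sysfs writers with a mutex so only one CPU at a time can delete and re-arm the shared timer, and clamps the interval to a sane minimum. A userspace analogue using pthreads; store_check_interval is a hypothetical stand-in for store_int_with_restart():

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t check_interval_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long check_interval = 300;  /* seconds, as in the sysfs file */

/* All writers funnel through one mutex, so the delete-then-restart
 * sequence on the shared timer can never interleave. */
static void store_check_interval(unsigned long new_val)
{
    if (new_val < 1)        /* the fix also limits the minimum interval */
        new_val = 1;
    pthread_mutex_lock(&check_interval_mutex);
    check_interval = new_val;
    /* ...delete and restart the polling timer here, atomically with
     * respect to other writers... */
    pthread_mutex_unlock(&check_interval_mutex);
}

static void *writer(void *arg)
{
    store_check_interval((unsigned long)(uintptr_t)arg);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, writer, (void *)(uintptr_t)60);
    pthread_create(&b, NULL, writer, (void *)(uintptr_t)120);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("check_interval=%lu\n", check_interval); /* 60 or 120, never torn */
    return 0;
}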
fm_mgr_config_init ( OUT p_fm_config_conx_hdlt *p_hdl, IN int instance, OPTIONAL IN char *rem_address, OPTIONAL IN char *community ) { fm_config_conx_hdl *hdl; fm_mgr_config_errno_t res = FM_CONF_OK; if ( (hdl = calloc(1,sizeof(fm_config_conx_hdl))) == NULL ) { res = FM_CONF_NO_MEM; goto cleanup; } hdl->instance = instance; *p_hdl = hdl; // connect to the snmp agent via localhost? if(!rem_address || (strcmp(rem_address,"localhost") == 0)) { if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_SM) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_PM) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } if ( fm_mgr_config_mgr_connect(hdl, FM_MGR_FE) == FM_CONF_INIT_ERR ) { res = FM_CONF_INIT_ERR; goto cleanup; } } return res; cleanup: if ( hdl ) { free(hdl); hdl = NULL; } return res; }
1
[ "CWE-362" ]
opa-fm
c5759e7b76f5bf844be6c6641cc1b356bbc83869
271,488,843,839,979,970,000,000,000,000,000,000,000
57
Fix scripts and code that use well-known tmp files.
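"Well-known tmp files" are the classic race hazard behind the CWE-362 tag above: a fixed, predictable path in /tmp can be pre-created or symlinked by another user between check and use. The standard remedy is mkstemp(), which creates and opens an unpredictable file atomically; a minimal sketch (the file-name prefix below is invented, not opa-fm's actual path):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    /* Instead of fopen("/tmp/fm_config.tmp", "w"), a well-known name that
     * another process can race us for, let mkstemp() create a uniquely
     * named file and hand us the open descriptor in one atomic step. */
    char path[] = "/tmp/fm_config_XXXXXX";
    int fd = mkstemp(path);
    if (fd < 0) {
        perror("mkstemp");
        return 1;
    }
    dprintf(fd, "scratch data\n");
    close(fd);
    unlink(path);      /* clean up the uniquely named file */
    return 0;
}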
ins_tabline(int c) { // We will be leaving the current window, unless closing another tab. if (c != K_TABMENU || current_tabmenu != TABLINE_MENU_CLOSE || (current_tab != 0 && current_tab != tabpage_index(curtab))) { undisplay_dollar(); start_arrow(&curwin->w_cursor); can_cindent = TRUE; } if (c == K_TABLINE) goto_tabpage(current_tab); else { handle_tabmenu(); redraw_statuslines(); // will redraw the tabline when needed } }
0
[ "CWE-122", "CWE-787" ]
vim
0971c7a4e537ea120a6bb2195960be8d0815e97b
86,372,114,509,595,080,000,000,000,000,000,000,000
19
patch 8.2.5162: reading before the start of the line with BS in Replace mode Problem: Reading before the start of the line with BS in Replace mode. Solution: Check the cursor column is more than zero.
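The patch note names the guard directly: check that the cursor column is more than zero before stepping left. A generic analogue of that check in a backspace handler, not Vim's actual replace-mode code:

#include <stdio.h>

/* Replace-mode backspace analogue: restore the character left of the
 * cursor, but only if there *is* a character to the left. */
static void replace_backspace(const char *orig, char *line, int *col)
{
    if (*col <= 0)
        return;        /* the fix: never index line[-1] */
    (*col)--;
    line[*col] = orig[*col];
}

int main(void)
{
    char orig[] = "abc", line[] = "xbc";
    int col = 0;
    replace_backspace(orig, line, &col);  /* no-op at column 0 */
    printf("%s col=%d\n", line, col);
    return 0;
}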
automount_all_volumes_idle_cb (gpointer data) { NautilusApplication *application = NAUTILUS_APPLICATION (data); automount_all_volumes (application); application->automount_idle_id = 0; return FALSE; }
0
[]
nautilus
1e1c916f5537eb5e4144950f291f4a3962fc2395
54,604,298,768,214,660,000,000,000,000,000,000,000
9
Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-file-operations.c: * libnautilus-private/nautilus-file-operations.h: * libnautilus-private/nautilus-mime-actions.c: Add "interactive" argument to nautilus_file_mark_desktop_file_trusted. * src/nautilus-application.c: Mark all desktopfiles on the desktop trusted on first run. svn path=/trunk/; revision=15009
TEST(FieldPath, ScalarVariableWithDottedFieldPathOptimizesToConstantMissingValue) { intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest()); auto varId = expCtx->variablesParseState.defineVariable("userVar"); expCtx->variables.setConstantValue(varId, Value(123)); auto expr = ExpressionFieldPath::parse(expCtx, "$$userVar.x.y", expCtx->variablesParseState); ASSERT_TRUE(dynamic_cast<ExpressionFieldPath*>(expr.get())); auto optimizedExpr = expr->optimize(); auto constantExpr = dynamic_cast<ExpressionConstant*>(optimizedExpr.get()); ASSERT_TRUE(constantExpr); ASSERT_VALUE_EQ(Value(), constantExpr->getValue()); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
78,165,574,838,119,030,000,000,000,000,000,000,000
13
SERVER-38070 fix infinite loop in agg expression
static void l2cap_chan_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, chan_timer.work); struct l2cap_conn *conn = chan->conn; int reason; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); mutex_lock(&conn->chan_lock); l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) reason = ECONNREFUSED; else if (chan->state == BT_CONNECT && chan->sec_level != BT_SECURITY_SDP) reason = ECONNREFUSED; else reason = ETIMEDOUT; l2cap_chan_close(chan, reason); l2cap_chan_unlock(chan); chan->ops->close(chan); mutex_unlock(&conn->chan_lock); l2cap_chan_put(chan); }
0
[ "CWE-787" ]
linux
e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
205,494,287,239,673,380,000,000,000,000,000,000,000
29
Bluetooth: Properly check L2CAP config option output buffer length Validate the output buffer length for L2CAP config requests and responses to avoid overflowing the stack buffer used for building the option blocks. Cc: [email protected] Signed-off-by: Ben Seri <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
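The fix validates the remaining space in the output buffer before each config option is appended, instead of trusting caller-supplied sizes while writing into a stack buffer. A standalone sketch of length-checked option building, loosely modeled on (but not identical to) the kernel's option-builder helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one type-length-value option, refusing to overflow the caller's
 * buffer (the stack-overflow bug class fixed above). Assumes *used never
 * exceeds buf_size, which this function itself maintains. */
static int add_conf_opt(uint8_t *buf, size_t *used, size_t buf_size,
                        uint8_t type, const void *val, uint8_t len)
{
    if (buf_size - *used < (size_t)len + 2)
        return -1;                /* not enough room: fail, don't overflow */
    buf[(*used)++] = type;
    buf[(*used)++] = len;
    memcpy(buf + *used, val, len);
    *used += len;
    return 0;
}

int main(void)
{
    uint8_t rsp[8];
    size_t used = 0;
    uint16_t mtu = 672;
    printf("%d\n", add_conf_opt(rsp, &used, sizeof(rsp), 0x01,
                                &mtu, sizeof(mtu)));          /* 0: fits */
    uint8_t big[16] = {0};
    printf("%d\n", add_conf_opt(rsp, &used, sizeof(rsp), 0x04,
                                big, sizeof(big)));           /* -1: rejected */
    return 0;
}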