Dataset schema (column, type, observed range):

  func        string    length 0 – 484k
  target      int64     0 – 1
  cwe         list      length 0 – 4
  project     string    799 distinct values
  commit_id   string    length 40
  hash        float64   1,215,700,430,453,689,100,000,000B – 340,281,914,521,452,260,000,000,000,000B
  size        int64     1 – 24k
  message     string    length 0 – 13.3k
struct mm_struct *mm_for_maps(struct task_struct *task) { struct mm_struct *mm; if (mutex_lock_killable(&task->cred_guard_mutex)) return NULL; mm = get_task_mm(task); if (mm && mm != current->mm && !ptrace_may_access(task, PTRACE_MODE_READ)) { mmput(mm); mm = NULL; } mutex_unlock(&task->cred_guard_mutex); return mm; }
target: 0
cwe: [ "CWE-20", "CWE-362", "CWE-416" ]
project: linux
commit_id: 86acdca1b63e6890540fa19495cfc708beff3d8b
hash: 129,547,494,529,284,340,000,000,000,000,000,000,000
size: 17
fix autofs/afs/etc. magic mountpoint breakage We end up trying to kfree() nd.last.name on open("/mnt/tmp", O_CREAT) if /mnt/tmp is an autofs direct mount. The reason is that nd.last_type is bogus here; we want LAST_BIND for everything of that kind and we get LAST_NORM left over from finding parent directory. So make sure that it *is* set properly; set to LAST_BIND before doing ->follow_link() - for normal symlinks it will be changed by __vfs_follow_link() and everything else needs it set that way. Signed-off-by: Al Viro <[email protected]>
int blkid_partition_set_type(blkid_partition par, int type) { par->type = type; return 0; }
target: 0
cwe: []
project: util-linux
commit_id: 50d1594c2e6142a3b51d2143c74027480df082e0
hash: 270,504,767,877,140,440,000,000,000,000,000,000,000
size: 5
libblkid: avoid non-empty recursion in EBR This is extension to the patch 7164a1c34d18831ac61c6744ad14ce916d389b3f. We also need to detect non-empty recursion in the EBR chain. It's possible to create standard valid logical partitions and in the last one points back to the EBR chain. In this case all offsets will be non-empty. Unfortunately, it's valid to create logical partitions that are not in the "disk order" (sorted by start offset). So link somewhere back is valid, but this link cannot points to already existing partition (otherwise we will see recursion). This patch forces libblkid to ignore duplicate logical partitions, the duplicate chain segment is interpreted as non-data segment, after 100 iterations with non-data segments it will break the loop -- no memory is allocated in this case by the loop. Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1349536 References: http://seclists.org/oss-sec/2016/q3/40 Signed-off-by: Karel Zak <[email protected]>
shortpath_for_partial( char_u **fnamep, char_u **bufp, int *fnamelen) { int sepcount, len, tflen; char_u *p; char_u *pbuf, *tfname; int hasTilde; // Count up the path separators from the RHS.. so we know which part // of the path to return. sepcount = 0; for (p = *fnamep; p < *fnamep + *fnamelen; MB_PTR_ADV(p)) if (vim_ispathsep(*p)) ++sepcount; // Need full path first (use expand_env() to remove a "~/") hasTilde = (**fnamep == '~'); if (hasTilde) pbuf = tfname = expand_env_save(*fnamep); else pbuf = tfname = FullName_save(*fnamep, FALSE); len = tflen = (int)STRLEN(tfname); if (get_short_pathname(&tfname, &pbuf, &len) == FAIL) return FAIL; if (len == 0) { // Don't have a valid filename, so shorten the rest of the // path if we can. This CAN give us invalid 8.3 filenames, but // there's not a lot of point in guessing what it might be. len = tflen; if (shortpath_for_invalid_fname(&tfname, &pbuf, &len) == FAIL) return FAIL; } // Count the paths backward to find the beginning of the desired string. for (p = tfname + len - 1; p >= tfname; --p) { if (has_mbyte) p -= mb_head_off(tfname, p); if (vim_ispathsep(*p)) { if (sepcount == 0 || (hasTilde && sepcount == 1)) break; else sepcount --; } } if (hasTilde) { --p; if (p >= tfname) *p = '~'; else return FAIL; } else ++p; // Copy in the string - p indexes into tfname - allocated at pbuf vim_free(*bufp); *fnamelen = (int)STRLEN(p); *bufp = pbuf; *fnamep = p; return OK; }
target: 0
cwe: [ "CWE-823", "CWE-703" ]
project: vim
commit_id: 5921aeb5741fc6e84c870d68c7c35b93ad0c9f87
hash: 91,639,414,591,749,070,000,000,000,000,000,000,000
size: 71
patch 8.2.4418: crash when using special multi-byte character Problem: Crash when using special multi-byte character. Solution: Don't use isalpha() for an arbitrary character.
struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, int first_ok) { struct tty_struct *tty; int retval; /* Check if pty master is being opened multiple times */ if (driver->subtype == PTY_TYPE_MASTER && (driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) { return ERR_PTR(-EIO); } /* * First time open is complex, especially for PTY devices. * This code guarantees that either everything succeeds and the * TTY is ready for operation, or else the table slots are vacated * and the allocated memory released. (Except that the termios * and locked termios may be retained.) */ if (!try_module_get(driver->owner)) return ERR_PTR(-ENODEV); tty = alloc_tty_struct(); if (!tty) { retval = -ENOMEM; goto err_module_put; } initialize_tty_struct(tty, driver, idx); retval = tty_driver_install_tty(driver, tty); if (retval < 0) goto err_deinit_tty; /* * Structures all installed ... call the ldisc open routines. * If we fail here just call release_tty to clean up. No need * to decrement the use counts, as release_tty doesn't care. */ retval = tty_ldisc_setup(tty, tty->link); if (retval) goto err_release_tty; return tty; err_deinit_tty: deinitialize_tty_struct(tty); free_tty_struct(tty); err_module_put: module_put(driver->owner); return ERR_PTR(retval); /* call the tty release_tty routine to clean out this slot */ err_release_tty: printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, " "clearing slot %d\n", idx); release_tty(tty, idx); return ERR_PTR(retval); }
target: 0
cwe: [ "CWE-703" ]
project: linux
commit_id: c290f8358acaeffd8e0c551ddcc24d1206143376
hash: 581,604,091,808,194,000,000,000,000,000,000,000
size: 58
TTY: drop driver reference in tty_open fail path When tty_driver_lookup_tty fails in tty_open, we forget to drop a reference to the tty driver. This was added by commit 4a2b5fddd5 (Move tty lookup/reopen to caller). Fix that by adding tty_driver_kref_put to the fail path. I will refactor the code later. This is for the ease of backporting to stable. Introduced-in: v2.6.28-rc2 Signed-off-by: Jiri Slaby <[email protected]> Cc: stable <[email protected]> Cc: Alan Cox <[email protected]> Acked-by: Sukadev Bhattiprolu <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
xfs_bmapi_update_map( struct xfs_bmbt_irec **map, xfs_fileoff_t *bno, xfs_filblks_t *len, xfs_fileoff_t obno, xfs_fileoff_t end, int *n, int flags) { xfs_bmbt_irec_t *mval = *map; ASSERT((flags & XFS_BMAPI_ENTIRE) || ((mval->br_startoff + mval->br_blockcount) <= end)); ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) || (mval->br_startoff < obno)); *bno = mval->br_startoff + mval->br_blockcount; *len = end - *bno; if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) { /* update previous map with new information */ ASSERT(mval->br_startblock == mval[-1].br_startblock); ASSERT(mval->br_blockcount > mval[-1].br_blockcount); ASSERT(mval->br_state == mval[-1].br_state); mval[-1].br_blockcount = mval->br_blockcount; mval[-1].br_state = mval->br_state; } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK && mval[-1].br_startblock != DELAYSTARTBLOCK && mval[-1].br_startblock != HOLESTARTBLOCK && mval->br_startblock == mval[-1].br_startblock + mval[-1].br_blockcount && ((flags & XFS_BMAPI_IGSTATE) || mval[-1].br_state == mval->br_state)) { ASSERT(mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount); mval[-1].br_blockcount += mval->br_blockcount; } else if (*n > 0 && mval->br_startblock == DELAYSTARTBLOCK && mval[-1].br_startblock == DELAYSTARTBLOCK && mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount) { mval[-1].br_blockcount += mval->br_blockcount; mval[-1].br_state = mval->br_state; } else if (!((*n == 0) && ((mval->br_startoff + mval->br_blockcount) <= obno))) { mval++; (*n)++; } *map = mval; }
target: 0
cwe: []
project: linux
commit_id: 2c4306f719b083d17df2963bc761777576b8ad1b
hash: 218,379,992,714,306,000,000,000,000,000,000,000,000
size: 50
xfs: set format back to extents if xfs_bmap_extents_to_btree If xfs_bmap_extents_to_btree fails in a mode where we call xfs_iroot_realloc(-1) to de-allocate the root, set the format back to extents. Otherwise we can assume we can dereference ifp->if_broot based on the XFS_DINODE_FMT_BTREE format, and crash. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199423 Signed-off-by: Eric Sandeen <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Darrick J. Wong <[email protected]> Signed-off-by: Darrick J. Wong <[email protected]>
static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) { stbi__uint16 px = (stbi__uint16)stbi__get16le(s); stbi__uint16 fiveBitMask = 31; // we have 3 channels with 5bits each int r = (px >> 10) & fiveBitMask; int g = (px >> 5) & fiveBitMask; int b = px & fiveBitMask; // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later out[0] = (stbi_uc)((r * 255)/31); out[1] = (stbi_uc)((g * 255)/31); out[2] = (stbi_uc)((b * 255)/31); // some people claim that the most significant bit might be used for alpha // (possibly if an alpha-bit is set in the "image descriptor byte") // but that only made 16bit test images completely translucent.. // so let's treat all 15 and 16bit TGAs as RGB with no alpha. }
target: 0
cwe: [ "CWE-787" ]
project: stb
commit_id: 5ba0baaa269b3fd681828e0e3b3ac0f1472eaf40
hash: 268,628,128,105,298,200,000,000,000,000,000,000,000
size: 18
stb_image: Reject fractional JPEG component subsampling ratios The component resamplers are not written to support this and I've never seen it happen in a real (non-crafted) JPEG file so I'm fine rejecting this as outright corrupt. Fixes issue #1178.
inbound_topictime (server *serv, char *chan, char *nick, time_t stamp, const message_tags_data *tags_data) { char *tim = ctime (&stamp); session *sess = find_channel (serv, chan); if (!sess) sess = serv->server_session; tim[24] = 0; /* get rid of the \n */ EMIT_SIGNAL_TIMESTAMP (XP_TE_TOPICDATE, sess, chan, nick, tim, NULL, 0, tags_data->timestamp); }
target: 0
cwe: [ "CWE-22" ]
project: hexchat
commit_id: 4e061a43b3453a9856d34250c3913175c45afe9d
hash: 67,692,530,169,368,630,000,000,000,000,000,000,000
size: 13
Clean up handling CAP LS
ast_error(struct compiling *c, const node *n, const char *errmsg, ...) { PyObject *value, *errstr, *loc, *tmp; va_list va; va_start(va, errmsg); errstr = PyUnicode_FromFormatV(errmsg, va); va_end(va); if (!errstr) { return 0; } loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset + 1, loc); if (!tmp) { Py_DECREF(errstr); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } return 0; }
target: 0
cwe: [ "CWE-125" ]
project: cpython
commit_id: a4d78362397fc3bced6ea80fbc7b5f4827aec55e
hash: 295,965,485,146,715,440,000,000,000,000,000,000,000
size: 30
bpo-36495: Fix two out-of-bounds array reads (GH-12641) Research and fix by @bradlarsen.
void free_vfsmnt(struct vfsmount *mnt) { kfree(mnt->mnt_devname); kmem_cache_free(mnt_cache, mnt); }
target: 0
cwe: [ "CWE-269" ]
project: linux-2.6
commit_id: ee6f958291e2a768fd727e7a67badfff0b67711a
hash: 27,819,382,331,903,354,000,000,000,000,000,000,000
size: 5
check privileges before setting mount propagation There's a missing check for CAP_SYS_ADMIN in do_change_type(). Signed-off-by: Miklos Szeredi <[email protected]> Cc: Al Viro <[email protected]> Cc: Christoph Hellwig <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static inline int security_socket_getsockopt(struct socket *sock, int level, int optname) { return 0; }
target: 0
cwe: []
project: linux-2.6
commit_id: ee18d64c1f632043a02e6f5ba5e045bb26a5465f
hash: 339,584,043,740,584,300,000,000,000,000,000,000,000
size: 5
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
ogs_sbi_request_t *ogs_sbi_build_request(ogs_sbi_message_t *message) { ogs_sbi_request_t *request = NULL; ogs_assert(message); request = ogs_sbi_request_new(); ogs_expect_or_return_val(request, NULL); ogs_expect_or_return_val(message->h.method, NULL); request->h.method = ogs_strdup(message->h.method); if (message->h.uri) { ogs_expect_or_return_val(message->h.uri, NULL); request->h.uri = ogs_strdup(message->h.uri); ogs_expect_or_return_val(request->h.uri, NULL); } else { int i; ogs_expect_or_return_val(message->h.service.name, NULL); request->h.service.name = ogs_strdup(message->h.service.name); ogs_expect_or_return_val(message->h.api.version, NULL); request->h.api.version = ogs_strdup(message->h.api.version); ogs_expect_or_return_val(request->h.api.version, NULL); ogs_expect_or_return_val(message->h.resource.component[0], NULL); for (i = 0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && message->h.resource.component[i]; i++) request->h.resource.component[i] = ogs_strdup( message->h.resource.component[i]); } /* URL Param */ if (message->param.nf_id) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_NF_ID, message->param.nf_id); } if (message->param.nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_NF_TYPE, v); } if (message->param.requester_nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.requester_nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_REQUESTER_NF_TYPE, v); } if (message->param.target_nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.target_nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_TARGET_NF_TYPE, v); } if (message->param.limit) { char *v = ogs_msprintf("%d", message->param.limit); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_LIMIT, v); ogs_free(v); } if (message->param.dnn) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_DNN, message->param.dnn); } if (message->param.plmn_id_presence) { OpenAPI_plmn_id_t plmn_id; plmn_id.mnc = ogs_plmn_id_mnc_string(&message->param.plmn_id); plmn_id.mcc = ogs_plmn_id_mcc_string(&message->param.plmn_id); if (plmn_id.mnc && plmn_id.mcc) { char *v = NULL; cJSON *item = NULL; item = OpenAPI_plmn_id_convertToJSON(&plmn_id); ogs_expect_or_return_val(item, NULL); if (plmn_id.mnc) ogs_free(plmn_id.mnc); if (plmn_id.mcc) ogs_free(plmn_id.mcc); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_PLMN_ID, v); ogs_free(v); } } if (message->param.single_nssai_presence) { char *v = ogs_sbi_s_nssai_to_string(&message->param.s_nssai); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SINGLE_NSSAI, v); ogs_free(v); } if (message->param.snssai_presence) { char *v = ogs_sbi_s_nssai_to_string(&message->param.s_nssai); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SNSSAI, v); ogs_free(v); } if (message->param.plmn_id_presence) { OpenAPI_plmn_id_t plmn_id; plmn_id.mnc = ogs_plmn_id_mnc_string(&message->param.plmn_id); plmn_id.mcc = ogs_plmn_id_mcc_string(&message->param.plmn_id); if (plmn_id.mnc && plmn_id.mcc) { char *v = NULL; cJSON *item = NULL; item = OpenAPI_plmn_id_convertToJSON(&plmn_id); ogs_expect_or_return_val(item, NULL); if (plmn_id.mnc) ogs_free(plmn_id.mnc); if (plmn_id.mcc) 
ogs_free(plmn_id.mcc); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_PLMN_ID, v); ogs_free(v); } } if (message->param.slice_info_request_for_pdu_session_presence) { OpenAPI_slice_info_for_pdu_session_t SliceInfoForPDUSession; OpenAPI_snssai_t sNSSAI; char *v = NULL; cJSON *item = NULL; ogs_expect_or_return_val(message->param.s_nssai.sst, NULL); ogs_expect_or_return_val(message->param.roaming_indication, NULL); memset(&sNSSAI, 0, sizeof(sNSSAI)); sNSSAI.sst = message->param.s_nssai.sst; sNSSAI.sd = ogs_s_nssai_sd_to_string(message->param.s_nssai.sd); memset(&SliceInfoForPDUSession, 0, sizeof(SliceInfoForPDUSession)); SliceInfoForPDUSession.s_nssai = &sNSSAI; SliceInfoForPDUSession.roaming_indication = message->param.roaming_indication; item = OpenAPI_slice_info_for_pdu_session_convertToJSON( &SliceInfoForPDUSession); ogs_expect_or_return_val(item, NULL); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SLICE_INFO_REQUEST_FOR_PDU_SESSION, v); ogs_free(v); if (sNSSAI.sd) ogs_free(sNSSAI.sd); } if (message->param.ipv4addr) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_IPV4ADDR, message->param.ipv4addr); } if (message->param.ipv6prefix) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_IPV6PREFIX, message->param.ipv6prefix); } ogs_expect_or_return_val(true == build_content(&request->http, message), NULL); if (message->http.accept) { ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, message->http.accept); } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_DELETE) ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, OGS_SBI_CONTENT_PROBLEM_TYPE); break; DEFAULT ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, OGS_SBI_CONTENT_JSON_TYPE "," OGS_SBI_CONTENT_PROBLEM_TYPE); break; END } if (message->http.content_encoding) ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT_ENCODING, message->http.content_encoding); return request; }
target: 0
cwe: [ "CWE-476", "CWE-787" ]
project: open5gs
commit_id: d919b2744cd05abae043490f0a3dd1946c1ccb8c
hash: 121,178,023,650,760,330,000,000,000,000,000,000,000
size: 190
[AMF] fix the memory problem (#1247) 1. memory corruption - Overflow num_of_part in SBI message 2. null pointer dereference - n2InfoContent->ngap_ie_type
lyp_check_edit_attr(struct ly_ctx *ctx, struct lyd_attr *attr, struct lyd_node *parent, int *editbits) { struct lyd_attr *last = NULL; int bits = 0; /* 0x01 - insert attribute present * 0x02 - insert is relative (before or after) * 0x04 - value attribute present * 0x08 - key attribute present * 0x10 - operation attribute present * 0x20 - operation not allowing insert attribute (delete or remove) */ LY_TREE_FOR(attr, attr) { last = NULL; if (!strcmp(attr->annotation->arg_value, "operation") && !strcmp(attr->annotation->module->name, "ietf-netconf")) { if (bits & 0x10) { LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYD, parent, "operation attributes", parent->schema->name); return -1; } bits |= 0x10; if (attr->value.enm->value >= 3) { /* delete or remove */ bits |= 0x20; } } else if (attr->annotation->module == ctx->models.list[1] && /* internal YANG schema */ !strcmp(attr->annotation->arg_value, "insert")) { /* 'insert' attribute present */ if (!(parent->schema->flags & LYS_USERORDERED)) { /* ... but it is not expected */ LOGVAL(ctx, LYE_INATTR, LY_VLOG_LYD, parent, "insert"); return -1; } if (bits & 0x01) { LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYD, parent, "insert attributes", parent->schema->name); return -1; } bits |= 0x01; if (attr->value.enm->value >= 2) { /* before or after */ bits |= 0x02; } last = attr; } else if (attr->annotation->module == ctx->models.list[1] && /* internal YANG schema */ !strcmp(attr->annotation->arg_value, "value")) { if (bits & 0x04) { LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYD, parent, "value attributes", parent->schema->name); return -1; } else if (parent->schema->nodetype & LYS_LIST) { LOGVAL(ctx, LYE_INATTR, LY_VLOG_LYD, parent, attr->name); return -1; } bits |= 0x04; last = attr; } else if (attr->annotation->module == ctx->models.list[1] && /* internal YANG schema */ !strcmp(attr->annotation->arg_value, "key")) { if (bits & 0x08) { LOGVAL(ctx, LYE_TOOMANY, LY_VLOG_LYD, parent, "key attributes", parent->schema->name); return -1; } else if (parent->schema->nodetype & LYS_LEAFLIST) { LOGVAL(ctx, LYE_INATTR, LY_VLOG_LYD, parent, attr->name); return -1; } bits |= 0x08; last = attr; } } /* report errors */ if (last && (!(parent->schema->nodetype & (LYS_LEAFLIST | LYS_LIST)) || !(parent->schema->flags & LYS_USERORDERED))) { /* moving attributes in wrong elements (not an user ordered list or not a list at all) */ LOGVAL(ctx, LYE_INATTR, LY_VLOG_LYD, parent, last->name); return -1; } else if (bits == 3) { /* 0x01 | 0x02 - relative position, but value/key is missing */ if (parent->schema->nodetype & LYS_LIST) { LOGVAL(ctx, LYE_MISSATTR, LY_VLOG_LYD, parent, "key", parent->schema->name); } else { /* LYS_LEAFLIST */ LOGVAL(ctx, LYE_MISSATTR, LY_VLOG_LYD, parent, "value", parent->schema->name); } return -1; } else if ((bits & (0x04 | 0x08)) && !(bits & 0x02)) { /* key/value without relative position */ LOGVAL(ctx, LYE_INATTR, LY_VLOG_LYD, parent, (bits & 0x04) ? "value" : "key"); return -1; } else if ((bits & 0x21) == 0x21) { /* insert in delete/remove */ LOGVAL(ctx, LYE_INATTR, LY_VLOG_LYD, parent, "insert"); return -1; } if (editbits) { *editbits = bits; } return 0; }
target: 0
cwe: [ "CWE-787" ]
project: libyang
commit_id: f6d684ade99dd37b21babaa8a856f64faa1e2e0d
hash: 173,327,425,604,411,270,000,000,000,000,000,000,000
size: 98
parser BUGFIX long identity name buffer overflow STRING_OVERFLOW (CWE-120)
void Monitor::update_mon_metadata(int from, Metadata&& m) { // NOTE: this is now for legacy (kraken or jewel) mons only. pending_metadata[from] = std::move(m); MonitorDBStore::TransactionRef t = paxos->get_pending_transaction(); bufferlist bl; ::encode(pending_metadata, bl); t->put(MONITOR_STORE_PREFIX, "last_metadata", bl); paxos->trigger_propose(); }
target: 0
cwe: [ "CWE-287", "CWE-284" ]
project: ceph
commit_id: 5ead97120e07054d80623dada90a5cc764c28468
hash: 142,348,583,834,072,810,000,000,000,000,000,000,000
size: 11
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
void JBIG2MMRDecoder::skipTo(unsigned int length) { int n = str->discardChars(length - nBytesRead); nBytesRead += n; byteCounter += n; }
target: 0
cwe: [ "CWE-476", "CWE-190" ]
project: poppler
commit_id: 27354e9d9696ee2bc063910a6c9a6b27c5184a52
hash: 245,909,794,126,869,700,000,000,000,000,000,000,000
size: 6
JBIG2Stream: Fix crash on broken file https://github.com/jeffssh/CVE-2021-30860 Thanks to David Warren for the heads up
int JPXStream::getChars(int nChars, unsigned char *buffer) { if (unlikely(priv->inited == false)) { init(); } for (int i = 0; i < nChars; ++i) { const int c = doGetChar(priv); if (likely(c != EOF)) buffer[i] = c; else return i; } return nChars; }
target: 0
cwe: [ "CWE-125" ]
project: poppler
commit_id: 89a5367d49b2556a2635dbb6d48d6a6b182a2c6c
hash: 123,198,541,907,388,760,000,000,000,000,000,000,000
size: 10
JPEG2000Stream: fail gracefully if not all components have the same WxH I think this is just a mistake, or at least the only file we have with this scenario is a fuzzed one
void SSL_set_shutdown(SSL *s, int mode) { s->shutdown = mode; }
target: 0
cwe: [ "CWE-310" ]
project: openssl
commit_id: 56f1acf5ef8a432992497a04792ff4b3b2c6f286
hash: 100,091,310,450,541,280,000,000,000,000,000,000,000
size: 4
Disable SSLv2 default build, default negotiation and weak ciphers. SSLv2 is by default disabled at build-time. Builds that are not configured with "enable-ssl2" will not support SSLv2. Even if "enable-ssl2" is used, users who want to negotiate SSLv2 via the version-flexible SSLv23_method() will need to explicitly call either of: SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2); or SSL_clear_options(ssl, SSL_OP_NO_SSLv2); as appropriate. Even if either of those is used, or the application explicitly uses the version-specific SSLv2_method() or its client or server variants, SSLv2 ciphers vulnerable to exhaustive search key recovery have been removed. Specifically, the SSLv2 40-bit EXPORT ciphers, and SSLv2 56-bit DES are no longer available. Mitigation for CVE-2016-0800 Reviewed-by: Emilia Käsper <[email protected]>
run_startup_files () { #if defined (JOB_CONTROL) int old_job_control; #endif int sourced_login, run_by_ssh; /* get the rshd/sshd case out of the way first. */ if (interactive_shell == 0 && no_rc == 0 && login_shell == 0 && act_like_sh == 0 && command_execution_string) { #ifdef SSH_SOURCE_BASHRC run_by_ssh = (find_variable ("SSH_CLIENT") != (SHELL_VAR *)0) || (find_variable ("SSH2_CLIENT") != (SHELL_VAR *)0); #else run_by_ssh = 0; #endif /* If we were run by sshd or we think we were run by rshd, execute ~/.bashrc if we are a top-level shell. */ if ((run_by_ssh || isnetconn (fileno (stdin))) && shell_level < 2) { #ifdef SYS_BASHRC # if defined (__OPENNT) maybe_execute_file (_prefixInstallPath(SYS_BASHRC, NULL, 0), 1); # else maybe_execute_file (SYS_BASHRC, 1); # endif #endif maybe_execute_file (bashrc_file, 1); return; } } #if defined (JOB_CONTROL) /* Startup files should be run without job control enabled. */ old_job_control = interactive_shell ? set_job_control (0) : 0; #endif sourced_login = 0; /* A shell begun with the --login (or -l) flag that is not in posix mode runs the login shell startup files, no matter whether or not it is interactive. If NON_INTERACTIVE_LOGIN_SHELLS is defined, run the startup files if argv[0][0] == '-' as well. */ #if defined (NON_INTERACTIVE_LOGIN_SHELLS) if (login_shell && posixly_correct == 0) #else if (login_shell < 0 && posixly_correct == 0) #endif { /* We don't execute .bashrc for login shells. */ no_rc++; /* Execute /etc/profile and one of the personal login shell initialization files. */ if (no_profile == 0) { maybe_execute_file (SYS_PROFILE, 1); if (act_like_sh) /* sh */ maybe_execute_file ("~/.profile", 1); else if ((maybe_execute_file ("~/.bash_profile", 1) == 0) && (maybe_execute_file ("~/.bash_login", 1) == 0)) /* bash */ maybe_execute_file ("~/.profile", 1); } sourced_login = 1; } /* A non-interactive shell not named `sh' and not in posix mode reads and executes commands from $BASH_ENV. If `su' starts a shell with `-c cmd' and `-su' as the name of the shell, we want to read the startup files. No other non-interactive shells read any startup files. */ if (interactive_shell == 0 && !(su_shell && login_shell)) { if (posixly_correct == 0 && act_like_sh == 0 && privileged_mode == 0 && sourced_env++ == 0) execute_env_file (get_string_value ("BASH_ENV")); return; } /* Interactive shell or `-su' shell. */ if (posixly_correct == 0) /* bash, sh */ { if (login_shell && sourced_login++ == 0) { /* We don't execute .bashrc for login shells. */ no_rc++; /* Execute /etc/profile and one of the personal login shell initialization files. */ if (no_profile == 0) { maybe_execute_file (SYS_PROFILE, 1); if (act_like_sh) /* sh */ maybe_execute_file ("~/.profile", 1); else if ((maybe_execute_file ("~/.bash_profile", 1) == 0) && (maybe_execute_file ("~/.bash_login", 1) == 0)) /* bash */ maybe_execute_file ("~/.profile", 1); } } /* bash */ if (act_like_sh == 0 && no_rc == 0) { #ifdef SYS_BASHRC # if defined (__OPENNT) maybe_execute_file (_prefixInstallPath(SYS_BASHRC, NULL, 0), 1); # else maybe_execute_file (SYS_BASHRC, 1); # endif #endif maybe_execute_file (bashrc_file, 1); } /* sh */ else if (act_like_sh && privileged_mode == 0 && sourced_env++ == 0) execute_env_file (get_string_value ("ENV")); } else /* bash --posix, sh --posix */ { /* bash and sh */ if (interactive_shell && privileged_mode == 0 && sourced_env++ == 0) execute_env_file (get_string_value ("ENV")); } #if defined (JOB_CONTROL) set_job_control (old_job_control); #endif }
target: 0
cwe: [ "CWE-273", "CWE-787" ]
project: bash
commit_id: 951bdaad7a18cc0dc1036bba86b18b90874d39ff
hash: 21,049,415,436,869,251,000,000,000,000,000,000,000
size: 131
commit bash-20190628 snapshot
static void cdrom_mmc3_profile(struct cdrom_device_info *cdi) { struct packet_command cgc; char buffer[32]; int ret, mmc3_profile; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); cgc.cmd[0] = GPCMD_GET_CONFIGURATION; cgc.cmd[1] = 0; cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */ cgc.cmd[8] = sizeof(buffer); /* Allocation Length */ cgc.quiet = 1; if ((ret = cdi->ops->generic_packet(cdi, &cgc))) mmc3_profile = 0xffff; else mmc3_profile = (buffer[6] << 8) | buffer[7]; cdi->mmc3_profile = mmc3_profile; }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 9de4ee40547fd315d4a0ed1dd15a2fa3559ad707
hash: 288,037,853,070,493,250,000,000,000,000,000,000,000
size: 21
cdrom: information leak in cdrom_ioctl_media_changed() This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned long. The way the check is written now, if one of the high 32 bits is set then we could read outside the info->slots[] array. This bug is pretty old and it predates git. Reviewed-by: Christoph Hellwig <[email protected]> Cc: [email protected] Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static void fast_forward_char_pair_sse2_compare(struct sljit_compiler *compiler, PCRE2_UCHAR char1, PCRE2_UCHAR char2, sljit_u32 bit, sljit_s32 dst_ind, sljit_s32 cmp1_ind, sljit_s32 cmp2_ind, sljit_s32 tmp_ind) { sljit_u8 instruction[4]; instruction[0] = 0x66; instruction[1] = 0x0f; if (char1 == char2 || bit != 0) { if (bit != 0) { /* POR xmm1, xmm2/m128 */ /* instruction[0] = 0x66; */ /* instruction[1] = 0x0f; */ instruction[2] = 0xeb; instruction[3] = 0xc0 | (dst_ind << 3) | cmp2_ind; sljit_emit_op_custom(compiler, instruction, 4); } /* PCMPEQB/W/D xmm1, xmm2/m128 */ /* instruction[0] = 0x66; */ /* instruction[1] = 0x0f; */ instruction[2] = 0x74 + SSE2_COMPARE_TYPE_INDEX; instruction[3] = 0xc0 | (dst_ind << 3) | cmp1_ind; sljit_emit_op_custom(compiler, instruction, 4); } else { /* MOVDQA xmm1, xmm2/m128 */ /* instruction[0] = 0x66; */ /* instruction[1] = 0x0f; */ instruction[2] = 0x6f; instruction[3] = 0xc0 | (tmp_ind << 3) | dst_ind; sljit_emit_op_custom(compiler, instruction, 4); /* PCMPEQB/W/D xmm1, xmm2/m128 */ /* instruction[0] = 0x66; */ /* instruction[1] = 0x0f; */ instruction[2] = 0x74 + SSE2_COMPARE_TYPE_INDEX; instruction[3] = 0xc0 | (dst_ind << 3) | cmp1_ind; sljit_emit_op_custom(compiler, instruction, 4); instruction[3] = 0xc0 | (tmp_ind << 3) | cmp2_ind; sljit_emit_op_custom(compiler, instruction, 4); /* POR xmm1, xmm2/m128 */ /* instruction[0] = 0x66; */ /* instruction[1] = 0x0f; */ instruction[2] = 0xeb; instruction[3] = 0xc0 | (dst_ind << 3) | tmp_ind; sljit_emit_op_custom(compiler, instruction, 4); } }
target: 0
cwe: [ "CWE-125" ]
project: php-src
commit_id: 8947fd9e9fdce87cd6c59817b1db58e789538fe9
hash: 283,756,984,602,291,620,000,000,000,000,000,000,000
size: 53
Fix #78338: Array cross-border reading in PCRE We backport r1092 from pcre2.
arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct vm_unmapped_area_info info; unsigned long begin, end; if (flags & MAP_FIXED) return addr; find_start_end(flags, &begin, &end); if (len > end) return -ENOMEM; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } info.flags = 0; info.length = len; info.low_limit = begin; info.high_limit = end; info.align_mask = 0; info.align_offset = pgoff << PAGE_SHIFT; if (filp) { info.align_mask = get_align_mask(); info.align_offset += get_align_bits(); } return vm_unmapped_area(&info); }
target: 1
cwe: [ "CWE-119" ]
project: linux
commit_id: 1be7107fbe18eed3e319a6c3e83c78254b693acb
hash: 118,450,408,525,831,480,000,000,000,000,000,000,000
size: 36
mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunatelly. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov <[email protected]> Original-patch-by: Michal Hocko <[email protected]> Signed-off-by: Hugh Dickins <[email protected]> Acked-by: Michal Hocko <[email protected]> Tested-by: Helge Deller <[email protected]> # parisc Signed-off-by: Linus Torvalds <[email protected]>
read_system_page (Bit_Chain *dat, int64_t size_comp, int64_t size_uncomp, int64_t repeat_count) { int i; int error = 0; int64_t pesize; // Pre RS encoded size int64_t block_count; // Number of RS encoded blocks int64_t page_size; long pedata_size; BITCODE_RC *rsdata; // RS encoded data BITCODE_RC *pedata; // Pre RS encoded data BITCODE_RC *data; // The data RS unencoded and uncompressed if (repeat_count < 0 || repeat_count > DBG_MAX_COUNT || (uint64_t)size_comp >= dat->size || (uint64_t)size_uncomp >= dat->size) { LOG_ERROR ("Invalid r2007 system page: " "size_comp: %" PRId64 ", size_uncomp: %" PRId64 ", repeat_count: %" PRId64, size_comp, size_uncomp, repeat_count); return NULL; } // Round to a multiple of 8 pesize = ((size_comp + 7) & ~7) * repeat_count; // Divide pre encoded size by RS k-value (239) block_count = (pesize + 238) / 239; if (block_count <= 0 || block_count > DBG_MAX_COUNT) { LOG_ERROR ("Invalid r2007 system page: size_comp: %" PRId64 ", size_uncomp: %" PRId64, size_comp, size_uncomp); return NULL; } // Multiply with codeword size (255) and round to a multiple of 8 page_size = (block_count * 255 + 7) & ~7; if ((uint64_t)page_size >= DBG_MAX_COUNT || (unsigned long)page_size > dat->size - dat->byte) { LOG_ERROR ("Invalid r2007 system page: page_size: %" PRId64, page_size); return NULL; } LOG_HANDLE ("read_system_page: size_comp: %" PRId64 ", size_uncomp: %" PRId64 ", repeat_count: %" PRId64 "\n", size_comp, size_uncomp, repeat_count); assert ((uint64_t)size_comp < dat->size); assert ((uint64_t)size_uncomp < dat->size); assert ((uint64_t)repeat_count < DBG_MAX_COUNT); assert ((uint64_t)page_size < DBG_MAX_COUNT); data = (BITCODE_RC *)calloc (size_uncomp + page_size, 1); LOG_HANDLE ("Alloc system page of size %" PRId64 "\n", size_uncomp + page_size) if (!data) { LOG_ERROR ("Out of memory") return NULL; } rsdata = &data[size_uncomp]; bit_read_fixed (dat, rsdata, page_size); pedata_size = block_count * 239; pedata = decode_rs (rsdata, block_count, 239, page_size); if (!pedata) { free (data); return NULL; } if (size_comp < size_uncomp) error = decompress_r2007 (data, size_uncomp, pedata, MIN (pedata_size, size_comp)); else memcpy (data, pedata, size_uncomp); free (pedata); if (error >= DWG_ERR_CRITICAL) { free (data); return NULL; } return data; }
target: 0
cwe: [ "CWE-787" ]
project: libredwg
commit_id: 45d2a290c65ed691be0901ba2b2ef51044e07a16
hash: 56,514,946,148,474,110,000,000,000,000,000,000,000
size: 79
decode_r2007: fix for invalid section size See GH #350. With fuzzing section->data_size might not fit section_page->uncomp_size.
static void fourinsix(struct sockaddr_storage *v6) { struct sockaddr_storage v4; if (v6ready == 0 || STORAGE_FAMILY(*v6) != AF_INET6 || IN6_IS_ADDR_V4MAPPED(&STORAGE_SIN_ADDR6_NF_CONST(*v6)) == 0) { return; } memset(&v4, 0, sizeof v4); STORAGE_FAMILY(v4) = AF_INET; memcpy(&STORAGE_SIN_ADDR(v4), (unsigned char *) &STORAGE_SIN_ADDR6_CONST(*v6) + 12, sizeof STORAGE_SIN_ADDR(v4)); STORAGE_PORT(v4) = STORAGE_PORT6_CONST(*v6); SET_STORAGE_LEN(v4, sizeof(struct sockaddr_in)); *v6 = v4; }
target: 0
cwe: [ "CWE-434" ]
project: pure-ftpd
commit_id: 37ad222868e52271905b94afea4fc780d83294b4
hash: 7,682,625,026,022,711,000,000,000,000,000,000,000
size: 17
Initialize the max upload file size when quotas are enabled Due to an unwanted check, files causing the quota to be exceeded were deleted after the upload, but not during the upload. The bug was introduced in 2009 in version 1.0.23 Spotted by @DroidTest, thanks!
int vnc_display_disable_login(DisplayState *ds) { VncDisplay *vs = ds ? (VncDisplay *)ds->opaque : vnc_display; if (!vs) { return -1; } if (vs->password) { g_free(vs->password); } vs->password = NULL; if (vs->auth == VNC_AUTH_NONE) { vs->auth = VNC_AUTH_VNC; } return 0; }
target: 0
cwe: [ "CWE-125" ]
project: qemu
commit_id: 9f64916da20eea67121d544698676295bbb105a7
hash: 234,460,861,243,630,200,000,000,000,000,000,000,000
size: 19
pixman/vnc: use pixman images in vnc. The vnc code uses *three* DisplaySurfaces: First is the surface of the actual QemuConsole, usually the guest screen, but could also be a text console (monitor/serial reachable via Ctrl-Alt-<nr> keys). This is left as-is. Second is the current server's view of the screen content. The vnc code uses this to figure which parts of the guest screen did _really_ change to reduce the amount of updates sent to the vnc clients. It is also used as data source when sending out the updates to the clients. This surface gets replaced by a pixman image. The format changes too, instead of using the guest screen format we'll use fixed 32bit rgb framebuffer and convert the pixels on the fly when comparing and updating the server framebuffer. Third surface carries the format expected by the vnc client. That isn't used to store image data. This surface is switched to PixelFormat and a boolean for bigendian byte order. Signed-off-by: Gerd Hoffmann <[email protected]>
pdf14_create_compositor(gx_device * dev, gx_device * * pcdev, const gs_composite_t * pct, gs_gstate * pgs, gs_memory_t * mem, gx_device *cdev) { pdf14_device *p14dev = (pdf14_device *)dev; if (gs_is_pdf14trans_compositor(pct)) { const gs_pdf14trans_t * pdf14pct = (const gs_pdf14trans_t *) pct; *pcdev = dev; /* cdev, may be the clist reader device which may contain information that we will need related to the ICC color spaces that define transparency groups. We want this propogated through all the pdf14 functions. Store a pointer to it in the pdf14 device */ p14dev->pclist_device = cdev; return gx_update_pdf14_compositor(dev, pgs, pdf14pct, mem); } else if (gs_is_overprint_compositor(pct)) { /* If we had an overprint compositer action, then the color components that were drawn should be updated. The overprint compositor logic and its interactions with the clist is a little odd as it passes uninitialized values around a fair amount. Hence the forced assignement here. See gx_spot_colors_set_overprint in gscspace for issues... */ const gs_overprint_t * op_pct = (const gs_overprint_t *) pct; if (op_pct->params.retain_any_comps && !op_pct->params.retain_spot_comps) { p14dev->drawn_comps = op_pct->params.drawn_comps; } else { /* Draw everything. If this parameter was not set, clist does not fill it in. */ p14dev->drawn_comps = ( (gx_color_index) 1 << (p14dev->color_info.num_components)) - (gx_color_index) 1; } *pcdev = dev; return 0; } else return gx_no_create_compositor(dev, pcdev, pct, pgs, mem, cdev); }
target: 0
cwe: [ "CWE-416" ]
project: ghostpdl
commit_id: 90fd0c7ca3efc1ddff64a86f4104b13b3ac969eb
hash: 209,835,271,155,774,300,000,000,000,000,000,000,000
size: 34
Bug 697456. Dont create new ctx when pdf14 device reenabled This bug had yet another weird case where the user created a file that pushed the pdf14 device twice. We were in that case, creating a new ctx and blowing away the original one with out proper clean up. To avoid, only create a new one when we need it.
static void utf7_encode(FILE *sfd,long ch) { putc(base64[(ch>>18)&0x3f],sfd); putc(base64[(ch>>12)&0x3f],sfd); putc(base64[(ch>>6)&0x3f],sfd); putc(base64[ch&0x3f],sfd); }
target: 0
cwe: [ "CWE-416" ]
project: fontforge
commit_id: 048a91e2682c1a8936ae34dbc7bd70291ec05410
hash: 10,400,436,269,032,889,000,000,000,000,000,000,000
size: 7
Fix for #4084 Use-after-free (heap) in the SFD_GetFontMetaData() function Fix for #4086 NULL pointer dereference in the SFDGetSpiros() function Fix for #4088 NULL pointer dereference in the SFD_AssignLookups() function Add empty sf->fontname string if it isn't set, fixing #4089 #4090 and many other potential issues (many downstream calls to strlen() on the value).
MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "length" ); if( bson_iterator_type( &it ) == BSON_INT ) return ( gridfs_offset )bson_iterator_int( &it ); else return ( gridfs_offset )bson_iterator_long( &it ); }
target: 0
cwe: [ "CWE-190" ]
project: mongo-c-driver-legacy
commit_id: 1a1f5e26a4309480d88598913f9eebf9e9cba8ca
hash: 66,217,323,986,732,520,000,000,000,000,000,000,000
size: 10
don't mix up int and size_t (first pass to fix that)
virtual Item *in_predicate_to_in_subs_transformer(THD *thd, uchar *arg) { return this; }
target: 0
cwe: [ "CWE-617" ]
project: server
commit_id: 807945f2eb5fa22e6f233cc17b85a2e141efe2c8
hash: 281,284,898,195,094,700,000,000,000,000,000,000,000
size: 2
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
irc_server_create_buffer (struct t_irc_server *server) { char buffer_name[256], charset_modifier[256]; struct t_gui_buffer *ptr_buffer_for_merge; ptr_buffer_for_merge = NULL; switch (weechat_config_integer (irc_config_look_server_buffer)) { case IRC_CONFIG_LOOK_SERVER_BUFFER_MERGE_WITH_CORE: /* merge with WeeChat core buffer */ ptr_buffer_for_merge = weechat_buffer_search_main (); break; case IRC_CONFIG_LOOK_SERVER_BUFFER_MERGE_WITHOUT_CORE: /* find buffer used to merge all IRC server buffers */ ptr_buffer_for_merge = irc_buffer_search_first_for_all_servers (); break; } snprintf (buffer_name, sizeof (buffer_name), "server.%s", server->name); server->buffer = weechat_buffer_new (buffer_name, &irc_input_data_cb, NULL, &irc_buffer_close_cb, NULL); if (!server->buffer) return NULL; weechat_buffer_set (server->buffer, "short_name", server->name); weechat_buffer_set (server->buffer, "localvar_set_type", "server"); weechat_buffer_set (server->buffer, "localvar_set_server", server->name); weechat_buffer_set (server->buffer, "localvar_set_channel", server->name); snprintf (charset_modifier, sizeof (charset_modifier), "irc.%s", server->name); weechat_buffer_set (server->buffer, "localvar_set_charset_modifier", charset_modifier); weechat_hook_signal_send ("logger_backlog", WEECHAT_HOOK_SIGNAL_POINTER, server->buffer); if (weechat_config_boolean (irc_config_network_send_unknown_commands)) weechat_buffer_set (server->buffer, "input_get_unknown_commands", "1"); /* set highlights settings on server buffer */ weechat_buffer_set (server->buffer, "highlight_words_add", "$nick"); if (weechat_config_string (irc_config_look_highlight_tags) && weechat_config_string (irc_config_look_highlight_tags)[0]) { weechat_buffer_set (server->buffer, "highlight_tags", weechat_config_string (irc_config_look_highlight_tags)); } irc_server_set_buffer_title (server); /* merge buffer if needed */ if (ptr_buffer_for_merge) weechat_buffer_merge (server->buffer, ptr_buffer_for_merge); return server->buffer; }
target: 0
cwe: [ "CWE-20" ]
project: weechat
commit_id: c265cad1c95b84abfd4e8d861f25926ef13b5d91
hash: 331,549,926,097,939,900,000,000,000,000,000,000,000
size: 58
Fix verification of SSL certificates by calling gnutls verify callback (patch #7459)
int timer_reduce(struct timer_list *timer, unsigned long expires) { return __mod_timer(timer, expires, MOD_TIMER_REDUCE); }
target: 0
cwe: [ "CWE-200", "CWE-330" ]
project: linux
commit_id: f227e3ec3b5cad859ad15666874405e8c1bbc1d4
hash: 334,641,326,413,409,120,000,000,000,000,000,000,000
size: 4
random32: update the net random state on interrupt and activity This modifies the first 32 bits out of the 128 bits of a random CPU's net_rand_state on interrupt or CPU activity to complicate remote observations that could lead to guessing the network RNG's internal state. Note that depending on some network devices' interrupt rate moderation or binding, this re-seeding might happen on every packet or even almost never. In addition, with NOHZ some CPUs might not even get timer interrupts, leaving their local state rarely updated, while they are running networked processes making use of the random state. For this reason, we also perform this update in update_process_times() in order to at least update the state when there is user or system activity, since it's the only case we care about. Reported-by: Amit Klein <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Cc: Eric Dumazet <[email protected]> Cc: "Jason A. Donenfeld" <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Kees Cook <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: <[email protected]> Signed-off-by: Willy Tarreau <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) { vs->vs_events_nr--; kfree(evt); }
target: 0
cwe: [ "CWE-200", "CWE-119" ]
project: linux
commit_id: 59c816c1f24df0204e01851431d3bab3eb76719c
hash: 50,523,673,604,191,910,000,000,000,000,000,000,000
size: 5
vhost/scsi: potential memory corruption This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt" to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16. I looked at the context and it turns out that in vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so anything higher than 255 then it is invalid. I have made that the limit now. In vhost_scsi_send_evt() we mask away values higher than 255, but now that the limit has changed, we don't need the mask. Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Nicholas Bellinger <[email protected]>
ReadNextFunctionHandle(mat_t *mat, matvar_t *matvar) { int err; size_t nelems = 1; err = SafeMulDims(matvar, &nelems); matvar->data_size = sizeof(matvar_t *); err |= SafeMul(&matvar->nbytes, nelems, matvar->data_size); if ( err ) return 0; matvar->data = malloc(matvar->nbytes); if ( matvar->data != NULL ) { size_t i; matvar_t **functions = (matvar_t**)matvar->data;; for ( i = 0; i < nelems; i++ ) functions[i] = Mat_VarReadNextInfo(mat); } else { matvar->data_size = 0; matvar->nbytes = 0; } return 0; }
target: 0
cwe: [ "CWE-190", "CWE-401" ]
project: matio
commit_id: 5fa49ef9fc4368fe3d19b5fdaa36d8fa5e7f4606
hash: 30,236,150,576,142,420,000,000,000,000,000,000,000
size: 24
Fix integer addition overflow As reported by https://github.com/tbeu/matio/issues/121
**/ T& operator()(const unsigned int x, const unsigned int y, const unsigned int z, const unsigned int c, const ulongT wh, const ulongT whd=0) { cimg::unused(wh,whd); return (*this)(x,y,z,c);
target: 0
cwe: [ "CWE-125" ]
project: CImg
commit_id: 10af1e8c1ad2a58a0a3342a856bae63e8f257abb
hash: 151,421,008,646,563,500,000,000,000,000,000,000,000
size: 5
Fix other issues in 'CImg<T>::load_bmp()'.
int ssl3_write_pending(SSL *s, int type, const unsigned char *buf, unsigned int len) { int i; SSL3_BUFFER *wb = &(s->s3->wbuf); /* XXXX */ if ((s->s3->wpend_tot > (int)len) || ((s->s3->wpend_buf != buf) && !(s->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER)) || (s->s3->wpend_type != type)) { SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BAD_WRITE_RETRY); return (-1); } for (;;) { clear_sys_error(); if (s->wbio != NULL) { s->rwstate = SSL_WRITING; i = BIO_write(s->wbio, (char *)&(wb->buf[wb->offset]), (unsigned int)wb->left); } else { SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BIO_NOT_SET); i = -1; } if (i == wb->left) { wb->left = 0; wb->offset += i; s->rwstate = SSL_NOTHING; return (s->s3->wpend_ret); } else if (i <= 0) { if (s->version == DTLS1_VERSION || s->version == DTLS1_BAD_VER) { /* * For DTLS, just drop it. That's kind of the whole point in * using a datagram service */ wb->left = 0; } return (i); } wb->offset += i; wb->left -= i; } }
target: 0
cwe: [ "CWE-17" ]
project: openssl
commit_id: 77c77f0a1b9f15b869ca3342186dfbedd1119d0e
hash: 275,526,522,545,930,300,000,000,000,000,000,000,000
size: 45
Multiblock corrupted pointer fix OpenSSL 1.0.2 introduced the "multiblock" performance improvement. This feature only applies on 64 bit x86 architecture platforms that support AES NI instructions. A defect in the implementation of "multiblock" can cause OpenSSL's internal write buffer to become incorrectly set to NULL when using non-blocking IO. Typically, when the user application is using a socket BIO for writing, this will only result in a failed connection. However if some other BIO is used then it is likely that a segmentation fault will be triggered, thus enabling a potential DoS attack. CVE-2015-0290 Reviewed-by: Richard Levitte <[email protected]> Reviewed-by: Andy Polyakov <[email protected]>
save_image (const gchar *filename, gint32 image_ID, gint32 drawable_ID, GError **error) { g_message ("Saving not implemented yet"); return FALSE; }
target: 0
cwe: [ "CWE-787" ]
project: gimp
commit_id: 48ec15890e1751dede061f6d1f469b6508c13439
hash: 312,416,511,491,547,100,000,000,000,000,000,000,000
size: 9
file-psp: fix for bogus input data. Fixes bug #639203
static __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { int ret; /* Retain compatibility with failing for an invalid attach attempt */ if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == IORING_SETUP_ATTACH_WQ) { struct fd f; f = fdget(p->wq_fd); if (!f.file) return -ENXIO; if (f.file->f_op != &io_uring_fops) { fdput(f); return -EINVAL; } fdput(f); } if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; struct io_sq_data *sqd; bool attached; ret = security_uring_sqpoll(); if (ret) return ret; sqd = io_get_sq_data(p, &attached); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; } ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); if (!ctx->sq_thread_idle) ctx->sq_thread_idle = HZ; io_sq_thread_park(sqd); list_add(&ctx->sqd_list, &sqd->ctx_list); io_sqd_update_thread_idle(sqd); /* don't attach to a dying SQPOLL thread, would be racy */ ret = (attached && !sqd->thread) ? -ENXIO : 0; io_sq_thread_unpark(sqd); if (ret < 0) goto err; if (attached) return 0; if (p->flags & IORING_SETUP_SQ_AFF) { int cpu = p->sq_thread_cpu; ret = -EINVAL; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) goto err_sqpoll; sqd->sq_cpu = cpu; } else { sqd->sq_cpu = -1; } sqd->task_pid = current->pid; sqd->task_tgid = current->tgid; tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err_sqpoll; } sqd->thread = tsk; ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) goto err; } else if (p->flags & IORING_SETUP_SQ_AFF) { /* Can't have SQ_AFF without SQPOLL */ ret = -EINVAL; goto err; } return 0; err_sqpoll: complete(&ctx->sq_data->exited); err: io_sq_thread_finish(ctx); return ret;
target: 0
cwe: [ "CWE-416" ]
project: linux
commit_id: e677edbcabee849bfdd43f1602bccbecf736a646
hash: 109,152,554,091,015,780,000,000,000,000,000,000,000
size: 89
io_uring: fix race between timeout flush and removal io_flush_timeouts() assumes the timeout isn't in progress of triggering or being removed/canceled, so it unconditionally removes it from the timeout list and attempts to cancel it. Leave it on the list and let the normal timeout cancelation take care of it. Cc: [email protected] # 5.5+ Signed-off-by: Jens Axboe <[email protected]>
static int php_iconv_stream_filter_append_bucket( php_iconv_stream_filter *self, php_stream *stream, php_stream_filter *filter, php_stream_bucket_brigade *buckets_out, const char *ps, size_t buf_len, size_t *consumed, int persistent) { php_stream_bucket *new_bucket; char *out_buf = NULL; size_t out_buf_size; char *pd, *pt; size_t ocnt, prev_ocnt, icnt, tcnt; size_t initial_out_buf_size; if (ps == NULL) { initial_out_buf_size = 64; icnt = 1; } else { initial_out_buf_size = buf_len; icnt = buf_len; } out_buf_size = ocnt = prev_ocnt = initial_out_buf_size; if (NULL == (out_buf = pemalloc(out_buf_size, persistent))) { return FAILURE; } pd = out_buf; if (self->stub_len > 0) { pt = self->stub; tcnt = self->stub_len; while (tcnt > 0) { if (iconv(self->cd, &pt, &tcnt, &pd, &ocnt) == (size_t)-1) { #if ICONV_SUPPORTS_ERRNO switch (errno) { case EILSEQ: php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): invalid multibyte sequence", self->from_charset, self->to_charset); goto out_failure; case EINVAL: if (ps != NULL) { if (icnt > 0) { if (self->stub_len >= sizeof(self->stub)) { php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): insufficient buffer", self->from_charset, self->to_charset); goto out_failure; } self->stub[self->stub_len++] = *(ps++); icnt--; pt = self->stub; tcnt = self->stub_len; } else { tcnt = 0; break; } } else { php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): invalid multibyte sequence", self->from_charset, self->to_charset); goto out_failure; } break; case E2BIG: { char *new_out_buf; size_t new_out_buf_size; new_out_buf_size = out_buf_size << 1; if (new_out_buf_size < out_buf_size) { /* whoa! no bigger buckets are sold anywhere... */ if (NULL == (new_bucket = php_stream_bucket_new(stream, out_buf, (out_buf_size - ocnt), 1, persistent))) { goto out_failure; } php_stream_bucket_append(buckets_out, new_bucket); out_buf_size = ocnt = initial_out_buf_size; if (NULL == (out_buf = pemalloc(out_buf_size, persistent))) { return FAILURE; } pd = out_buf; } else { if (NULL == (new_out_buf = perealloc(out_buf, new_out_buf_size, persistent))) { if (NULL == (new_bucket = php_stream_bucket_new(stream, out_buf, (out_buf_size - ocnt), 1, persistent))) { goto out_failure; } php_stream_bucket_append(buckets_out, new_bucket); return FAILURE; } pd = new_out_buf + (pd - out_buf); ocnt += (new_out_buf_size - out_buf_size); out_buf = new_out_buf; out_buf_size = new_out_buf_size; } } break; default: php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): unknown error", self->from_charset, self->to_charset); goto out_failure; } #else if (ocnt == prev_ocnt) { php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): unknown error", self->from_charset, self->to_charset); goto out_failure; } #endif } prev_ocnt = ocnt; } memmove(self->stub, pt, tcnt); self->stub_len = tcnt; } while (icnt > 0) { if ((ps == NULL ? 
iconv(self->cd, NULL, NULL, &pd, &ocnt): iconv(self->cd, (char **)&ps, &icnt, &pd, &ocnt)) == (size_t)-1) { #if ICONV_SUPPORTS_ERRNO switch (errno) { case EILSEQ: php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): invalid multibyte sequence", self->from_charset, self->to_charset); goto out_failure; case EINVAL: if (ps != NULL) { if (icnt > sizeof(self->stub)) { php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): insufficient buffer", self->from_charset, self->to_charset); goto out_failure; } memcpy(self->stub, ps, icnt); self->stub_len = icnt; ps += icnt; icnt = 0; } else { php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): unexpected octet values", self->from_charset, self->to_charset); goto out_failure; } break; case E2BIG: { char *new_out_buf; size_t new_out_buf_size; new_out_buf_size = out_buf_size << 1; if (new_out_buf_size < out_buf_size) { /* whoa! no bigger buckets are sold anywhere... */ if (NULL == (new_bucket = php_stream_bucket_new(stream, out_buf, (out_buf_size - ocnt), 1, persistent))) { goto out_failure; } php_stream_bucket_append(buckets_out, new_bucket); out_buf_size = ocnt = initial_out_buf_size; if (NULL == (out_buf = pemalloc(out_buf_size, persistent))) { return FAILURE; } pd = out_buf; } else { if (NULL == (new_out_buf = perealloc(out_buf, new_out_buf_size, persistent))) { if (NULL == (new_bucket = php_stream_bucket_new(stream, out_buf, (out_buf_size - ocnt), 1, persistent))) { goto out_failure; } php_stream_bucket_append(buckets_out, new_bucket); return FAILURE; } pd = new_out_buf + (pd - out_buf); ocnt += (new_out_buf_size - out_buf_size); out_buf = new_out_buf; out_buf_size = new_out_buf_size; } } break; default: php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): unknown error", self->from_charset, self->to_charset); goto out_failure; } #else if (ocnt == prev_ocnt) { php_error_docref(NULL, E_WARNING, "iconv stream filter (\"%s\"=>\"%s\"): unknown error", self->from_charset, self->to_charset); goto out_failure; } #endif } else { if (ps == NULL) { break; } } prev_ocnt = ocnt; } if (out_buf_size > ocnt) { if (NULL == (new_bucket = php_stream_bucket_new(stream, out_buf, (out_buf_size - ocnt), 1, persistent))) { goto out_failure; } php_stream_bucket_append(buckets_out, new_bucket); } else { pefree(out_buf, persistent); } *consumed += buf_len - icnt; return SUCCESS; out_failure: pefree(out_buf, persistent); return FAILURE; }
0
[ "CWE-125" ]
php-src
7cf7148a8f8f4f55fb04de2a517d740bb6253eac
5,608,409,767,806,426,000,000,000,000,000,000,000
208
Fix bug #78069 - Out-of-bounds read in iconv.c:_php_iconv_mime_decode() due to integer overflow
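The bug referenced above is an out-of-bounds read triggered by an integer overflow in a size computation. The short, generic C sketch below shows the overflow-safe bounds check this class of fix relies on: compare the wanted length against the remaining space rather than adding lengths together first. It is a hedged illustration, not the actual ext/iconv code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Return true only if `want` bytes starting at `offset` stay inside a
 * buffer of `buf_len` bytes. The subtraction form cannot overflow. */
static bool range_ok(size_t buf_len, size_t offset, size_t want)
{
    if (offset > buf_len)
        return false;
    return want <= buf_len - offset;
}

int main(void)
{
    printf("%d\n", range_ok(100, 90, 20));        /* 0: runs past the end */
    printf("%d\n", range_ok(100, 90, 10));        /* 1: fits exactly */
    printf("%d\n", range_ok(100, (size_t)-1, 2)); /* 0: offset already bogus */
    return 0;
}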
PHP_FUNCTION(imagecreate) { long x_size, y_size; gdImagePtr im; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ll", &x_size, &y_size) == FAILURE) { return; } if (x_size <= 0 || y_size <= 0 || x_size >= INT_MAX || y_size >= INT_MAX) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid image dimensions"); RETURN_FALSE; } im = gdImageCreate(x_size, y_size); if (!im) { RETURN_FALSE; } ZEND_REGISTER_RESOURCE(return_value, im, le_gd); }
0
[ "CWE-703", "CWE-189" ]
php-src
2938329ce19cb8c4197dec146c3ec887c6f61d01
43,696,924,350,741,490,000,000,000,000,000,000,000
22
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop()) And also fixed the bug: arguments are altered after some calls
*/ int register_netdev(struct net_device *dev) { int err; if (rtnl_lock_killable()) return -EINTR; err = register_netdevice(dev); rtnl_unlock(); return err;
0
[ "CWE-416" ]
linux
a4270d6795b0580287453ea55974d948393e66ef
268,682,229,120,321,500,000,000,000,000,000,000,000
10
net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
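The KASAN trace above comes down to reading a header field from a page fragment after the fragment's reference was dropped, when the backing page may already be reused. The stand-alone C toy below illustrates the rule the fix enforces: copy the bytes you need while the reference is still held, then release it. The refcounted buffer is a stand-in, not the kernel's skb fragment machinery.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy refcounted buffer standing in for a page fragment. */
struct frag {
    int refs;
    uint8_t data[64];
};

static void frag_put(struct frag *f)
{
    if (--f->refs == 0)
        free(f);            /* memory may be reused right after this */
}

/* Safe pattern: capture the field while the reference is held, drop the
 * reference, then use only the local copy. */
static uint16_t read_proto_then_release(struct frag *f)
{
    uint16_t proto;

    memcpy(&proto, f->data + 12, sizeof(proto));  /* copy before release */
    frag_put(f);
    return proto;           /* no access to f->data past this point */
}

int main(void)
{
    struct frag *f = calloc(1, sizeof(*f));

    if (!f)
        return 1;
    f->refs = 1;
    f->data[12] = 0x08;     /* pretend this is part of an Ethertype */
    printf("proto bytes: 0x%04x\n", (unsigned)read_proto_then_release(f));
    return 0;
}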
static void sas_discover_domain(struct work_struct *work) { struct domain_device *dev; int error = 0; struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); if (port->port_dev) return; error = sas_get_port_device(port); if (error) return; dev = port->port_dev; pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id, task_pid_nr(current)); switch (dev->dev_type) { case SAS_END_DEVICE: error = sas_discover_end_dev(dev); break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: error = sas_discover_root_expander(dev); break; case SAS_SATA_DEV: case SAS_SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; #else pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); /* Fall through */ #endif /* Fall through - only for the #else condition above. */ default: error = -ENXIO; pr_err("unhandled device %d\n", dev->dev_type); break; } if (error) { sas_rphy_free(dev->rphy); list_del_init(&dev->disco_list_node); spin_lock_irq(&port->dev_list_lock); list_del_init(&dev->dev_list_node); spin_unlock_irq(&port->dev_list_lock); sas_put_device(dev); port->port_dev = NULL; } sas_probe_devices(port); pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, task_pid_nr(current), error); }
0
[ "CWE-476" ]
linux
f70267f379b5e5e11bdc5d72a56bf17e5feed01f
314,542,690,833,052,880,000,000,000,000,000,000,000
60
scsi: libsas: stop discovering if oob mode is disconnected The discovering of sas port is driven by workqueue in libsas. When libsas is processing port events or phy events in workqueue, new events may rise up and change the state of some structures such as asd_sas_phy. This may cause some problems such as follows: ==>thread 1 ==>thread 2 ==>phy up ==>phy_up_v3_hw() ==>oob_mode = SATA_OOB_MODE; ==>phy down quickly ==>hisi_sas_phy_down() ==>sas_ha->notify_phy_event() ==>sas_phy_disconnected() ==>oob_mode = OOB_NOT_CONNECTED ==>workqueue wakeup ==>sas_form_port() ==>sas_discover_domain() ==>sas_get_port_device() ==>oob_mode is OOB_NOT_CONNECTED and device is wrongly taken as expander This at last lead to the panic when libsas trying to issue a command to discover the device. [183047.614035] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000058 [183047.622896] Mem abort info: [183047.625762] ESR = 0x96000004 [183047.628893] Exception class = DABT (current EL), IL = 32 bits [183047.634888] SET = 0, FnV = 0 [183047.638015] EA = 0, S1PTW = 0 [183047.641232] Data abort info: [183047.644189] ISV = 0, ISS = 0x00000004 [183047.648100] CM = 0, WnR = 0 [183047.651145] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000b7df67be [183047.657834] [0000000000000058] pgd=0000000000000000 [183047.662789] Internal error: Oops: 96000004 [#1] SMP [183047.667740] Process kworker/u16:2 (pid: 31291, stack limit = 0x00000000417c4974) [183047.675208] CPU: 0 PID: 3291 Comm: kworker/u16:2 Tainted: G W OE 4.19.36-vhulk1907.1.0.h410.eulerosv2r8.aarch64 #1 [183047.687015] Hardware name: N/A N/A/Kunpeng Desktop Board D920S10, BIOS 0.15 10/22/2019 [183047.695007] Workqueue: 0000:74:02.0_disco_q sas_discover_domain [183047.700999] pstate: 20c00009 (nzCv daif +PAN +UAO) [183047.705864] pc : prep_ata_v3_hw+0xf8/0x230 [hisi_sas_v3_hw] [183047.711510] lr : prep_ata_v3_hw+0xb0/0x230 [hisi_sas_v3_hw] [183047.717153] sp : ffff00000f28ba60 [183047.720541] x29: ffff00000f28ba60 x28: ffff8026852d7228 [183047.725925] x27: ffff8027dba3e0a8 x26: ffff8027c05fc200 [183047.731310] x25: 0000000000000000 x24: ffff8026bafa8dc0 [183047.736695] x23: ffff8027c05fc218 x22: ffff8026852d7228 [183047.742079] x21: ffff80007c2f2940 x20: ffff8027c05fc200 [183047.747464] x19: 0000000000f80800 x18: 0000000000000010 [183047.752848] x17: 0000000000000000 x16: 0000000000000000 [183047.758232] x15: ffff000089a5a4ff x14: 0000000000000005 [183047.763617] x13: ffff000009a5a50e x12: ffff8026bafa1e20 [183047.769001] x11: ffff0000087453b8 x10: ffff00000f28b870 [183047.774385] x9 : 0000000000000000 x8 : ffff80007e58f9b0 [183047.779770] x7 : 0000000000000000 x6 : 000000000000003f [183047.785154] x5 : 0000000000000040 x4 : ffffffffffffffe0 [183047.790538] x3 : 00000000000000f8 x2 : 0000000002000007 [183047.795922] x1 : 0000000000000008 x0 : 0000000000000000 [183047.801307] Call trace: [183047.803827] prep_ata_v3_hw+0xf8/0x230 [hisi_sas_v3_hw] [183047.809127] hisi_sas_task_prep+0x750/0x888 [hisi_sas_main] [183047.814773] hisi_sas_task_exec.isra.7+0x88/0x1f0 [hisi_sas_main] [183047.820939] hisi_sas_queue_command+0x28/0x38 [hisi_sas_main] [183047.826757] smp_execute_task_sg+0xec/0x218 [183047.831013] smp_execute_task+0x74/0xa0 [183047.834921] sas_discover_expander.part.7+0x9c/0x5f8 [183047.839959] sas_discover_root_expander+0x90/0x160 [183047.844822] sas_discover_domain+0x1b8/0x1e8 [183047.849164] process_one_work+0x1b4/0x3f8 [183047.853246] worker_thread+0x54/0x470 [183047.856981] kthread+0x134/0x138 [183047.860283] 
ret_from_fork+0x10/0x18 [183047.863931] Code: f9407a80 528000e2 39409281 72a04002 (b9405800) [183047.870097] kernel fault(0x1) notification starting on CPU 0 [183047.875828] kernel fault(0x1) notification finished on CPU 0 [183047.881559] Modules linked in: unibsp(OE) hns3(OE) hclge(OE) hnae3(OE) mem_drv(OE) hisi_sas_v3_hw(OE) hisi_sas_main(OE) [183047.892418] ---[ end trace 4cc26083fc11b783 ]--- [183047.897107] Kernel panic - not syncing: Fatal exception [183047.902403] kernel fault(0x5) notification starting on CPU 0 [183047.908134] kernel fault(0x5) notification finished on CPU 0 [183047.913865] SMP: stopping secondary CPUs [183047.917861] Kernel Offset: disabled [183047.921422] CPU features: 0x2,a2a00a38 [183047.925243] Memory Limit: none [183047.928372] kernel reboot(0x2) notification starting on CPU 0 [183047.934190] kernel reboot(0x2) notification finished on CPU 0 [183047.940008] ---[ end Kernel panic - not syncing: Fatal exception ]--- Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Link: https://lore.kernel.org/r/[email protected] Reported-by: Gao Chuan <[email protected]> Reviewed-by: John Garry <[email protected]> Signed-off-by: Jason Yan <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
nautilus_file_set_permissions (NautilusFile *file, guint32 new_permissions, NautilusFileOperationCallback callback, gpointer callback_data) { GFileInfo *info; GError *error; if (!nautilus_file_can_set_permissions (file)) { /* Claim that something changed even if the permission change failed. * This makes it easier for some clients who see the "reverting" * to the old permissions as "changing back". */ nautilus_file_changed (file); error = g_error_new (G_IO_ERROR, G_IO_ERROR_PERMISSION_DENIED, _("Not allowed to set permissions")); (* callback) (file, NULL, error, callback_data); g_error_free (error); return; } /* Test the permissions-haven't-changed case explicitly * because we don't want to send the file-changed signal if * nothing changed. */ if (new_permissions == file->details->permissions) { (* callback) (file, NULL, NULL, callback_data); return; } info = g_file_info_new (); g_file_info_set_attribute_uint32 (info, G_FILE_ATTRIBUTE_UNIX_MODE, new_permissions); nautilus_file_set_attributes (file, info, callback, callback_data); g_object_unref (info); }
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
21,244,677,630,862,138,000,000,000,000,000,000,000
35
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
TEST_F(QueryPlannerTest, IntersectElemMatch) { params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION; addIndex(BSON("a.b" << 1)); addIndex(BSON("a.c" << 1)); runQuery(fromjson("{a : {$elemMatch: {b:1, c:1}}}")); assertSolutionExists( "{fetch: {filter: {a:{$elemMatch:{b:1, c:1}}}," "node: {andSorted: {nodes: [" "{ixscan: {filter: null, pattern: {'a.b':1}}}," "{ixscan: {filter: null, pattern: {'a.c':1}}}]}}}}"); }
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
320,964,980,053,163,160,000,000,000,000,000,000,000
11
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
static bool io_register_op_must_quiesce(int op) { switch (op) { case IORING_REGISTER_BUFFERS: case IORING_UNREGISTER_BUFFERS: case IORING_REGISTER_FILES: case IORING_UNREGISTER_FILES: case IORING_REGISTER_FILES_UPDATE: case IORING_REGISTER_PROBE: case IORING_REGISTER_PERSONALITY: case IORING_UNREGISTER_PERSONALITY: case IORING_REGISTER_FILES2: case IORING_REGISTER_FILES_UPDATE2: case IORING_REGISTER_BUFFERS2: case IORING_REGISTER_BUFFERS_UPDATE: case IORING_REGISTER_IOWQ_AFF: case IORING_UNREGISTER_IOWQ_AFF: return false; default: return true; }
0
[ "CWE-125" ]
linux
89c2b3b74918200e46699338d7bcc19b1ea12110
78,795,314,303,364,820,000,000,000,000,000,000,000
22
io_uring: reexpand under-reexpanded iters [ 74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900 [ 74.212778] Read of size 8 at addr ffff888025dc78b8 by task syz-executor.0/828 [ 74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted 5.14.0-rc3-next-20210730 #1 [ 74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 [ 74.219033] Call Trace: [ 74.219683] dump_stack_lvl+0x8b/0xb3 [ 74.220706] print_address_description.constprop.0+0x1f/0x140 [ 74.224226] kasan_report.cold+0x7f/0x11b [ 74.226085] iov_iter_revert+0x809/0x900 [ 74.227960] io_write+0x57d/0xe40 [ 74.232647] io_issue_sqe+0x4da/0x6a80 [ 74.242578] __io_queue_sqe+0x1ac/0xe60 [ 74.245358] io_submit_sqes+0x3f6e/0x76a0 [ 74.248207] __do_sys_io_uring_enter+0x90c/0x1a20 [ 74.257167] do_syscall_64+0x3b/0x90 [ 74.257984] entry_SYSCALL_64_after_hwframe+0x44/0xae old_size = iov_iter_count(); ... iov_iter_revert(old_size - iov_iter_count()); If iov_iter_revert() is done base on the initial size as above, and the iter is truncated and not reexpanded in the middle, it miscalculates borders causing problems. This trace is due to no one reexpanding after generic_write_checks(). Now iters store how many bytes has been truncated, so reexpand them to the initial state right before reverting. Cc: [email protected] Reported-by: Palash Oswal <[email protected]> Reported-by: Sudip Mukherjee <[email protected]> Reported-and-tested-by: [email protected] Signed-off-by: Pavel Begunkov <[email protected]> Signed-off-by: Al Viro <[email protected]>
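The message above quotes the faulty bookkeeping: reverting by old_size - iov_iter_count() over-reverts when the iterator was truncated and never re-expanded in between. The toy C model below only mimics the byte counting (it is not the kernel's iov_iter API) and tracks the truncated amount explicitly so the arithmetic can be followed.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy iterator: only the byte counting matters for this illustration. */
struct iter {
    size_t count;       /* bytes still available */
    size_t truncated;   /* bytes hidden by truncation */
};

static void iter_truncate(struct iter *it, size_t to)
{
    if (it->count > to) {
        it->truncated += it->count - to;
        it->count = to;
    }
}

static void iter_reexpand(struct iter *it)
{
    it->count += it->truncated;
    it->truncated = 0;
}

static void iter_advance(struct iter *it, size_t n)
{
    assert(n <= it->count);
    it->count -= n;
}

static void iter_revert(struct iter *it, size_t n)
{
    it->count += n;
}

int main(void)
{
    struct iter it = { .count = 100, .truncated = 0 };
    size_t old_size = it.count;

    iter_truncate(&it, 60);  /* e.g. a write-checks style size cap */
    iter_advance(&it, 10);   /* partial progress before an error */

    /* Re-expand first, so old_size - it.count equals the bytes actually
     * consumed (10) instead of consumed plus hidden-by-truncation (50). */
    iter_reexpand(&it);
    iter_revert(&it, old_size - it.count);

    printf("count after revert: %zu\n", it.count);  /* back to 100 */
    return 0;
}

Without the iter_reexpand() call, the revert would be 50 bytes instead of 10, which is the kind of miscounted revert the report above flags.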
Formattable::setDouble(double d) { dispose(); fType = kDouble; fValue.fDouble = d; }
0
[ "CWE-190" ]
icu
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
25,138,677,024,186,845,000,000,000,000,000,000,000
6
ICU-20246 Fixing another integer overflow in number parsing.
int PCS2ITU(register const cmsUInt16Number In[], register cmsUInt16Number Out[], register void* Cargo) { cmsCIELab Lab; cmsLabEncoded2Float(&Lab, In); cmsDesaturateLab(&Lab, 85, -85, 125, -75); // This function does the necessary gamut remapping Lab2ITU(&Lab, Out); return TRUE; UTILS_UNUSED_PARAMETER(Cargo); }
0
[]
Little-CMS
06d4557477e7ab3330a24d69af4c67adcac9acdf
22,665,857,770,647,160,000,000,000,000,000,000,000
11
utils/jpgicc/jpgicc.c: Fix fprintf parameter number
WandExport void DrawSetClipUnits(DrawingWand *wand, const ClipPathUnits clip_units) { assert(wand != (DrawingWand *) NULL); assert(wand->signature == MagickWandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if ((wand->filter_off != MagickFalse) || (CurrentContext->clip_units != clip_units)) { CurrentContext->clip_units=clip_units; if (clip_units == ObjectBoundingBox) { AffineMatrix affine; GetAffineMatrix(&affine); affine.sx=CurrentContext->bounds.x2; affine.sy=CurrentContext->bounds.y2; affine.tx=CurrentContext->bounds.x1; affine.ty=CurrentContext->bounds.y1; AdjustAffine(wand,&affine); } (void) MVGPrintf(wand, "clip-units '%s'\n",CommandOptionToMnemonic( MagickClipPathOptions,(ssize_t) clip_units)); } }
0
[ "CWE-476" ]
ImageMagick
6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9
96,223,194,873,559,200,000,000,000,000,000,000,000
27
https://github.com/ImageMagick/ImageMagick/issues/716
void Type_Measurement_Free(struct _cms_typehandler_struct* self, void* Ptr) { _cmsFree(self ->ContextID, Ptr); }
0
[]
Little-CMS
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
71,578,073,363,457,140,000,000,000,000,000,000,000
4
Memory squeezing fix: lcms2 cmsPipeline construction When creating a new pipeline, lcms would often try to allocate a stage and pass it to cmsPipelineInsertStage without checking whether the allocation succeeded. cmsPipelineInsertStage would then assert (or crash) if it had not. The fix here is to change cmsPipelineInsertStage to check and return an error value. All calling code is then checked to test this return value and cope.
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; *sockaddr_url = url; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return false; snprintf(url_address, 254, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; }
0
[ "CWE-119", "CWE-787" ]
cgminer
e1c5050734123973b99d181c45e74b2cbb00272e
24,800,190,001,463,534,000,000,000,000,000,000,000
49
Do some random sanity checking for stratum message parsing
day_to_ns(VALUE d) { return f_mul(d, day_in_nanoseconds); }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
86,547,987,986,076,430,000,000,000,000,000,000,000
4
Add length limit option for methods that parses date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
rx_queue_count(struct mlx5_rxq_data *rxq) { struct rxq_zip *zip = &rxq->zip; volatile struct mlx5_cqe *cqe; const unsigned int cqe_n = (1 << rxq->cqe_n); const unsigned int sges_n = (1 << rxq->sges_n); const unsigned int elts_n = (1 << rxq->elts_n); const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num); const unsigned int cqe_cnt = cqe_n - 1; unsigned int cq_ci, used; /* if we are processing a compressed cqe */ if (zip->ai) { used = zip->cqe_cnt - zip->ai; cq_ci = zip->cq_ci; } else { used = 0; cq_ci = rxq->cq_ci; } cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) { int8_t op_own; unsigned int n; op_own = cqe->op_own; if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) n = rte_be_to_cpu_32(cqe->byte_cnt); else n = 1; cq_ci += n; used += n; cqe = &(*rxq->cqes)[cq_ci & cqe_cnt]; } used = RTE_MIN(used * sges_n, elts_n * strd_n); return used; }
0
[]
dpdk
60b254e3923d007bcadbb8d410f95ad89a2f13fa
326,442,329,643,772,130,000,000,000,000,000,000,000
36
net/mlx5: fix Rx queue recovery mechanism The local variables are getting inconsistent in data receiving routines after queue error recovery. Receive queue consumer index is getting wrong, need to reset one to the size of the queue (as RQ was fully replenished in recovery procedure). In MPRQ case, also the local consumed strd variable should be reset. CVE-2022-28199 Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling") Cc: [email protected] Signed-off-by: Alexander Kozyrev <[email protected]> Signed-off-by: Matan Azrad <[email protected]>
static int ZEND_FASTCALL ZEND_RETURN_SPEC_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zval *retval_ptr; zval **retval_ptr_ptr; if (EG(active_op_array)->return_reference == ZEND_RETURN_REF) { if (IS_CONST == IS_CONST || IS_CONST == IS_TMP_VAR) { /* Not supposed to happen, but we'll allow it */ zend_error(E_NOTICE, "Only variable references should be returned by reference"); goto return_by_value; } retval_ptr_ptr = NULL; if (IS_CONST == IS_VAR && !retval_ptr_ptr) { zend_error_noreturn(E_ERROR, "Cannot return string offsets by reference"); } if (IS_CONST == IS_VAR && !Z_ISREF_PP(retval_ptr_ptr)) { if (opline->extended_value == ZEND_RETURNS_FUNCTION && EX_T(opline->op1.u.var).var.fcall_returned_reference) { } else if (EX_T(opline->op1.u.var).var.ptr_ptr == &EX_T(opline->op1.u.var).var.ptr) { if (IS_CONST == IS_VAR && !0) { PZVAL_LOCK(*retval_ptr_ptr); /* undo the effect of get_zval_ptr_ptr() */ } zend_error(E_NOTICE, "Only variable references should be returned by reference"); goto return_by_value; } } if (EG(return_value_ptr_ptr)) { SEPARATE_ZVAL_TO_MAKE_IS_REF(retval_ptr_ptr); Z_ADDREF_PP(retval_ptr_ptr); (*EG(return_value_ptr_ptr)) = (*retval_ptr_ptr); } } else { return_by_value: retval_ptr = &opline->op1.u.constant; if (!EG(return_value_ptr_ptr)) { if (IS_CONST == IS_TMP_VAR) { } } else if (!0) { /* Not a temp var */ if (IS_CONST == IS_CONST || EG(active_op_array)->return_reference == ZEND_RETURN_REF || (PZVAL_IS_REF(retval_ptr) && Z_REFCOUNT_P(retval_ptr) > 0)) { zval *ret; ALLOC_ZVAL(ret); INIT_PZVAL_COPY(ret, retval_ptr); zval_copy_ctor(ret); *EG(return_value_ptr_ptr) = ret; } else if ((IS_CONST == IS_CV || IS_CONST == IS_VAR) && retval_ptr == &EG(uninitialized_zval)) { zval *ret; ALLOC_INIT_ZVAL(ret); *EG(return_value_ptr_ptr) = ret; } else { *EG(return_value_ptr_ptr) = retval_ptr; Z_ADDREF_P(retval_ptr); } } else { zval *ret; ALLOC_ZVAL(ret); INIT_PZVAL_COPY(ret, retval_ptr); *EG(return_value_ptr_ptr) = ret; } } return zend_leave_helper_SPEC(ZEND_OPCODE_HANDLER_ARGS_PASSTHRU); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
219,177,505,469,514,800,000,000,000,000,000,000,000
79
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
ins_down( int startcol) // when TRUE move to Insstart.col { pos_T tpos; linenr_T old_topline = curwin->w_topline; #ifdef FEAT_DIFF int old_topfill = curwin->w_topfill; #endif undisplay_dollar(); tpos = curwin->w_cursor; if (cursor_down(1L, TRUE) == OK) { if (startcol) coladvance(getvcol_nolist(&Insstart)); if (old_topline != curwin->w_topline #ifdef FEAT_DIFF || old_topfill != curwin->w_topfill #endif ) redraw_later(VALID); start_arrow(&tpos); #ifdef FEAT_CINDENT can_cindent = TRUE; #endif } else vim_beep(BO_CRSR); }
0
[]
vim
98a336dd497d3422e7efeef9f24cc9e25aeb8a49
139,517,121,434,861,620,000,000,000,000,000,000,000
29
patch 8.2.0133: invalid memory access with search command Problem: Invalid memory access with search command. Solution: When :normal runs out of characters in bracketed paste mode break out of the loop.(closes #5511)
inline uint8_t* WireFormatLite::WriteDoubleToArray( int field_number, const RepeatedField<double>& value, uint8_t* target) { return WritePrimitiveToArray(field_number, value, WriteDoubleToArray, target); }
0
[ "CWE-703" ]
protobuf
d1635e1496f51e0d5653d856211e8821bc47adc4
317,329,573,506,816,280,000,000,000,000,000,000,000
4
Apply patch
int ldb_match_msg(struct ldb_context *ldb, const struct ldb_message *msg, const struct ldb_parse_tree *tree, struct ldb_dn *base, enum ldb_scope scope) { bool matched; int ret; if ( ! ldb_match_scope(ldb, base, msg->dn, scope) ) { return 0; } ret = ldb_match_message(ldb, msg, tree, scope, &matched); if (ret != LDB_SUCCESS) { /* to match the old API, we need to consider this a failure to match */ return 0; } return matched?1:0; }
0
[ "CWE-189" ]
samba
ec504dbf69636a554add1f3d5703dd6c3ad450b8
53,112,552,518,686,830,000,000,000,000,000,000,000
21
CVE-2015-3223: lib: ldb: Cope with canonicalise_fn returning string "", length 0. BUG: https://bugzilla.samba.org/show_bug.cgi?id=11325 Signed-off-by: Jeremy Allison <[email protected]> Reviewed-by: Ralph Boehme <[email protected]>
static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_bundles, const struct xfrm_kmaddress *k) { int i; int sasize_sel; int size = 0; int size_pol = 0; struct sk_buff *skb; struct sadb_msg *hdr; struct sadb_x_policy *pol; const struct xfrm_migrate *mp; if (type != XFRM_POLICY_TYPE_MAIN) return 0; if (num_bundles <= 0 || num_bundles > XFRM_MAX_DEPTH) return -EINVAL; if (k != NULL) { /* addresses for KM */ size += PFKEY_ALIGN8(sizeof(struct sadb_x_kmaddress) + pfkey_sockaddr_pair_size(k->family)); } /* selector */ sasize_sel = pfkey_sockaddr_size(sel->family); if (!sasize_sel) return -EINVAL; size += (sizeof(struct sadb_address) + sasize_sel) * 2; /* policy info */ size_pol += sizeof(struct sadb_x_policy); /* ipsecrequests */ for (i = 0, mp = m; i < num_bundles; i++, mp++) { /* old locator pair */ size_pol += sizeof(struct sadb_x_ipsecrequest) + pfkey_sockaddr_pair_size(mp->old_family); /* new locator pair */ size_pol += sizeof(struct sadb_x_ipsecrequest) + pfkey_sockaddr_pair_size(mp->new_family); } size += sizeof(struct sadb_msg) + size_pol; /* alloc buffer */ skb = alloc_skb(size, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; hdr = (struct sadb_msg *)skb_put(skb, sizeof(struct sadb_msg)); hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_type = SADB_X_MIGRATE; hdr->sadb_msg_satype = pfkey_proto2satype(m->proto); hdr->sadb_msg_len = size / 8; hdr->sadb_msg_errno = 0; hdr->sadb_msg_reserved = 0; hdr->sadb_msg_seq = 0; hdr->sadb_msg_pid = 0; /* Addresses to be used by KM for negotiation, if ext is available */ if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) goto err; /* selector src */ set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel); /* selector dst */ set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel); /* policy information */ pol = (struct sadb_x_policy *)skb_put(skb, sizeof(struct sadb_x_policy)); pol->sadb_x_policy_len = size_pol / 8; pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY; pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC; pol->sadb_x_policy_dir = dir + 1; pol->sadb_x_policy_reserved = 0; pol->sadb_x_policy_id = 0; pol->sadb_x_policy_priority = 0; for (i = 0, mp = m; i < num_bundles; i++, mp++) { /* old ipsecrequest */ int mode = pfkey_mode_from_xfrm(mp->mode); if (mode < 0) goto err; if (set_ipsecrequest(skb, mp->proto, mode, (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE), mp->reqid, mp->old_family, &mp->old_saddr, &mp->old_daddr) < 0) goto err; /* new ipsecrequest */ if (set_ipsecrequest(skb, mp->proto, mode, (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE), mp->reqid, mp->new_family, &mp->new_saddr, &mp->new_daddr) < 0) goto err; } /* broadcast migrate message to sockets */ pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); return 0; err: kfree_skb(skb); return -EINVAL; }
0
[]
linux
096f41d3a8fcbb8dde7f71379b1ca85fe213eded
208,678,437,282,559,600,000,000,000,000,000,000,000
109
af_key: Fix sadb_x_ipsecrequest parsing The parsing of sadb_x_ipsecrequest is broken in a number of ways. First of all we're not verifying sadb_x_ipsecrequest_len. This is needed when the structure carries addresses at the end. Worse we don't even look at the length when we parse those optional addresses. The migration code had similar parsing code that's better but it also has some deficiencies. The length is overcounted first of all as it includes the header itself. It also fails to check the length before dereferencing the sa_family field. This patch fixes those problems in parse_sockaddr_pair and then uses it in parse_ipsecrequest. Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
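The parsing problem described above includes dereferencing sa_family before verifying that the remaining length can hold a sockaddr at all. A hedged, stand-alone C sketch of the check-length-first pattern follows, using a plain struct sockaddr rather than the PF_KEY extension structures.

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Parse the address family out of a buffer that may be truncated.
 * Returns the family, or -1 if the buffer cannot hold a sockaddr. */
static int parse_family(const unsigned char *buf, size_t len)
{
    struct sockaddr sa;

    if (len < sizeof(sa))            /* length check before any access */
        return -1;
    memcpy(&sa, buf, sizeof(sa));    /* bounded copy, no overread */
    return sa.sa_family;
}

int main(void)
{
    unsigned char short_buf[4] = {0};
    unsigned char ok_buf[sizeof(struct sockaddr)] = {0};
    sa_family_t fam = AF_INET;

    memcpy(ok_buf, &fam, sizeof(fam));   /* sa_family sits at offset 0 */
    printf("%d\n", parse_family(short_buf, sizeof(short_buf)));  /* -1 */
    printf("%d\n", parse_family(ok_buf, sizeof(ok_buf)));        /* 2 on Linux */
    return 0;
}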
irc_server_strcasecmp (struct t_irc_server *server, const char *string1, const char *string2) { int casemapping, rc; casemapping = (server) ? server->casemapping : IRC_SERVER_CASEMAPPING_RFC1459; switch (casemapping) { case IRC_SERVER_CASEMAPPING_RFC1459: rc = weechat_strcasecmp_range (string1, string2, 30); break; case IRC_SERVER_CASEMAPPING_STRICT_RFC1459: rc = weechat_strcasecmp_range (string1, string2, 29); break; case IRC_SERVER_CASEMAPPING_ASCII: rc = weechat_strcasecmp (string1, string2); break; default: rc = weechat_strcasecmp_range (string1, string2, 30); break; } return rc; }
0
[ "CWE-120", "CWE-787" ]
weechat
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
308,558,232,004,930,800,000,000,000,000,000,000,000
23
irc: fix crash when a new message 005 is received with longer nick prefixes Thanks to Stuart Nevans Locke for reporting the issue.
static struct mobj *alloc_ta_mem(size_t size) { #ifdef CFG_PAGED_USER_TA return mobj_paged_alloc(size); #else struct mobj *mobj = mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr); if (mobj) { size_t granularity = BIT(tee_mm_sec_ddr.shift); /* Round up to allocation granularity size */ memset(mobj_get_va(mobj, 0), 0, ROUNDUP(size, granularity)); } return mobj; #endif }
0
[ "CWE-703", "CWE-189" ]
optee_os
7e768f8a473409215fe3fff8f6e31f8a3a0103c6
253,440,491,689,457,600,000,000,000,000,000,000,000
16
core: clear the entire TA area Previously we cleared (memset to zero) the size corresponding to code and data segments, however the allocation for the TA is made on the granularity of the memory pool, meaning that we did not clear all memory and because of that we could potentially leak code and data of a previous loaded TA. Fixes: OP-TEE-2018-0006: "Potential disclosure of previously loaded TA code and data" Signed-off-by: Joakim Bech <[email protected]> Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8) Suggested-by: Jens Wiklander <[email protected]> Reviewed-by: Jens Wiklander <[email protected]> Reported-by: Riscure <[email protected]> Reported-by: Alyssa Milburn <[email protected]> Acked-by: Etienne Carriere <[email protected]>
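The point of the fix above is that the memory handed to a TA is rounded up to the pool granularity, so clearing only the requested size leaves stale bytes from earlier users in place. The tiny sketch below clears the full rounded-up allocation; it uses plain malloc as a stand-in for the OP-TEE pool allocator.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Round v up to the next multiple of g (g > 0). */
#define ROUNDUP(v, g)   ((((v) + (g) - 1) / (g)) * (g))

int main(void)
{
    size_t granularity = 4096;       /* illustrative pool granularity */
    size_t request = 10000;          /* size the loader asked for */
    size_t alloc = ROUNDUP(request, granularity);
    void *p = malloc(alloc);

    if (!p)
        return 1;
    /* Clear everything actually handed out, not just `request`, so no
     * bytes from a previous allocation survive into the new user. */
    memset(p, 0, alloc);
    printf("requested %zu bytes, cleared %zu\n", request, alloc);
    free(p);
    return 0;
}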
static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, ev->role, le16_to_cpu(ev->handle), le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); }
0
[ "CWE-290" ]
linux
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
14,138,004,541,417,597,000,000,000,000,000,000,000
13
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection state is BT_CONFIG so callers don't have to check the state. Signed-off-by: Luiz Augusto von Dentz <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
STATIC SV* S__make_exactf_invlist(pTHX_ RExC_state_t *pRExC_state, regnode *node) { dVAR; const U8 * s = (U8*)STRING(node); SSize_t bytelen = STR_LEN(node); UV uc; /* Start out big enough for 2 separate code points */ SV* invlist = _new_invlist(4); PERL_ARGS_ASSERT__MAKE_EXACTF_INVLIST; if (! UTF) { uc = *s; /* We punt and assume can match anything if the node begins * with a multi-character fold. Things are complicated. For * example, /ffi/i could match any of: * "\N{LATIN SMALL LIGATURE FFI}" * "\N{LATIN SMALL LIGATURE FF}I" * "F\N{LATIN SMALL LIGATURE FI}" * plus several other things; and making sure we have all the * possibilities is hard. */ if (is_MULTI_CHAR_FOLD_latin1_safe(s, s + bytelen)) { invlist = _add_range_to_invlist(invlist, 0, UV_MAX); } else { /* Any Latin1 range character can potentially match any * other depending on the locale, and in Turkic locales, U+130 and * U+131 */ if (OP(node) == EXACTFL) { _invlist_union(invlist, PL_Latin1, &invlist); invlist = add_cp_to_invlist(invlist, LATIN_SMALL_LETTER_DOTLESS_I); invlist = add_cp_to_invlist(invlist, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE); } else { /* But otherwise, it matches at least itself. We can * quickly tell if it has a distinct fold, and if so, * it matches that as well */ invlist = add_cp_to_invlist(invlist, uc); if (IS_IN_SOME_FOLD_L1(uc)) invlist = add_cp_to_invlist(invlist, PL_fold_latin1[uc]); } /* Some characters match above-Latin1 ones under /i. This * is true of EXACTFL ones when the locale is UTF-8 */ if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(uc) && (! isASCII(uc) || (OP(node) != EXACTFAA && OP(node) != EXACTFAA_NO_TRIE))) { add_above_Latin1_folds(pRExC_state, (U8) uc, &invlist); } } } else { /* Pattern is UTF-8 */ U8 folded[UTF8_MAX_FOLD_CHAR_EXPAND * UTF8_MAXBYTES_CASE + 1] = { '\0' }; const U8* e = s + bytelen; IV fc; fc = uc = utf8_to_uvchr_buf(s, s + bytelen, NULL); /* The only code points that aren't folded in a UTF EXACTFish * node are are the problematic ones in EXACTFL nodes */ if (OP(node) == EXACTFL && is_PROBLEMATIC_LOCALE_FOLDEDS_START_cp(uc)) { /* We need to check for the possibility that this EXACTFL * node begins with a multi-char fold. Therefore we fold * the first few characters of it so that we can make that * check */ U8 *d = folded; int i; fc = -1; for (i = 0; i < UTF8_MAX_FOLD_CHAR_EXPAND && s < e; i++) { if (isASCII(*s)) { *(d++) = (U8) toFOLD(*s); if (fc < 0) { /* Save the first fold */ fc = *(d-1); } s++; } else { STRLEN len; UV fold = toFOLD_utf8_safe(s, e, d, &len); if (fc < 0) { /* Save the first fold */ fc = fold; } d += len; s += UTF8SKIP(s); } } /* And set up so the code below that looks in this folded * buffer instead of the node's string */ e = d; s = folded; } /* When we reach here 's' points to the fold of the first * character(s) of the node; and 'e' points to far enough along * the folded string to be just past any possible multi-char * fold. * * Unlike the non-UTF-8 case, the macro for determining if a * string is a multi-char fold requires all the characters to * already be folded. This is because of all the complications * if not. Note that they are folded anyway, except in EXACTFL * nodes. Like the non-UTF case above, we punt if the node * begins with a multi-char fold */ if (is_MULTI_CHAR_FOLD_utf8_safe(s, e)) { invlist = _add_range_to_invlist(invlist, 0, UV_MAX); } else { /* Single char fold */ unsigned int k; unsigned int first_fold; const unsigned int * remaining_folds; Size_t folds_count; /* It matches itself */ invlist = add_cp_to_invlist(invlist, fc); /* ... 
plus all the things that fold to it, which are found in * PL_utf8_foldclosures */ folds_count = _inverse_folds(fc, &first_fold, &remaining_folds); for (k = 0; k < folds_count; k++) { UV c = (k == 0) ? first_fold : remaining_folds[k-1]; /* /aa doesn't allow folds between ASCII and non- */ if ( (OP(node) == EXACTFAA || OP(node) == EXACTFAA_NO_TRIE) && isASCII(c) != isASCII(fc)) { continue; } invlist = add_cp_to_invlist(invlist, c); } if (OP(node) == EXACTFL) { /* If either [iI] are present in an EXACTFL node the above code * should have added its normal case pair, but under a Turkish * locale they could match instead the case pairs from it. Add * those as potential matches as well */ if (isALPHA_FOLD_EQ(fc, 'I')) { invlist = add_cp_to_invlist(invlist, LATIN_SMALL_LETTER_DOTLESS_I); invlist = add_cp_to_invlist(invlist, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE); } else if (fc == LATIN_SMALL_LETTER_DOTLESS_I) { invlist = add_cp_to_invlist(invlist, 'I'); } else if (fc == LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE) { invlist = add_cp_to_invlist(invlist, 'i'); } } } } return invlist;
0
[ "CWE-190", "CWE-787" ]
perl5
897d1f7fd515b828e4b198d8b8bef76c6faf03ed
192,264,483,499,872,000,000,000,000,000,000,000,000
163
regcomp.c: Prevent integer overflow from nested regex quantifiers. (CVE-2020-10543) On 32bit systems the size calculations for nested regular expression quantifiers could overflow causing heap memory corruption. Fixes: Perl/perl5-security#125 (cherry picked from commit bfd31397db5dc1a5c5d3e0a1f753a4f89a736e71)
new_label(codegen_scope *s) { return s->lastlabel = s->pc; }
0
[ "CWE-415", "CWE-122" ]
mruby
38b164ace7d6ae1c367883a3d67d7f559783faad
38,663,038,145,576,404,000,000,000,000,000,000,000
4
codegen.c: fix a bug in `gen_values()`. - Fix limit handling that fails 15 arguments method calls. - Fix too early argument packing in arrays.
static int lstat_cache(struct cache_def *cache, const char *name, int len, int track_flags, int prefix_len_stat_func) { int flags; (void)lstat_cache_matchlen(cache, name, len, &flags, track_flags, prefix_len_stat_func); return flags; }
0
[ "CWE-59", "CWE-61" ]
git
684dd4c2b414bcf648505e74498a608f28de4592
117,855,778,376,140,780,000,000,000,000,000,000,000
8
checkout: fix bug that makes checkout follow symlinks in leading path Before checking out a file, we have to confirm that all of its leading components are real existing directories. And to reduce the number of lstat() calls in this process, we cache the last leading path known to contain only directories. However, when a path collision occurs (e.g. when checking out case-sensitive files in case-insensitive file systems), a cached path might have its file type changed on disk, leaving the cache on an invalid state. Normally, this doesn't bring any bad consequences as we usually check out files in index order, and therefore, by the time the cached path becomes outdated, we no longer need it anyway (because all files in that directory would have already been written). But, there are some users of the checkout machinery that do not always follow the index order. In particular: checkout-index writes the paths in the same order that they appear on the CLI (or stdin); and the delayed checkout feature -- used when a long-running filter process replies with "status=delayed" -- postpones the checkout of some entries, thus modifying the checkout order. When we have to check out an out-of-order entry and the lstat() cache is invalid (due to a previous path collision), checkout_entry() may end up using the invalid data and thrusting that the leading components are real directories when, in reality, they are not. In the best case scenario, where the directory was replaced by a regular file, the user will get an error: "fatal: unable to create file 'foo/bar': Not a directory". But if the directory was replaced by a symlink, checkout could actually end up following the symlink and writing the file at a wrong place, even outside the repository. Since delayed checkout is affected by this bug, it could be used by an attacker to write arbitrary files during the clone of a maliciously crafted repository. Some candidate solutions considered were to disable the lstat() cache during unordered checkouts or sort the entries before passing them to the checkout machinery. But both ideas include some performance penalty and they don't future-proof the code against new unordered use cases. Instead, we now manually reset the lstat cache whenever we successfully remove a directory. Note: We are not even checking whether the directory was the same as the lstat cache points to because we might face a scenario where the paths refer to the same location but differ due to case folding, precomposed UTF-8 issues, or the presence of `..` components in the path. Two regression tests, with case-collisions and utf8-collisions, are also added for both checkout-index and delayed checkout. Note: to make the previously mentioned clone attack unfeasible, it would be sufficient to reset the lstat cache only after the remove_subtree() call inside checkout_entry(). This is the place where we would remove a directory whose path collides with the path of another entry that we are currently trying to check out (possibly a symlink). However, in the interest of a thorough fix that does not leave Git open to similar-but-not-identical attack vectors, we decided to intercept all `rmdir()` calls in one fell swoop. This addresses CVE-2021-21300. Co-authored-by: Johannes Schindelin <[email protected]> Signed-off-by: Matheus Tavares <[email protected]>
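The long explanation above reduces to one invariant: a cached answer of the form "this leading path is all real directories" must be discarded whenever a directory is successfully removed, because the same path may reappear as something else, even a symlink. The deliberately simplified C sketch below keeps a one-entry cache (not Git's real lstat cache) and resets it on every successful rmdir().

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* One-entry stand-in for a "known directory prefix" cache. */
static char cached_dir[4096];

static int is_cached_dir(const char *path)
{
    return cached_dir[0] != '\0' && strcmp(cached_dir, path) == 0;
}

static int check_dir(const char *path)
{
    struct stat st;

    if (is_cached_dir(path))
        return 1;                          /* answered from the cache */
    if (lstat(path, &st) == 0 && S_ISDIR(st.st_mode)) {
        snprintf(cached_dir, sizeof(cached_dir), "%s", path);
        return 1;
    }
    return 0;
}

/* The important part: any successful rmdir() invalidates the cache, so a
 * later replacement of the path (for example by a symlink) cannot be
 * mistaken for the directory that used to be there. */
static int remove_dir(const char *path)
{
    int ret = rmdir(path);

    if (ret == 0)
        cached_dir[0] = '\0';
    return ret;
}

int main(void)
{
    mkdir("demo-dir", 0700);
    printf("is dir (cached on success): %d\n", check_dir("demo-dir"));
    remove_dir("demo-dir");
    printf("is dir after rmdir: %d\n", check_dir("demo-dir"));
    return 0;
}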
u32toutf16 (c, s) u_bits32_t c; unsigned short *s; { int l; l = 0; if (c < 0x0d800) { s[0] = (unsigned short) (c & 0xFFFF); l = 1; } else if (c >= 0x0e000 && c <= 0x010ffff) { c -= 0x010000; s[0] = (unsigned short)((c >> 10) + 0xd800); s[1] = (unsigned short)((c & 0x3ff) + 0xdc00); l = 2; } s[l] = 0; return l; }
0
[]
bash
863d31ae775d56b785dc5b0105b6d251515d81d5
262,168,089,615,348,660,000,000,000,000,000,000,000
22
commit bash-20120224 snapshot
char *uuid_string(char *buf, char *end, const u8 *addr, struct printf_spec spec, const char *fmt) { char uuid[UUID_STRING_LEN + 1]; char *p = uuid; int i; const u8 *index = uuid_index; bool uc = false; switch (*(++fmt)) { case 'L': uc = true; /* fall-through */ case 'l': index = guid_index; break; case 'B': uc = true; break; } for (i = 0; i < 16; i++) { if (uc) p = hex_byte_pack_upper(p, addr[index[i]]); else p = hex_byte_pack(p, addr[index[i]]); switch (i) { case 3: case 5: case 7: case 9: *p++ = '-'; break; } } *p = 0; return string(buf, end, uuid, spec); }
0
[ "CWE-200" ]
linux
ad67b74d2469d9b82aaa572d76474c95bc484d57
283,773,767,375,054,360,000,000,000,000,000,000,000
39
printk: hash addresses printed with %p Currently there exist approximately 14 000 places in the kernel where addresses are being printed using an unadorned %p. This potentially leaks sensitive information regarding the Kernel layout in memory. Many of these calls are stale, instead of fixing every call lets hash the address by default before printing. This will of course break some users, forcing code printing needed addresses to be updated. Code that _really_ needs the address will soon be able to use the new printk specifier %px to print the address. For what it's worth, usage of unadorned %p can be broken down as follows (thanks to Joe Perches). $ git grep -E '%p[^A-Za-z0-9]' | cut -f1 -d"/" | sort | uniq -c 1084 arch 20 block 10 crypto 32 Documentation 8121 drivers 1221 fs 143 include 101 kernel 69 lib 100 mm 1510 net 40 samples 7 scripts 11 security 166 sound 152 tools 2 virt Add function ptr_to_id() to map an address to a 32 bit unique identifier. Hash any unadorned usage of specifier %p and any malformed specifiers. Signed-off-by: Tobin C. Harding <[email protected]>
QPDFObjectHandle::newDictionary( std::map<std::string, QPDFObjectHandle> const& items) { return QPDFObjectHandle(new QPDF_Dictionary(items)); }
0
[ "CWE-835" ]
qpdf
afe0242b263a9e1a8d51dd81e42ab6de2e5127eb
144,714,869,544,243,730,000,000,000,000,000,000,000
5
Handle object ID 0 (fixes #99) This is CVE-2017-9208. The QPDF library uses object ID 0 internally as a sentinel to represent a direct object, but prior to this fix, was not blocking handling of 0 0 obj or 0 0 R as a special case. Creating an object in the file with 0 0 obj could cause various infinite loops. The PDF spec doesn't allow for object 0. Having qpdf handle object 0 might be a better fix, but changing all the places in the code that assumes objid == 0 means direct would be risky.
static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
45,203,907,469,150,390,000,000,000,000,000,000,000
7
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
xsltCompilePatternInternal(const xmlChar *pattern, xmlDocPtr doc, xmlNodePtr node, xsltStylesheetPtr style, xsltTransformContextPtr runtime, int novar) { xsltParserContextPtr ctxt = NULL; xsltCompMatchPtr element, first = NULL, previous = NULL; int current, start, end, level, j; if (pattern == NULL) { xsltTransformError(NULL, NULL, node, "xsltCompilePattern : NULL pattern\n"); return(NULL); } ctxt = xsltNewParserContext(style, runtime); if (ctxt == NULL) return(NULL); ctxt->doc = doc; ctxt->elem = node; current = end = 0; while (pattern[current] != 0) { start = current; while (IS_BLANK_CH(pattern[current])) current++; end = current; level = 0; while ((pattern[end] != 0) && ((pattern[end] != '|') || (level != 0))) { if (pattern[end] == '[') level++; else if (pattern[end] == ']') level--; else if (pattern[end] == '\'') { end++; while ((pattern[end] != 0) && (pattern[end] != '\'')) end++; } else if (pattern[end] == '"') { end++; while ((pattern[end] != 0) && (pattern[end] != '"')) end++; } if (pattern[end] == 0) break; end++; } if (current == end) { xsltTransformError(NULL, NULL, node, "xsltCompilePattern : NULL pattern\n"); goto error; } element = xsltNewCompMatch(); if (element == NULL) { goto error; } if (first == NULL) first = element; else if (previous != NULL) previous->next = element; previous = element; ctxt->comp = element; ctxt->base = xmlStrndup(&pattern[start], end - start); if (ctxt->base == NULL) goto error; ctxt->cur = &(ctxt->base)[current - start]; element->pattern = ctxt->base; element->nsList = xmlGetNsList(doc, node); j = 0; if (element->nsList != NULL) { while (element->nsList[j] != NULL) j++; } element->nsNr = j; #ifdef WITH_XSLT_DEBUG_PATTERN xsltGenericDebug(xsltGenericDebugContext, "xsltCompilePattern : parsing '%s'\n", element->pattern); #endif /* Preset default priority to be zero. This may be changed by xsltCompileLocationPathPattern. */ element->priority = 0; xsltCompileLocationPathPattern(ctxt, novar); if (ctxt->error) { xsltTransformError(NULL, style, node, "xsltCompilePattern : failed to compile '%s'\n", element->pattern); if (style != NULL) style->errors++; goto error; } /* * Reverse for faster interpretation. 
*/ xsltReverseCompMatch(ctxt, element); /* * Set-up the priority */ if (element->priority == 0) { /* if not yet determined */ if (((element->steps[0].op == XSLT_OP_ELEM) || (element->steps[0].op == XSLT_OP_ATTR) || (element->steps[0].op == XSLT_OP_PI)) && (element->steps[0].value != NULL) && (element->steps[1].op == XSLT_OP_END)) { ; /* previously preset */ } else if ((element->steps[0].op == XSLT_OP_ATTR) && (element->steps[0].value2 != NULL) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.25; } else if ((element->steps[0].op == XSLT_OP_NS) && (element->steps[0].value != NULL) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.25; } else if ((element->steps[0].op == XSLT_OP_ATTR) && (element->steps[0].value == NULL) && (element->steps[0].value2 == NULL) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.5; } else if (((element->steps[0].op == XSLT_OP_PI) || (element->steps[0].op == XSLT_OP_TEXT) || (element->steps[0].op == XSLT_OP_ALL) || (element->steps[0].op == XSLT_OP_NODE) || (element->steps[0].op == XSLT_OP_COMMENT)) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.5; } else { element->priority = 0.5; } } #ifdef WITH_XSLT_DEBUG_PATTERN xsltGenericDebug(xsltGenericDebugContext, "xsltCompilePattern : parsed %s, default priority %f\n", element->pattern, element->priority); #endif if (pattern[end] == '|') end++; current = end; } if (end == 0) { xsltTransformError(NULL, style, node, "xsltCompilePattern : NULL pattern\n"); if (style != NULL) style->errors++; goto error; } xsltFreeParserContext(ctxt); return(first); error: if (ctxt != NULL) xsltFreeParserContext(ctxt); if (first != NULL) xsltFreeCompMatchList(first); return(NULL); }
0
[ "CWE-125" ]
libxslt
fe5a4fa33eb85bce3253ed3742b1ea6c4b59b41b
211,834,788,753,963,000,000,000,000,000,000,000,000
157
Fix some case of pattern parsing errors We could accidentally hit an off by one string array access due to improper loop exit when parsing patterns
void clear() { if (ctx_pktlen_) { EVP_CIPHER_CTX_cleanup(ctx_pktlen_); EVP_CIPHER_CTX_free(ctx_pktlen_); ctx_pktlen_ = nullptr; } if (ctx_main_) { EVP_CIPHER_CTX_cleanup(ctx_main_); EVP_CIPHER_CTX_free(ctx_main_); ctx_main_ = nullptr; } if (polykey_) { EVP_PKEY_free(polykey_); polykey_ = nullptr; } if (md_ctx_) { EVP_MD_CTX_free(md_ctx_); md_ctx_ = nullptr; } // `polykey_ctx_` is not explicitly freed as it is freed implicitly when // `md_ctx_` is freed }
0
[ "CWE-78" ]
ssh2
f763271f41320e71d5cbee02ea5bc6a2ded3ca21
89,015,908,398,044,900,000,000,000,000,000,000,000
22
examples,lib,test: switch to code rewrite For more information see: https://github.com/mscdex/ssh2/issues/935
R_API const char *r_bin_entry_type_string(int etype) { switch (etype) { case R_BIN_ENTRY_TYPE_PROGRAM: return "program"; case R_BIN_ENTRY_TYPE_MAIN: return "main"; case R_BIN_ENTRY_TYPE_INIT: return "init"; case R_BIN_ENTRY_TYPE_FINI: return "fini"; case R_BIN_ENTRY_TYPE_TLS: return "tls"; } return NULL; }
0
[ "CWE-125" ]
radare2
d31c4d3cbdbe01ea3ded16a584de94149ecd31d9
308,515,168,518,012,200,000,000,000,000,000,000,000
15
Fix #8748 - Fix oobread on string search
static CURLcode add_per_transfer(struct per_transfer **per) { struct per_transfer *p; p = calloc(sizeof(struct per_transfer), 1); if(!p) return CURLE_OUT_OF_MEMORY; if(!transfers) /* first entry */ transfersl = transfers = p; else { /* make the last node point to the new node */ transfersl->next = p; /* make the new node point back to the formerly last node */ p->prev = transfersl; /* move the last node pointer to the new entry */ transfersl = p; } *per = p; all_xfers++; /* count total number of transfers added */ return CURLE_OK; }
0
[]
curl
8c7ee9083d0d719d0a77ab20d9cc2ae84eeea7f3
328,244,282,498,540,030,000,000,000,000,000,000,000
21
post_per_transfer: remove the updated file name When --remove-on-error is used with --no-clobber, it might have an updated file name to remove. Bug: https://curl.se/docs/CVE-2022-27778.html CVE-2022-27778 Reported-by: Harry Sintonen Closes #8824
static bool torture_smb2_notify_close(struct torture_context *torture, struct smb2_tree *tree1) { bool ret = true; NTSTATUS status; union smb_notify notify; union smb_open io; struct smb2_handle h1; struct smb2_request *req; smb2_deltree(tree1, BASEDIR_CNC); smb2_util_rmdir(tree1, BASEDIR_CNC); torture_comment(torture, "TESTING CHANGE NOTIFY FOLLOWED BY ULOGOFF\n"); /* get a handle on the directory */ ZERO_STRUCT(io.smb2); io.generic.level = RAW_OPEN_SMB2; io.smb2.in.create_flags = 0; io.smb2.in.desired_access = SEC_FILE_ALL; io.smb2.in.create_options = NTCREATEX_OPTIONS_DIRECTORY; io.smb2.in.file_attributes = FILE_ATTRIBUTE_NORMAL; io.smb2.in.share_access = NTCREATEX_SHARE_ACCESS_READ | NTCREATEX_SHARE_ACCESS_WRITE; io.smb2.in.alloc_size = 0; io.smb2.in.create_disposition = NTCREATEX_DISP_CREATE; io.smb2.in.impersonation_level = SMB2_IMPERSONATION_ANONYMOUS; io.smb2.in.security_flags = 0; io.smb2.in.fname = BASEDIR_CNC; status = smb2_create(tree1, torture, &(io.smb2)); CHECK_STATUS(status, NT_STATUS_OK); io.smb2.in.create_disposition = NTCREATEX_DISP_OPEN; status = smb2_create(tree1, torture, &(io.smb2)); CHECK_STATUS(status, NT_STATUS_OK); h1 = io.smb2.out.file.handle; /* ask for a change notify, on file or directory name changes */ ZERO_STRUCT(notify.smb2); notify.smb2.level = RAW_NOTIFY_SMB2; notify.smb2.in.buffer_size = 1000; notify.smb2.in.completion_filter = FILE_NOTIFY_CHANGE_NAME; notify.smb2.in.file.handle = h1; notify.smb2.in.recursive = true; req = smb2_notify_send(tree1, &(notify.smb2)); WAIT_FOR_ASYNC_RESPONSE(req); status = smb2_util_close(tree1, h1); CHECK_STATUS(status, NT_STATUS_OK); status = smb2_notify_recv(req, torture, &(notify.smb2)); CHECK_STATUS(status, NT_STATUS_NOTIFY_CLEANUP); CHECK_VAL(notify.smb2.out.num_changes, 0); done: smb2_deltree(tree1, BASEDIR_CNC); return ret; }
0
[ "CWE-266" ]
samba
22528b76ed6eb6251fdf01875aaa955480e7663d
226,565,731,282,711,470,000,000,000,000,000,000,000
64
s4: torture: Add smb2.notify.handle-permissions test. Add knownfail entry. CVE-2020-14318 BUG: https://bugzilla.samba.org/show_bug.cgi?id=14434 Signed-off-by: Jeremy Allison <[email protected]>
queue_list(int option, uschar **list, int count) { int i; int subcount; int now = (int)time(NULL); void *reset_point; queue_filename * qf = NULL; uschar subdirs[64]; /* If given a list of messages, build a chain containing their ids. */ if (count > 0) { queue_filename *last = NULL; for (i = 0; i < count; i++) { queue_filename *next = store_get(sizeof(queue_filename) + Ustrlen(list[i]) + 2); sprintf(CS next->text, "%s-H", list[i]); next->dir_uschar = '*'; next->next = NULL; if (i == 0) qf = next; else last->next = next; last = next; } } /* Otherwise get a list of the entire queue, in order if necessary. */ else qf = queue_get_spool_list( -1, /* entire queue */ subdirs, /* for holding sub list */ &subcount, /* for subcount */ option >= 8); /* randomize if required */ if (option >= 8) option -= 8; /* Now scan the chain and print information, resetting store used each time. */ for (reset_point = store_get(0); qf; spool_clear_header_globals(), store_reset(reset_point), qf = qf->next ) { int rc, save_errno; int size = 0; BOOL env_read; message_size = 0; message_subdir[0] = qf->dir_uschar; rc = spool_read_header(qf->text, FALSE, count <= 0); if (rc == spool_read_notopen && errno == ENOENT && count <= 0) continue; save_errno = errno; env_read = (rc == spool_read_OK || rc == spool_read_hdrerror); if (env_read) { int ptr; FILE *jread; struct stat statbuf; uschar * fname = spool_fname(US"input", message_subdir, qf->text, US""); ptr = Ustrlen(fname)-1; fname[ptr] = 'D'; /* Add the data size to the header size; don't count the file name at the start of the data file, but add one for the notional blank line that precedes the data. */ if (Ustat(fname, &statbuf) == 0) size = message_size + statbuf.st_size - SPOOL_DATA_START_OFFSET + 1; i = (now - received_time.tv_sec)/60; /* minutes on queue */ if (i > 90) { i = (i + 30)/60; if (i > 72) printf("%2dd ", (i + 12)/24); else printf("%2dh ", i); } else printf("%2dm ", i); /* Collect delivered addresses from any J file */ fname[ptr] = 'J'; if ((jread = Ufopen(fname, "rb"))) { while (Ufgets(big_buffer, big_buffer_size, jread) != NULL) { int n = Ustrlen(big_buffer); big_buffer[n-1] = 0; tree_add_nonrecipient(big_buffer); } (void)fclose(jread); } } fprintf(stdout, "%s ", string_format_size(size, big_buffer)); for (i = 0; i < 16; i++) fputc(qf->text[i], stdout); if (env_read && sender_address) { printf(" <%s>", sender_address); if (f.sender_set_untrusted) printf(" (%s)", originator_login); } if (rc != spool_read_OK) { printf("\n "); if (save_errno == ERRNO_SPOOLFORMAT) { struct stat statbuf; uschar * fname = spool_fname(US"input", message_subdir, qf->text, US""); if (Ustat(fname, &statbuf) == 0) printf("*** spool format error: size=" OFF_T_FMT " ***", statbuf.st_size); else printf("*** spool format error ***"); } else printf("*** spool read error: %s ***", strerror(save_errno)); if (rc != spool_read_hdrerror) { printf("\n\n"); continue; } } if (f.deliver_freeze) printf(" *** frozen ***"); printf("\n"); if (recipients_list) { for (i = 0; i < recipients_count; i++) { tree_node *delivered = tree_search(tree_nonrecipients, recipients_list[i].address); if (!delivered || option != 1) printf(" %s %s\n", delivered ? "D" : " ", recipients_list[i].address); if (delivered) delivered->data.val = TRUE; } if (option == 2 && tree_nonrecipients) queue_list_extras(tree_nonrecipients); printf("\n"); } } }
0
[ "CWE-78" ]
exim
7ea1237c783e380d7bdb86c90b13d8203c7ecf26
214,918,504,861,464,950,000,000,000,000,000,000,000
148
Events: raise msg:fail:internal & msg:complete for -Mrm. Bug 2310
static int console_puts(CharDriverState *chr, const uint8_t *buf, int len)
{
    QemuConsole *s = chr->opaque;
    int i;

    s->update_x0 = s->width * FONT_WIDTH;
    s->update_y0 = s->height * FONT_HEIGHT;
    s->update_x1 = 0;
    s->update_y1 = 0;
    console_show_cursor(s, 0);
    for(i = 0; i < len; i++) {
        console_putchar(s, buf[i]);
    }
    console_show_cursor(s, 1);
    if (s->ds->have_gfx && s->update_x0 < s->update_x1) {
        dpy_gfx_update(s, s->update_x0, s->update_y0,
                       s->update_x1 - s->update_x0,
                       s->update_y1 - s->update_y0);
    }
    return len;
}
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
23,398,522,601,132,030,000,000,000,000,000,000,000
21
char: move front end handlers into CharBackend

Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store them in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() followed by mux_chr_update_read_handler(), by storing the CharBackend pointer directly.

Also, a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before this change, it was possible to call chr->handler by mistake with surprising results, for example through qemu_chr_be_can_write(), which would result in calling the front end handler that was set last, not the one with focus.

Signed-off-by: Marc-André Lureau <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Status AddOrExecuteNode(core::RefCountPtr<KernelAndDevice> kernel,
                        EagerOperation* op, TensorHandle** retvals) {
  EagerExecutor& executor = op->Executor();
  EagerContext& ctx = op->EagerContext();
  GraphCollector* graph_collector = nullptr;
  if (ctx.ShouldStoreGraphs()) {
    graph_collector = ctx.GetGraphCollector();
  }
  const int num_outputs = kernel->num_outputs();
  absl::optional<EagerFunctionParams> eager_func_params =
      op->eager_func_params();
  if (kernel->IsCrossProcess() && !eager_func_params.has_value()) {
    // Create an eager op id for a cross-process function if not exist.
#if defined(IS_MOBILE_PLATFORM)
    return errors::Unimplemented(
        "Cross-process functions are not supported on mobile devices.");
#else  // !IS_MOBILE_PLATFORM
    const int64_t op_id = ctx.RemoteMgr()->NextOpId();
    eager_func_params =
        EagerFunctionParams{op_id, /*step_id=*/absl::nullopt};
#endif  // !IS_MOBILE_PLATFORM
  }
  if (executor.Async()) {
    const DataTypeVector& output_dtypes = kernel->output_dtypes();
    for (int i = 0, end = num_outputs; i < end; ++i) {
      Device* output_device = ctx.CanonicalDevice(kernel->OutputDevice(i));
      if (output_device == nullptr || output_device->IsLocal()) {
        retvals[i] = TensorHandle::CreateEmptyLocalHandle(
            /* d= */ output_device, /* op_device= */ kernel->device(),
            /* resource_device= */ kernel->OutputResourceDevice(i),
            output_dtypes[i], &ctx);
      } else {
        TF_RETURN_IF_ERROR(
            CreateUnshapedOutput(*kernel, i, output_device, output_dtypes[i],
                                 eager_func_params, &ctx, &retvals[i]));
      }
    }
    const absl::InlinedVector<TensorHandle*, 4>* inputs;
    TF_RETURN_IF_ERROR(op->TensorHandleInputs(&inputs));
    auto node = absl::make_unique<AsyncExecuteNode>(
        &ctx, *inputs, eager_func_params, std::move(kernel), graph_collector,
        op->GetCancellationManager(),
        absl::Span<TensorHandle*>(retvals, num_outputs), op->GetStackTrace());
    // Release the inputs from the eager operation since the AsyncExecuteNode
    // would have taken ownership. This allows the inputs to be forwarded if
    // possible.
    op->Clear();
    // For async mode, execution order will make sure that all
    // input handles are ready before executing them.
    // TODO(b/137118203): Consider executing "cheap" kernels inline for
    // performance.
    return executor.AddOrExecute(std::move(node));
  } else {
    for (int i = 0, end = num_outputs; i < end; ++i) {
      retvals[i] = nullptr;
    }
    const absl::InlinedVector<TensorHandle*, 4>* inputs;
    TF_RETURN_IF_ERROR(op->TensorHandleInputs(&inputs));
    ExecuteNode node(&ctx, *inputs, eager_func_params, kernel, graph_collector,
                     op->GetCancellationManager(),
                     {retvals, static_cast<size_t>(num_outputs)},
                     op->GetStackTrace());
    Status s = executor.SyncExecute(&node);
    // We release the inputs AFTER executing the operation in sync mode since
    // ExecuteNode does not increment the reference count and thus does not have
    // ownership of the inputs while executing.
    op->Clear();
    return s;
  }
}
0
[ "CWE-476", "CWE-475" ]
tensorflow
a5b89cd68c02329d793356bda85d079e9e69b4e7
224,361,629,604,453,220,000,000,000,000,000,000,000
69
Fix empty resource handle vulnerability.

Some ops that attempt to extract a resource handle from user input can lead to nullptr dereferences. This returns an error in such a case.

PiperOrigin-RevId: 445571938
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}
0
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
103,921,405,653,675,000,000,000,000,000,000,000,000
34
mm: larger stack guard gap, between vmas

Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB, or stack strings with MAX_ARG_STRLEN.

This will become especially dangerous for suid binaries and the default unlimited stack size limit, because those applications can be tricked into consuming a large portion of the stack, and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately.

Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot.

One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units).

Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode.

Instead of keeping the gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that.

Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>
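The gap-aware start/end lookup that the message describes can be illustrated with a minimal, self-contained C sketch. This is not the kernel's code; the struct, helper names, and the fixed 4k page size are assumptions made for illustration only.

#include <stdint.h>

#define PAGE_SIZE      4096UL
#define VM_GROWSDOWN   0x0100UL
#define VM_GROWSUP     0x0200UL

/* simplified stand-in for a vm_area_struct (hypothetical) */
struct vma {
	unsigned long vm_start;   /* lowest mapped address */
	unsigned long vm_end;     /* one past the highest mapped address */
	unsigned long vm_flags;
};

/* configurable gap; 256 pages is 1MB with 4k pages */
static unsigned long stack_guard_gap = 256UL * PAGE_SIZE;

/* effective start: a downward-growing stack keeps a guard gap below vm_start */
static unsigned long vma_start_gap(const struct vma *v)
{
	unsigned long start = v->vm_start;

	if (v->vm_flags & VM_GROWSDOWN) {
		start -= stack_guard_gap;
		if (start > v->vm_start)   /* underflow: clamp to address 0 */
			start = 0;
	}
	return start;
}

/* effective end: an upward-growing stack keeps a guard gap above vm_end */
static unsigned long vma_end_gap(const struct vma *v)
{
	unsigned long end = v->vm_end;

	if (v->vm_flags & VM_GROWSUP) {
		end += stack_guard_gap;
		if (end < v->vm_end)       /* overflow: clamp to top of address space */
			end = ~0UL;
	}
	return end;
}

Placement code that asks "is this range free?" would then compare against vma_start_gap()/vma_end_gap() instead of the raw vm_start/vm_end, which is the idea behind the real vm_start_gap()/vm_end_gap() helpers named in the message.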
bfuThreadEnd(J9HookInterface** vmHooks, UDATA eventNum, void* eventData, void* userData)
{
	J9VMThread* vmThread = ((J9VMThreadDestroyEvent*)eventData)->vmThread;
	HANDLE event = (HANDLE) vmThread->sidecarEvent;

	if (event) {
		CloseHandle(event);
		vmThread->sidecarEvent = NULL;
	}
}
0
[ "CWE-119" ]
openj9
0971f22d88f42cf7332364ad7430e9bd8681c970
28,256,047,249,742,580,000,000,000,000,000,000,000
10
Clean up jio_snprintf and jio_vfprintf

Fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=543659

Signed-off-by: Peter Bain <[email protected]>
zsetstackprotect(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    ref *ep = oparray_find(i_ctx_p);

    check_type(*op, t_boolean);
    if (ep == 0)
        return_error(gs_error_rangecheck);
    ep->value.opproc = (op->value.boolval ? oparray_cleanup : oparray_no_cleanup);
    pop(1);
    return 0;
}
0
[]
ghostpdl
b575e1ec42cc86f6a58c603f2a88fcc2af699cc8
52,255,046,458,146,920,000,000,000,000,000,000,000
13
Bug 699668: handle stack overflow during error handling

When handling a Postscript error, we push the object throwing the error onto the operand stack for the error handling procedure to access - we were not checking the available stack before doing so, thus causing a crash.

Basically, if we get a stack overflow when already handling an error, we're out of options, so return to the caller with a fatal error.
GF_Err svhd_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_SphericalVideoInfoBox *ptr = (GF_SphericalVideoInfoBox *)s;

	if ((u32)ptr->size >= (u32)0xFFFFFFFF) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid size %llu in svhd box\n", ptr->size));
		return GF_ISOM_INVALID_FILE;
	}
	ptr->string = gf_malloc(sizeof(char) * ((u32) ptr->size+1));
	if (!ptr->string) return GF_OUT_OF_MEM;

	gf_bs_read_data(bs, ptr->string, (u32) ptr->size);
	ptr->string[ptr->size] = 0;
	return GF_OK;
}
0
[ "CWE-787" ]
gpac
77510778516803b7f7402d7423c6d6bef50254c3
338,266,827,642,087,350,000,000,000,000,000,000,000
13
fixed #2255
//! Access to pixel value with Dirichlet boundary conditions for the coordinate (\c pos) \const.
T atN(const int pos, const int x, const int y, const int z, const int c, const T& out_value) const {
  return (pos<0 || pos>=width())?out_value:(*this)(pos,x,y,z,c);
0
[ "CWE-119", "CWE-787" ]
CImg
ac8003393569aba51048c9d67e1491559877b1d1
256,825,182,147,862,800,000,000,000,000,000,000,000
3
.
TEST_P(JSITest, RuntimeTest) {
  auto v = rt.evaluateJavaScript(std::make_unique<StringBuffer>("1"), "");
  EXPECT_EQ(v.getNumber(), 1);

  rt.evaluateJavaScript(std::make_unique<StringBuffer>("x = 1"), "");
  EXPECT_EQ(rt.global().getProperty(rt, "x").getNumber(), 1);
}
0
[ "CWE-843", "CWE-125" ]
hermes
fe52854cdf6725c2eaa9e125995da76e6ceb27da
188,986,993,291,325,430,000,000,000,000,000,000,000
7
[CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain.

Summary: The change in the hermes repository fixes the security vulnerability CVE-2020-1911. This vulnerability only affects applications which allow evaluation of uncontrolled, untrusted JavaScript code not shipped with the app, so React Native apps will generally not be affected.

This revision includes a test for the bug. The test is generic JSI code, so it is included in the hermes and react-native repositories.

Changelog: [Internal]

Reviewed By: tmikov

Differential Revision: D23322992

fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a
static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}
0
[ "CWE-20" ]
linux
594cc251fdd0d231d342d88b2fdff4bc42fb0690
292,838,649,982,946,800,000,000,000,000,000,000,000
10
make 'user_access_begin()' do 'access_ok()'

Originally, the rule used to be that you'd have to do access_ok() separately, and then user_access_begin() before actually doing the direct (optimized) user access.

But experience has shown that people then decide not to do access_ok() at all, and instead rely on it being implied by other operations or similar. Which makes it very hard to verify that the access has actually been range-checked.

If you use the unsafe direct user accesses, hardware features (either SMAP - Supervisor Mode Access Protection - on x86, or PAN - Privileged Access Never - on ARM) do force you to use user_access_begin(). But nothing really forces the range check.

By putting the range check into user_access_begin(), we actually force people to do the right thing (tm), and the range check will be visible near the actual accesses. We have way too long a history of people trying to avoid them.

Signed-off-by: Linus Torvalds <[email protected]>
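The calling pattern implied by the message - the range check folded into user_access_begin(), followed by the unsafe accessors - looks roughly like the sketch below. This is an illustrative fragment, not code from the patch; read_user_u32() is a hypothetical helper, and exact macro signatures vary between kernel versions.

/* copy a u32 from user space via the "unsafe" fast path (sketch) */
static int read_user_u32(const u32 __user *uptr, u32 *out)
{
	u32 val;

	/* after this change, user_access_begin() itself performs the
	 * access_ok() range check and returns false on failure */
	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;

	unsafe_get_user(val, uptr, efault);	/* jumps to efault on a fault */
	user_access_end();

	*out = val;
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}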
static void __exit vmx_exit(void)
{
	free_page((unsigned long)vmx_msr_bitmap_legacy);
	free_page((unsigned long)vmx_msr_bitmap_longmode);
	free_page((unsigned long)vmx_io_bitmap_b);
	free_page((unsigned long)vmx_io_bitmap_a);

	kvm_exit();
}
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
249,301,687,782,990,740,000,000,000,000,000,000,000
9
KVM: Fix fs/gs reload oops with invalid ldt

kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them.

Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions.

This is CVE-2010-3698.

KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
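A minimal sketch of the safe-reload pattern the message refers to; this is illustrative only (reload_host_segments() is a made-up wrapper), the real fix lives in the VMX host-state reload path.

/* reload host fs/gs with the fault-safe accessors instead of raw
 * "mov %sel, %%fs"-style inline asm (sketch) */
static void reload_host_segments(u16 fs_sel, u16 gs_sel)
{
	loadsegment(fs, fs_sel);	/* tolerates a selector invalidated by a modified LDT */
#ifdef CONFIG_X86_64
	load_gs_index(gs_sel);		/* swapgs-aware gs reload on x86_64 */
#else
	loadsegment(gs, gs_sel);
#endif
}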
static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
{
	struct xfrm_user_sec_ctx *uctx = NULL;
	int ctx_size = sec_ctx->sadb_x_ctx_len;

	uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL);

	if (!uctx)
		return NULL;

	uctx->len = pfkey_sec_ctx_len(sec_ctx);
	uctx->exttype = sec_ctx->sadb_x_sec_exttype;
	uctx->ctx_doi = sec_ctx->sadb_x_ctx_doi;
	uctx->ctx_alg = sec_ctx->sadb_x_ctx_alg;
	uctx->ctx_len = sec_ctx->sadb_x_ctx_len;
	memcpy(uctx + 1, sec_ctx + 1, uctx->ctx_len);

	return uctx;
}
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
247,270,638,925,864,400,000,000,000,000,000,000,000
20
net: rework recvmsg handler msg_name and msg_namelen logic

This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user.

This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory.

Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL.

Also document these changes in include/linux/net.h as suggested by David Miller.

Changes since RFC:

Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec.

With this change in place I could remove "if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL".

This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL.

Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style.

Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
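As a rough illustration of the contract described above - the handler, not the caller, fills in msg_name and sets msg_namelen, and must tolerate a NULL msg_name - a protocol's receive path would end with something like the following. This is a hypothetical userspace-style sketch (fill_msg_name() and the peer_* parameters are made up), not code from the patch.

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* hypothetical helper showing the handler-side contract */
static void fill_msg_name(struct msghdr *msg, uint32_t peer_addr_be, uint16_t peer_port_be)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;

	if (!sin) {
		/* plain read(), or recvfrom(..., NULL, NULL): nothing to report,
		 * msg_namelen stays 0 as initialized by the core */
		return;
	}

	memset(sin, 0, sizeof(*sin));
	sin->sin_family      = AF_INET;
	sin->sin_port        = peer_port_be;	/* already in network byte order */
	sin->sin_addr.s_addr = peer_addr_be;

	/* only the handler decides how many bytes of msg_name are valid */
	msg->msg_namelen = sizeof(*sin);
}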
e1000e_read_ext_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
{
    union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;

    *buff_addr = le64_to_cpu(d->read.buffer_addr);
}
0
[ "CWE-835" ]
qemu
4154c7e03fa55b4cf52509a83d50d6c09d743b77
146,591,598,677,450,300,000,000,000,000,000,000,000
5
net: e1000e: fix an infinite loop issue

This issue is like the issue in the e1000 network card addressed in the commit "e1000: eliminate infinite loops on out-of-bounds transfer start".

Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
sv_viins_modestr (value)
     const char *value;
{
  if (value && *value)
    {
      FREE (_rl_vi_ins_mode_str);
      _rl_vi_ins_mode_str = (char *)xmalloc (2 * strlen (value) + 1);
      rl_translate_keyseq (value, _rl_vi_ins_mode_str, &_rl_vi_ins_modestr_len);
      _rl_vi_ins_mode_str[_rl_vi_ins_modestr_len] = '\0';
      return 0;
    }
  else if (value)
    {
      FREE (_rl_vi_ins_mode_str);
      _rl_vi_ins_mode_str = (char *)xmalloc (1);
      _rl_vi_ins_mode_str[_rl_vi_ins_modestr_len = 0] = '\0';
      return 0;
    }
  else if (value == 0)
    {
      FREE (_rl_vi_ins_mode_str);
      _rl_vi_ins_mode_str = 0;	/* prompt_modestr does the right thing */
      _rl_vi_ins_modestr_len = 0;
      return 0;
    }
  return 1;
}
0
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
332,050,845,633,440,250,000,000,000,000,000,000,000
27
bash-4.4-rc2 release
void do_copy_file(struct st_command *command)
{
  int error;
  static DYNAMIC_STRING ds_from_file;
  static DYNAMIC_STRING ds_to_file;
  const struct command_arg copy_file_args[] = {
    { "from_file", ARG_STRING, TRUE, &ds_from_file, "Filename to copy from" },
    { "to_file", ARG_STRING, TRUE, &ds_to_file, "Filename to copy to" }
  };
  DBUG_ENTER("do_copy_file");

  check_command_args(command, command->first_argument,
                     copy_file_args,
                     sizeof(copy_file_args)/sizeof(struct command_arg),
                     ' ');

  DBUG_PRINT("info", ("Copy %s to %s", ds_from_file.str, ds_to_file.str));
  /* MY_HOLD_ORIGINAL_MODES prevents attempts to chown the file */
  error= (my_copy(ds_from_file.str, ds_to_file.str,
                  MYF(MY_DONT_OVERWRITE_FILE | MY_WME | MY_HOLD_ORIGINAL_MODES)) != 0);
  handle_command_error(command, error, my_errno);
  dynstr_free(&ds_from_file);
  dynstr_free(&ds_to_file);
  DBUG_VOID_RETURN;
}
0
[]
server
01b39b7b0730102b88d8ea43ec719a75e9316a1e
107,763,961,908,082,850,000,000,000,000,000,000,000
25
mysqltest: don't eat new lines in --exec

pass them through as is
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk, current);
	}

	return sk;
}
0
[ "CWE-284", "CWE-264" ]
linux
3e10986d1d698140747fcfc2761ec9cb64c1d582
20,413,334,506,518,680,000,000,000,000,000,000,000
23
net: guard tcp_set_keepalive() to tcp sockets

It's possible to use RAW sockets to get a crash in tcp_set_keepalive() / sk_reset_timer().

Fix is to make sure the socket is a SOCK_STREAM one.

Reported-by: Dave Jones <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
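A sketch of the guard the message describes - only a TCP stream socket should reach tcp_set_keepalive(); every other socket type just toggles the flag. This is an illustrative helper (set_keepalive_guarded() is a made-up name), close in spirit to the upstream fix but not quoted from it.

/* hypothetical helper mirroring the guarded SO_KEEPALIVE path */
static void set_keepalive_guarded(struct sock *sk, bool valbool)
{
	/* RAW/DGRAM sockets must never take the TCP-only timer path */
	if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM)
		tcp_set_keepalive(sk, valbool);

	sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
}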
gdm_session_reset (GdmSession *self)
{
        if (self->priv->user_verifier_interface != NULL) {
                gdm_dbus_user_verifier_emit_reset (self->priv->user_verifier_interface);
        }

        do_reset (self);
}
0
[]
gdm
5ac224602f1d603aac5eaa72e1760d3e33a26f0a
104,021,718,596,877,020,000,000,000,000,000,000,000
8
session: disconnect signals from worker proxy when conversation is freed

We don't want an outstanding reference on the worker proxy to lead to signal handlers getting dispatched after the conversation is freed.

https://bugzilla.gnome.org/show_bug.cgi?id=758032
gnutls_handshake (gnutls_session_t session)
{
  int ret;

  if ((ret = _gnutls_handshake_hash_init (session)) < 0)
    {
      gnutls_assert ();
      return ret;
    }

  if (session->security_parameters.entity == GNUTLS_CLIENT)
    {
      ret = _gnutls_handshake_client (session);
    }
  else
    {
      ret = _gnutls_handshake_server (session);
    }
  if (ret < 0)
    {
      /* In the case of a rehandshake abort
       * we should reset the handshake's internal state.
       */
      if (_gnutls_abort_handshake (session, ret) == 0)
        STATE = STATE0;

      return ret;
    }

  ret = _gnutls_handshake_common (session);

  if (ret < 0)
    {
      if (_gnutls_abort_handshake (session, ret) == 0)
        STATE = STATE0;

      return ret;
    }

  STATE = STATE0;

  _gnutls_handshake_io_buffer_clear (session);
  _gnutls_handshake_internal_state_clear (session);

  return 0;
}
0
[ "CWE-189" ]
gnutls
bc8102405fda11ea00ca3b42acc4f4bce9d6e97b
95,372,876,226,441,160,000,000,000,000,000,000,000
46
Fix GNUTLS-SA-2008-1 security vulnerabilities. See http://www.gnu.org/software/gnutls/security.html for updates.
struct asn1_data *asn1_init(TALLOC_CTX *mem_ctx)
{
	struct asn1_data *ret = talloc_zero(mem_ctx, struct asn1_data);
	if (ret == NULL) {
		DEBUG(0,("asn1_init failed! out of memory\n"));
	}
	return ret;
}
0
[ "CWE-399" ]
samba
9d989c9dd7a5b92d0c5d65287935471b83b6e884
52,597,110,707,779,870,000,000,000,000,000,000,000
8
CVE-2015-7540: lib: util: Check *every* asn1 return call and early return.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187

Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]>

Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 19 01:29:00 CEST 2014 on sn-devel-104

(cherry picked from commit b9d3fd4cc551df78a7b066ee8ce43bbaa3ff994a)
static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)
{
	if (pValid)
		*pValid = TRUE;

	switch (bmf)
	{
		case 1:
			return 1;
		case 3:
			return 8;
		case 4:
			return 16;
		case 5:
			return 24;
		case 6:
			return 32;
		default:
			WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);
			if (pValid)
				*pValid = FALSE;
			return 0;
	}
}
0
[ "CWE-125" ]
FreeRDP
b8beb55913471952f92770c90c372139d78c16c0
138,189,390,021,173,400,000,000,000,000,000,000,000
23
Fixed OOB read in update_read_cache_bitmap_v3_order (CVE-2020-11096)

Thanks @antonio-morales for finding this.
mrb_closure_new(mrb_state *mrb, const mrb_irep *irep)
{
  struct RProc *p = mrb_proc_new(mrb, irep);

  closure_setup(mrb, p);
  return p;
}
0
[ "CWE-476", "CWE-190" ]
mruby
f5e10c5a79a17939af763b1dcf5232ce47e24a34
131,137,726,325,720,640,000,000,000,000,000,000,000
7
proc.c: add `mrb_state` argument to `mrb_proc_copy()`.

The function may invoke the garbage collector and therefore requires `mrb_state` to run.
void AuthorizationManagerImpl::_invalidateUserCache_inlock() {
    _updateCacheGeneration_inlock();
    for (stdx::unordered_map<UserName, User*>::iterator it = _userCache.begin();
         it != _userCache.end();
         ++it) {
        fassert(17266, it->second != internalSecurity.user);
        it->second->invalidate();
    }
    _userCache.clear();

    // Reread the schema version before acquiring the next user.
    _version = schemaVersionInvalid;
}
0
[ "CWE-613" ]
mongo
6dfb92b1299de04677d0bd2230e89a52eb01003c
141,883,593,254,408,500,000,000,000,000,000,000,000
13
SERVER-38984 Validate unique User ID on UserCache hit

(cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7)
ebews_set_anniversary (ESoapMessage *message, EContact *contact)
{

}
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
266,360,229,752,519,030,000,000,000,000,000,000,000
5
I#27 - SSL Certificates are not validated

This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.

Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);
}
0
[ "CWE-416", "CWE-284", "CWE-264" ]
linux
45f6fad84cc305103b28d73482b344d7f5b76f39
54,993,790,996,427,390,000,000,000,000,000,000,000
6
ipv6: add complete rcu protection around np->opt

This patch addresses multiple problems:

UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions while the socket is not locked: other threads can change np->opt concurrently. Dmitry posted a syzkaller (http://github.com/google/syzkaller) program demonstrating use-after-free.

Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock() and dccp_v6_request_recv_sock() also need to use RCU protection to dereference np->opt once (before calling ipv6_dup_options()).

This patch adds full RCU protection to np->opt.

Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
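A simplified sketch of the RCU read-side pattern the message describes for reading np->opt on the send path. This is illustrative only; get_txoptions_copy() is a made-up helper, and the actual patch adds dedicated accessors rather than duplicating the options on every send.

/* take a stable private copy of the socket's IPv6 tx options (sketch) */
static struct ipv6_txoptions *get_txoptions_copy(struct sock *sk,
						 struct ipv6_pinfo *np)
{
	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);		/* may change under us otherwise */
	if (opt)
		opt = ipv6_dup_options(sk, opt);	/* private copy, safe after unlock */
	rcu_read_unlock();

	return opt;	/* caller frees the copy when done */
}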
static int cmp_name(const void *name, const void *sym)
{
	return strcmp(name, kernel_symbol_name(sym));
}
0
[ "CWE-362", "CWE-347" ]
linux
0c18f29aae7ce3dadd26d8ee3505d07cc982df75
324,707,863,653,229,240,000,000,000,000,000,000,000
4
module: limit enabling module.sig_enforce

Irrespective as to whether CONFIG_MODULE_SIG is configured, specifying "module.sig_enforce=1" on the boot command line sets "sig_enforce". Only allow "sig_enforce" to be set when CONFIG_MODULE_SIG is configured.

This patch makes the presence of /sys/module/module/parameters/sig_enforce dependent on CONFIG_MODULE_SIG=y.

Fixes: fda784e50aac ("module: export module signature enforcement status")
Reported-by: Nayna Jain <[email protected]>
Tested-by: Mimi Zohar <[email protected]>
Tested-by: Jessica Yu <[email protected]>
Signed-off-by: Mimi Zohar <[email protected]>
Signed-off-by: Jessica Yu <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
camel_pop3_store_cache_has (CamelPOP3Store *store,
                            const gchar *uid)
{
	CamelStream *stream;
	gboolean uid_is_cached;

	g_return_val_if_fail (CAMEL_IS_POP3_STORE (store), FALSE);
	g_return_val_if_fail (uid != NULL, FALSE);

	stream = camel_pop3_store_cache_get (store, uid, NULL);
	uid_is_cached = (stream != NULL);
	g_clear_object (&stream);

	return uid_is_cached;
}
0
[ "CWE-74" ]
evolution-data-server
ba82be72cfd427b5d72ff21f929b3a6d8529c4df
212,745,253,923,680,370,000,000,000,000,000,000,000
15
I#226 - CVE-2020-14928: Response Injection via STARTTLS in SMTP and POP3

Closes https://gitlab.gnome.org/GNOME/evolution-data-server/-/issues/226
setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
		sigset_t *set, struct pt_regs *regs)
{
	int usig = signr_convert(sig);
	int ret;

	/* Set up the stack frame */
	if (is_ia32) {
		if (ka->sa.sa_flags & SA_SIGINFO)
			ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
		else
			ret = ia32_setup_frame(usig, ka, set, regs);
	} else
		ret = __setup_rt_frame(sig, ka, info, set, regs);

	if (ret) {
		force_sigsegv(sig, current);
		return -EFAULT;
	}

	return ret;
}
0
[ "CWE-400" ]
linux-stable-rt
bcf6b1d78c0bde228929c388978ed3af9a623463
114,993,035,677,060,600,000,000,000,000,000,000,000
22
signal/x86: Delay calling signals in atomic

On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using a per CPU debug stack defined by the IST. If we schedule out, another task can come in and use the same stack and cause the stack to be corrupted and crash the kernel on return.

When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and one of these is the spin lock used in signal handling. Some of the debug code (int3) causes do_trap() to send a signal. This function calls a spin lock that has been converted to a mutex and has the possibility to sleep. If this happens, the above issue with the corrupted stack is possible.

Instead of calling the signal right away, for PREEMPT_RT and x86_64, the signal information is stored on the task_struct and TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume code will send the signal when preemption is enabled.

[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ]

Cc: [email protected]
Signed-off-by: Oleg Nesterov <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
CtPtr ProtocolV2::write(const std::string &desc,
                        CONTINUATION_TYPE<ProtocolV2> &next,
                        F &frame) {
  ceph::bufferlist bl;
  try {
    bl = frame.get_buffer(session_stream_handlers);
  } catch (ceph::crypto::onwire::TxHandlerError &e) {
    ldout(cct, 1) << __func__ << " " << e.what() << dendl;
    return _fault();
  }
  return write(desc, next, bl);
}
0
[ "CWE-323" ]
ceph
20b7bb685c5ea74c651ca1ea547ac66b0fee7035
259,904,580,120,779,650,000,000,000,000,000,000,000
12
msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities

The secure mode uses AES-128-GCM with 96-bit nonces consisting of a 32-bit counter followed by a 64-bit salt. The counter is incremented after processing each frame, the salt is fixed for the duration of the session. Both are initialized from the session key generated during session negotiation, so the counter starts with essentially a random value. It is allowed to wrap, and, after 2**32 frames, it repeats, resulting in nonce reuse (the actual sequence numbers that the messenger works with are 64-bit, so the session continues on).

Because of how GCM works, this completely breaks both confidentiality and integrity aspects of the secure mode. A single nonce reuse reveals the XOR of two plaintexts and almost completely reveals the subkey used for producing authentication tags. After a few nonces get used twice, all confidentiality and integrity goes out the window and the attacker can potentially encrypt-authenticate plaintext of their choice.

We can't easily change the nonce format to extend the counter to 64 bits (and possibly XOR it with a longer salt). Instead, just remember the initial nonce and cut the session before it repeats, forcing renegotiation.

Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Radoslaw Zarzynski <[email protected]>
Reviewed-by: Sage Weil <[email protected]>

Conflicts:
	src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17 ("msg: Build target 'common' without using namespace in headers") not in octopus ]
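The nonce layout and the session-cutting rule described above can be sketched in plain C. This is a hypothetical illustration; the struct, field and function names are not taken from the Ceph source.

#include <stdbool.h>
#include <stdint.h>

/* 96-bit AES-GCM nonce: 32-bit per-frame counter followed by a fixed 64-bit salt */
struct gcm_nonce {
	uint32_t counter;	/* incremented per frame, may wrap */
	uint64_t salt;		/* fixed for the session */
} __attribute__((packed));

struct secure_session {
	struct gcm_nonce nonce;		/* nonce to use for the next frame */
	uint32_t initial_counter;	/* counter value recorded at session start */
	bool sent_first_frame;
};

/* returns true if the next frame may be encrypted, false if the session must
 * be cut (renegotiated) before the initial nonce would be reused */
static bool advance_nonce(struct secure_session *s)
{
	if (s->sent_first_frame && s->nonce.counter == s->initial_counter)
		return false;	/* 2^32 frames sent: stop before reusing a nonce */

	s->sent_first_frame = true;
	s->nonce.counter++;	/* wrapping is fine as long as we stop in time */
	return true;
}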
void jslReset() {
  jslSeekTo(0);
}
0
[ "CWE-787" ]
Espruino
bed844f109b6c222816740555068de2e101e8018
251,099,230,912,361,700,000,000,000,000,000,000,000
3
remove strncpy usage as it's effectively useless, replace with an assertion since fn is only used internally (fix #1426)
void makeNullEqualityBounds(const IndexEntry& index,
                            bool isHashed,
                            OrderedIntervalList* oil,
                            IndexBoundsBuilder::BoundsTightness* tightnessOut) {
    // An equality to null predicate cannot be covered because the index does not distinguish
    // between the lack of a value and the literal value null.
    *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH;

    // There are two values that could possibly be equal to null in an index: undefined and null.
    oil->intervals.push_back(isHashed ? kHashedUndefinedInterval
                                      : IndexBoundsBuilder::makePointInterval(kUndefinedElementObj));
    oil->intervals.push_back(isHashed ? kHashedNullInterval
                                      : IndexBoundsBuilder::makePointInterval(kNullElementObj));

    // Just to be sure, make sure the bounds are in the right order if the hash values are opposite.
    IndexBoundsBuilder::unionize(oil);
}
0
[ "CWE-754" ]
mongo
f8f55e1825ee5c7bdb3208fc7c5b54321d172732
317,160,364,427,720,500,000,000,000,000,000,000,000
17
SERVER-44377 generate correct plan for indexed inequalities to null
ossl_asn1_tag(VALUE obj)
{
    VALUE tag;

    tag = ossl_asn1_get_tag(obj);
    if(NIL_P(tag))
        ossl_raise(eASN1Error, "tag number not specified");

    return NUM2INT(tag);
}
0
[ "CWE-119" ]
openssl
1648afef33c1d97fb203c82291b8a61269e85d3b
105,653,701,329,750,840,000,000,000,000,000,000,000
10
asn1: fix out-of-bounds read in decoding constructed objects

OpenSSL::ASN1.{decode,decode_all,traverse} have an out-of-bounds read bug. int_ossl_asn1_decode0_cons() does not give the correct available length to ossl_asn1_decode() when decoding the inner components of a constructed object. This can cause an out-of-bounds read if a crafted input is given.

Reference: https://hackerone.com/reports/170316