Dataset columns (one record per function):

Column           Type    Range / values
CVE ID           string  length 13-43
CVE Page         string  length 45-48
CWE ID           string  90 distinct values
codeLink         string  length 46-139
commit_id        string  length 6-81
commit_message   string  length 3-13.3k
func_after       string  length 14-241k
func_before      string  length 14-241k
lang             string  3 distinct values
project          string  309 distinct values
vul              int8    0 or 1
CVE-2017-15115
https://www.cvedetails.com/cve/CVE-2017-15115/
CWE-416
https://github.com/torvalds/linux/commit/df80cd9b28b9ebaa284a41df611dbf3a2d05ca74
df80cd9b28b9ebaa284a41df611dbf3a2d05ca74
sctp: do not peel off an assoc from one netns to another one Now when peeling off an association to the sock in another netns, all transports in this assoc are not to be rehashed and keep use the old key in hashtable. As a transport uses sk->net as the hash key to insert into hashtable, it would miss removing these transports from hashtable due to the new netns when closing the sock and all transports are being freeed, then later an use-after-free issue could be caused when looking up an asoc and dereferencing those transports. This is a very old issue since very beginning, ChunYu found it with syzkaller fuzz testing with this series: socket$inet6_sctp() bind$inet6() sendto$inet6() unshare(0x40000000) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST() getsockopt$inet_sctp6_SCTP_SOCKOPT_PEELOFF() This patch is to block this call when peeling one assoc off from one netns to another one, so that the netns of all transport would not go out-sync with the key in hashtable. Note that this patch didn't fix it by rehashing transports, as it's difficult to handle the situation when the tuple is already in use in the new netns. Besides, no one would like to peel off one assoc to another netns, considering ipaddrs, ifaces, etc. are usually different. Reported-by: ChunYu Wang <[email protected]> Signed-off-by: Xin Long <[email protected]> Acked-by: Marcelo Ricardo Leitner <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int sctp_setsockopt_default_prinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(info)) goto out; if (copy_from_user(&info, optval, sizeof(info))) { retval = -EFAULT; goto out; } if (info.pr_policy & ~SCTP_PR_SCTP_MASK) goto out; if (info.pr_policy == SCTP_PR_SCTP_NONE) info.pr_value = 0; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); asoc->default_timetolive = info.pr_value; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); sp->default_timetolive = info.pr_value; } else { goto out; } retval = 0; out: return retval; }
static int sctp_setsockopt_default_prinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(info)) goto out; if (copy_from_user(&info, optval, sizeof(info))) { retval = -EFAULT; goto out; } if (info.pr_policy & ~SCTP_PR_SCTP_MASK) goto out; if (info.pr_policy == SCTP_PR_SCTP_NONE) info.pr_value = 0; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); asoc->default_timetolive = info.pr_value; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); sp->default_timetolive = info.pr_value; } else { goto out; } retval = 0; out: return retval; }
C
linux
0
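The function stored in this record is identical before and after the commit (vul = 0); the commit message says the fix blocks the peel-off itself when the target lives in a different network namespace. Below is a minimal kernel-style sketch of that guard, assuming placement in the peel-off path; net_eq(), sock_net() and current->nsproxy->net_ns are existing kernel helpers, but the exact hunk is an assumption.

```c
/*
 * Hedged sketch (not the upstream hunk): refuse to peel off an SCTP
 * association into a socket in a different network namespace, so
 * transports hashed under the old netns key are never left stale.
 */
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static int sctp_peeloff_netns_ok(struct sock *sk)
{
	/* Caller's current netns must match the namespace the
	 * association's socket was created in. */
	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
		return -EINVAL;
	return 0;
}
```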
CVE-2016-4558
https://www.cvedetails.com/cve/CVE-2016-4558/
null
https://github.com/torvalds/linux/commit/92117d8443bc5afacc8d5ba82e541946310f106e
92117d8443bc5afacc8d5ba82e541946310f106e
bpf: fix refcnt overflow On a system with >32Gbyte of phyiscal memory and infinite RLIMIT_MEMLOCK, the malicious application may overflow 32-bit bpf program refcnt. It's also possible to overflow map refcnt on 1Tb system. Impose 32k hard limit which means that the same bpf program or map cannot be shared by more than 32k processes. Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Reported-by: Jann Horn <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int bpf_map_release(struct inode *inode, struct file *filp) { bpf_map_put_with_uref(filp->private_data); return 0; }
static int bpf_map_release(struct inode *inode, struct file *filp) { bpf_map_put_with_uref(filp->private_data); return 0; }
C
linux
0
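The commit message describes capping the refcount at a 32k hard limit so a malicious process cannot overflow it. A self-contained, user-space illustration of that capped-refcount pattern follows; the names and the `REF_MAX` constant are illustrative, not the kernel's.

```c
/* Hedged, user-space illustration of the capped-refcount pattern
 * described in the commit message (hard limit instead of overflow). */
#include <stdatomic.h>
#include <stdio.h>

#define REF_MAX 32768

struct obj {
	atomic_int refcnt;	/* starts at 1 for the creator */
};

/* Take a reference, failing instead of overflowing past REF_MAX. */
static int obj_get(struct obj *o)
{
	if (atomic_fetch_add(&o->refcnt, 1) + 1 > REF_MAX) {
		atomic_fetch_sub(&o->refcnt, 1);
		return -1;	/* caller must not use the object */
	}
	return 0;
}

static void obj_put(struct obj *o)
{
	atomic_fetch_sub(&o->refcnt, 1);
}

int main(void)
{
	struct obj o;

	atomic_init(&o.refcnt, 1);
	if (obj_get(&o) == 0) {
		printf("refcnt now %d\n", atomic_load(&o.refcnt));
		obj_put(&o);
	}
	return 0;
}
```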
CVE-2012-0207
https://www.cvedetails.com/cve/CVE-2012-0207/
CWE-399
https://github.com/torvalds/linux/commit/a8c1f65c79cbbb2f7da782d4c9d15639a9b94b27
a8c1f65c79cbbb2f7da782d4c9d15639a9b94b27
igmp: Avoid zero delay when receiving odd mixture of IGMP queries Commit 5b7c84066733c5dfb0e4016d939757b38de189e4 ('ipv4: correct IGMP behavior on v3 query during v2-compatibility mode') added yet another case for query parsing, which can result in max_delay = 0. Substitute a value of 1, as in the usual v3 case. Reported-by: Simon McVittie <[email protected]> References: http://bugs.debian.org/654876 Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void ip_mc_drop_socket(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; struct net *net = sock_net(sk); if (inet->mc_list == NULL) return; rtnl_lock(); while ((iml = rtnl_dereference(inet->mc_list)) != NULL) { struct in_device *in_dev; inet->mc_list = iml->next_rcu; in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); (void) ip_mc_leave_src(sk, iml, in_dev); if (in_dev != NULL) ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); } rtnl_unlock(); }
void ip_mc_drop_socket(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml; struct net *net = sock_net(sk); if (inet->mc_list == NULL) return; rtnl_lock(); while ((iml = rtnl_dereference(inet->mc_list)) != NULL) { struct in_device *in_dev; inet->mc_list = iml->next_rcu; in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); (void) ip_mc_leave_src(sk, iml, in_dev); if (in_dev != NULL) ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); } rtnl_unlock(); }
C
linux
0
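The commit message says the query-parsing path could compute max_delay = 0 and that the fix substitutes 1, as the usual IGMPv3 case already does. A tiny hedged illustration of that clamp (the helper name is illustrative; the recorded function above is unchanged by the commit):

```c
/* Hedged illustration of the clamp the commit message describes: never
 * answer an IGMP query with a zero report delay; fall back to 1 tick. */
static unsigned long igmp_clamped_max_delay(unsigned long max_delay)
{
	return max_delay ? max_delay : 1;
}
```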
CVE-2017-7586
https://www.cvedetails.com/cve/CVE-2017-7586/
CWE-119
https://github.com/erikd/libsndfile/commit/708e996c87c5fae77b104ccfeb8f6db784c32074
708e996c87c5fae77b104ccfeb8f6db784c32074
src/ : Move to a variable length header buffer Previously, the `psf->header` buffer was a fixed length specified by `SF_HEADER_LEN` which was set to `12292`. This was problematic for two reasons; this value was un-necessarily large for the majority of files and too small for some others. Now the size of the header buffer starts at 256 bytes and grows as necessary up to a maximum of 100k.
u_bitwidth_to_subformat (int bits) { static int array [] = { SF_FORMAT_PCM_U8, SF_FORMAT_PCM_16, SF_FORMAT_PCM_24, SF_FORMAT_PCM_32 } ; if (bits < 8 || bits > 32) return 0 ; return array [((bits + 7) / 8) - 1] ; } /* bitwidth_to_subformat */
u_bitwidth_to_subformat (int bits) { static int array [] = { SF_FORMAT_PCM_U8, SF_FORMAT_PCM_16, SF_FORMAT_PCM_24, SF_FORMAT_PCM_32 } ; if (bits < 8 || bits > 32) return 0 ; return array [((bits + 7) / 8) - 1] ; } /* bitwidth_to_subformat */
C
libsndfile
0
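The commit message describes replacing the fixed `SF_HEADER_LEN` buffer with one that starts at 256 bytes and grows on demand up to 100k. The sketch below shows that grow-with-a-cap pattern as a self-contained snippet; the struct and function names are assumptions, not libsndfile's actual code.

```c
/* Hedged sketch of a grow-on-demand header buffer with a hard upper
 * bound (start at 256 bytes, double as needed, never past 100k). */
#include <stdlib.h>
#include <string.h>

#define HEADER_INITIAL	256
#define HEADER_MAX	(100 * 1024)

typedef struct {
	unsigned char *ptr;	/* header bytes */
	size_t len;		/* currently allocated size */
} header_buf;

static int header_init(header_buf *h)
{
	h->ptr = calloc(1, HEADER_INITIAL);
	h->len = h->ptr ? HEADER_INITIAL : 0;
	return h->ptr ? 0 : -1;
}

/* Ensure at least `needed` bytes are allocated, doubling up to the cap. */
static int header_reserve(header_buf *h, size_t needed)
{
	size_t newlen = h->len ? h->len : HEADER_INITIAL;
	unsigned char *p;

	if (needed <= h->len)
		return 0;
	if (needed > HEADER_MAX)
		return -1;	/* refuse absurd headers outright */
	while (newlen < needed)
		newlen = (newlen * 2 > HEADER_MAX) ? HEADER_MAX : newlen * 2;
	p = realloc(h->ptr, newlen);
	if (p == NULL)
		return -1;
	memset(p + h->len, 0, newlen - h->len);
	h->ptr = p;
	h->len = newlen;
	return 0;
}
```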
CVE-2017-14166
https://www.cvedetails.com/cve/CVE-2017-14166/
CWE-125
https://github.com/libarchive/libarchive/commit/fa7438a0ff4033e4741c807394a9af6207940d71
fa7438a0ff4033e4741c807394a9af6207940d71
Do something sensible for empty strings to make fuzzers happy.
xattr_new(struct archive_read *a, struct xar *xar, struct xmlattr_list *list) { struct xattr *xattr, **nx; struct xmlattr *attr; xattr = calloc(1, sizeof(*xattr)); if (xattr == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } xar->xattr = xattr; for (attr = list->first; attr != NULL; attr = attr->next) { if (strcmp(attr->name, "id") == 0) xattr->id = atol10(attr->value, strlen(attr->value)); } /* Chain to xattr list. */ for (nx = &(xar->file->xattr_list); *nx != NULL; nx = &((*nx)->next)) { if (xattr->id < (*nx)->id) break; } xattr->next = *nx; *nx = xattr; return (ARCHIVE_OK); }
xattr_new(struct archive_read *a, struct xar *xar, struct xmlattr_list *list) { struct xattr *xattr, **nx; struct xmlattr *attr; xattr = calloc(1, sizeof(*xattr)); if (xattr == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } xar->xattr = xattr; for (attr = list->first; attr != NULL; attr = attr->next) { if (strcmp(attr->name, "id") == 0) xattr->id = atol10(attr->value, strlen(attr->value)); } /* Chain to xattr list. */ for (nx = &(xar->file->xattr_list); *nx != NULL; nx = &((*nx)->next)) { if (xattr->id < (*nx)->id) break; } xattr->next = *nx; *nx = xattr; return (ARCHIVE_OK); }
C
libarchive
0
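The commit title only says it handles empty strings sensibly; the recorded xattr_new() feeds attr->value and strlen(attr->value) into atol10(), so a zero-length input is plausible with fuzzed archives. The snippet below is a hedged guess at that kind of guard in a small decimal parser; it mirrors the shape of such a helper but is not the upstream code.

```c
/* Hedged guess at the kind of guard the commit title suggests: return 0
 * for an empty input instead of reading past it. Not the upstream code. */
#include <stddef.h>
#include <stdint.h>

static uint64_t parse_dec(const char *p, size_t char_cnt)
{
	uint64_t l = 0;

	if (p == NULL || char_cnt == 0)
		return 0;	/* empty input: nothing to parse */
	while (char_cnt-- > 0 && *p >= '0' && *p <= '9')
		l = l * 10 + (uint64_t)(*p++ - '0');
	return l;
}
```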
CVE-2015-6791
https://www.cvedetails.com/cve/CVE-2015-6791/
null
https://github.com/chromium/chromium/commit/7e995b26a5a503adefc0ad40435f7e16a45434c2
7e995b26a5a503adefc0ad40435f7e16a45434c2
Add a fake DriveFS launcher client. Using DriveFS requires building and deploying ChromeOS. Add a client for the fake DriveFS launcher to allow the use of a real DriveFS from a ChromeOS chroot to be used with a target_os="chromeos" build of chrome. This connects to the fake DriveFS launcher using mojo over a unix domain socket named by a command-line flag, using the launcher to create DriveFS instances. Bug: 848126 Change-Id: I22dcca154d41bda196dd7c1782bb503f6bcba5b1 Reviewed-on: https://chromium-review.googlesource.com/1098434 Reviewed-by: Xiyuan Xia <[email protected]> Commit-Queue: Sam McNally <[email protected]> Cr-Commit-Position: refs/heads/master@{#567513}
void FakeCrosDisksClient::AddObserver(Observer* observer) { observer_list_.AddObserver(observer); }
void FakeCrosDisksClient::AddObserver(Observer* observer) { observer_list_.AddObserver(observer); }
C
Chrome
0
CVE-2016-9537
https://www.cvedetails.com/cve/CVE-2016-9537/
CWE-787
https://github.com/vadz/libtiff/commit/83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
83a4b92815ea04969d494416eaae3d4c6b338e4a#diff-c8b4b355f9b5c06d585b23138e1c185f
* tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities in heap or stack allocated buffers. Reported as MSVR 35093, MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. * tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR 35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. * libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities in heap allocated buffers. Reported as MSVR 35094. Discovered by Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities & Mitigations team. * libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1() that didn't reset the tif_rawcc and tif_rawcp members. I'm not completely sure if that could happen in practice outside of the odd behaviour of t2p_seekproc() of tiff2pdf). The report points that a better fix could be to check the return value of TIFFFlushData1() in places where it isn't done currently, but it seems this patch is enough. Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan & Suha Can from the MSRC Vulnerabilities & Mitigations team.
PixarLogSetupDecode(TIFF* tif) { static const char module[] = "PixarLogSetupDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* Make sure no byte swapping happens on the data * after decompression. */ tif->tif_postdecode = _TIFFNoPostDecode; /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); /* add one more stride in case input ends mid-stride */ tbuf_size = add_ms(tbuf_size, sizeof(uint16) * sp->stride); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); sp->tbuf_size = tbuf_size; if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle bits depth/data format combination (depth: %d)", td->td_bitspersample); return (0); } if (inflateInit(&sp->stream) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } }
PixarLogSetupDecode(TIFF* tif) { static const char module[] = "PixarLogSetupDecode"; TIFFDirectory *td = &tif->tif_dir; PixarLogState* sp = DecoderState(tif); tmsize_t tbuf_size; assert(sp != NULL); /* Make sure no byte swapping happens on the data * after decompression. */ tif->tif_postdecode = _TIFFNoPostDecode; /* for some reason, we can't do this in TIFFInitPixarLog */ sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); tbuf_size = multiply_ms(multiply_ms(multiply_ms(sp->stride, td->td_imagewidth), td->td_rowsperstrip), sizeof(uint16)); /* add one more stride in case input ends mid-stride */ tbuf_size = add_ms(tbuf_size, sizeof(uint16) * sp->stride); if (tbuf_size == 0) return (0); /* TODO: this is an error return without error report through TIFFErrorExt */ sp->tbuf = (uint16 *) _TIFFmalloc(tbuf_size); if (sp->tbuf == NULL) return (0); sp->tbuf_size = tbuf_size; if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) sp->user_datafmt = PixarLogGuessDataFmt(td); if (sp->user_datafmt == PIXARLOGDATAFMT_UNKNOWN) { TIFFErrorExt(tif->tif_clientdata, module, "PixarLog compression can't handle bits depth/data format combination (depth: %d)", td->td_bitspersample); return (0); } if (inflateInit(&sp->stream) != Z_OK) { TIFFErrorExt(tif->tif_clientdata, module, "%s", sp->stream.msg ? sp->stream.msg : "(null)"); return (0); } else { sp->state |= PLSTATE_INIT; return (1); } }
C
libtiff
0
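Besides the buffer fixes, the commit message notes that the error path of TIFFFlushData1() did not reset tif_rawcc and tif_rawcp. A hedged fragment of that idea follows; tif_rawcp, tif_rawcc and tif_rawdata are real members of struct tiff, while `write_failed` stands in for the failing strip/tile append and the exact control flow is an assumption.

```c
/* Hedged sketch (not the upstream hunk): when the flush fails, still
 * rewind the raw-buffer bookkeeping so later writes do not act on
 * stale state. */
if (write_failed) {
	tif->tif_rawcp = tif->tif_rawdata;	/* rewind the write cursor */
	tif->tif_rawcc = 0;			/* no bytes pending */
	return (0);
}
```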
CVE-2012-1179
https://www.cvedetails.com/cve/CVE-2012-1179/
CWE-264
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
4a1d704194a441bf83c636004a479e01360ec850
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream. In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 
143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. 
====== end quote ======= [[email protected]: checkpatch fixes] Reported-by: Ulrich Obergfell <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Dave Jones <[email protected]> Acked-by: Larry Woodman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: Mark Salter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int mem_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgroup, struct cgroup_taskset *tset) { return 0; }
static int mem_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgroup, struct cgroup_taskset *tset) { return 0; }
C
linux
0
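The commit message says the fix is to read the pmd once into a local variable (with only a compiler barrier on THP builds) and make every decision on that snapshot, so a hugepmd materializing under mmap_sem held for read can never reach pmd_clear_bad(). A kernel-context sketch of that approach follows; the helper name and exact shape are illustrative.

```c
/* Hedged sketch of the snapshot-then-test approach described in the
 * commit message; pmd_none(), pmd_trans_huge(), pmd_bad() and
 * pmd_clear_bad() are existing kernel primitives. */
static inline int pmd_local_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;		/* single read of the entry */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();			/* keep the compiler from re-reading *pmd */
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;		/* caller must not descend to the pte level */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
```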
CVE-2018-18352
https://www.cvedetails.com/cve/CVE-2018-18352/
CWE-732
https://github.com/chromium/chromium/commit/a9cbaa7a40e2b2723cfc2f266c42f4980038a949
a9cbaa7a40e2b2723cfc2f266c42f4980038a949
Simplify "WouldTaintOrigin" concept in media/blink Currently WebMediaPlayer has three predicates: - DidGetOpaqueResponseFromServiceWorker - HasSingleSecurityOrigin - DidPassCORSAccessCheck . These are used to determine whether the response body is available for scripts. They are known to be confusing, and actually MediaElementAudioSourceHandler::WouldTaintOrigin misuses them. This CL merges the three predicates to one, WouldTaintOrigin, to remove the confusion. Now the "response type" concept is available and we don't need a custom CORS check, so this CL removes BaseAudioContext::WouldTaintOrigin. This CL also renames URLData::has_opaque_data_ and its (direct and indirect) data accessors to match the spec. Bug: 849942, 875153 Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a Reviewed-on: https://chromium-review.googlesource.com/c/1238098 Reviewed-by: Fredrik Hubinette <[email protected]> Reviewed-by: Kinuko Yasuda <[email protected]> Reviewed-by: Raymond Toy <[email protected]> Commit-Queue: Yutaka Hirano <[email protected]> Cr-Commit-Position: refs/heads/master@{#598258}
MediaElementAudioSourceHandler::MediaElementAudioSourceHandler( AudioNode& node, HTMLMediaElement& media_element) : AudioHandler(kNodeTypeMediaElementAudioSource, node, node.context()->sampleRate()), media_element_(media_element), source_number_of_channels_(0), source_sample_rate_(0), is_origin_tainted_(false) { DCHECK(IsMainThread()); AddOutput(2); if (Context()->GetExecutionContext()) { task_runner_ = Context()->GetExecutionContext()->GetTaskRunner( TaskType::kMediaElementEvent); } Initialize(); }
MediaElementAudioSourceHandler::MediaElementAudioSourceHandler( AudioNode& node, HTMLMediaElement& media_element) : AudioHandler(kNodeTypeMediaElementAudioSource, node, node.context()->sampleRate()), media_element_(media_element), source_number_of_channels_(0), source_sample_rate_(0), is_origin_tainted_(false) { DCHECK(IsMainThread()); AddOutput(2); if (Context()->GetExecutionContext()) { task_runner_ = Context()->GetExecutionContext()->GetTaskRunner( TaskType::kMediaElementEvent); } Initialize(); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/8f883f2b12f68fed993671dce7fb5fb91f2229aa
8f883f2b12f68fed993671dce7fb5fb91f2229aa
Add more non client Windows messages to the list of messages not being sent to the renderer. Turns out we get WM_NCLBUTTONDOWN/UP messages at times which go to the renderer and are not acked causing the unresponsive renderer dialog to show up in Desktop Chrome Aura. BUG=335248 [email protected] TBR=jam Review URL: https://codereview.chromium.org/141103004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245949 0039d316-1c4b-4281-b951-d872f2087c98
void RenderWidgetHostViewAura::SetKeyboardFocus() { #if defined(OS_WIN) if (CanFocus()) { aura::WindowEventDispatcher* dispatcher = window_->GetDispatcher(); if (dispatcher) ::SetFocus(dispatcher->host()->GetAcceleratedWidget()); } #endif }
void RenderWidgetHostViewAura::SetKeyboardFocus() { #if defined(OS_WIN) if (CanFocus()) { aura::WindowEventDispatcher* dispatcher = window_->GetDispatcher(); if (dispatcher) ::SetFocus(dispatcher->host()->GetAcceleratedWidget()); } #endif }
C
Chrome
0
CVE-2018-13006
https://www.cvedetails.com/cve/CVE-2018-13006/
CWE-125
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
bceb03fd2be95097a7b409ea59914f332fb6bc86
fixed 2 possible heap overflows (inc. #1088)
GF_Box *stco_New() { ISOM_DECL_BOX_ALLOC(GF_ChunkOffsetBox, GF_ISOM_BOX_TYPE_STCO); return (GF_Box *)tmp; }
GF_Box *stco_New() { ISOM_DECL_BOX_ALLOC(GF_ChunkOffsetBox, GF_ISOM_BOX_TYPE_STCO); return (GF_Box *)tmp; }
C
gpac
0
CVE-2016-10066
https://www.cvedetails.com/cve/CVE-2016-10066/
CWE-119
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
null
static void RelinquishBZIPMemory(void *context,void *memory) { (void) context; memory=RelinquishMagickMemory(memory); }
static void RelinquishBZIPMemory(void *context,void *memory) { (void) context; memory=RelinquishMagickMemory(memory); }
C
ImageMagick
0
CVE-2011-3896
https://www.cvedetails.com/cve/CVE-2011-3896/
CWE-119
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
5925dff83699508b5e2735afb0297dfb310e159d
Implement a bubble that appears at the top of the screen when a tab enters fullscreen mode via webkitRequestFullScreen(), telling the user how to exit fullscreen. This is implemented as an NSView rather than an NSWindow because the floating chrome that appears in presentation mode should overlap the bubble. Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac: the mode in which the UI is hidden, accessible by moving the cursor to the top of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode. On Lion, however, fullscreen mode does not imply presentation mode: in non-presentation fullscreen mode, the chrome is permanently shown. It is possible to switch between presentation mode and fullscreen mode using the presentation mode UI control. When a tab initiates fullscreen mode on Lion, we enter presentation mode if not in presentation mode already. When the user exits fullscreen mode using Chrome UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we return the user to the mode they were in before the tab entered fullscreen. BUG=14471 TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen. Need to test the Lion logic somehow, with no Lion trybots. BUG=96883 Original review http://codereview.chromium.org/7890056/ TBR=thakis Review URL: http://codereview.chromium.org/7920024 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
void Browser::OpenInstantConfirmDialog() { ShowOptionsTab(chrome::kInstantConfirmPage); }
void Browser::OpenInstantConfirmDialog() { ShowOptionsTab(chrome::kInstantConfirmPage); }
C
Chrome
0
CVE-2011-3896
https://www.cvedetails.com/cve/CVE-2011-3896/
CWE-119
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
5925dff83699508b5e2735afb0297dfb310e159d
Implement a bubble that appears at the top of the screen when a tab enters fullscreen mode via webkitRequestFullScreen(), telling the user how to exit fullscreen. This is implemented as an NSView rather than an NSWindow because the floating chrome that appears in presentation mode should overlap the bubble. Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac: the mode in which the UI is hidden, accessible by moving the cursor to the top of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode. On Lion, however, fullscreen mode does not imply presentation mode: in non-presentation fullscreen mode, the chrome is permanently shown. It is possible to switch between presentation mode and fullscreen mode using the presentation mode UI control. When a tab initiates fullscreen mode on Lion, we enter presentation mode if not in presentation mode already. When the user exits fullscreen mode using Chrome UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we return the user to the mode they were in before the tab entered fullscreen. BUG=14471 TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen. Need to test the Lion logic somehow, with no Lion trybots. BUG=96883 Original review http://codereview.chromium.org/7890056/ TBR=thakis Review URL: http://codereview.chromium.org/7920024 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
void Browser::RegisterAppPrefs(const std::string& app_name, Profile* profile) { std::string window_pref(prefs::kBrowserWindowPlacement); window_pref.append("_"); window_pref.append(app_name); PrefService* prefs = profile->GetPrefs(); if (!prefs->FindPreference(window_pref.c_str())) { prefs->RegisterDictionaryPref(window_pref.c_str(), PrefService::UNSYNCABLE_PREF); } }
void Browser::RegisterAppPrefs(const std::string& app_name, Profile* profile) { std::string window_pref(prefs::kBrowserWindowPlacement); window_pref.append("_"); window_pref.append(app_name); PrefService* prefs = profile->GetPrefs(); if (!prefs->FindPreference(window_pref.c_str())) { prefs->RegisterDictionaryPref(window_pref.c_str(), PrefService::UNSYNCABLE_PREF); } }
C
Chrome
0
CVE-2017-17807
https://www.cvedetails.com/cve/CVE-2017-17807/
CWE-862
https://github.com/torvalds/linux/commit/4dca6ea1d9432052afb06baf2e3ae78188a4410b
4dca6ea1d9432052afb06baf2e3ae78188a4410b
KEYS: add missing permission check for request_key() destination When the request_key() syscall is not passed a destination keyring, it links the requested key (if constructed) into the "default" request-key keyring. This should require Write permission to the keyring. However, there is actually no permission check. This can be abused to add keys to any keyring to which only Search permission is granted. This is because Search permission allows joining the keyring. keyctl_set_reqkey_keyring(KEY_REQKEY_DEFL_SESSION_KEYRING) then will set the default request-key keyring to the session keyring. Then, request_key() can be used to add keys to the keyring. Both negatively and positively instantiated keys can be added using this method. Adding negative keys is trivial. Adding a positive key is a bit trickier. It requires that either /sbin/request-key positively instantiates the key, or that another thread adds the key to the process keyring at just the right time, such that request_key() misses it initially but then finds it in construct_alloc_key(). Fix this bug by checking for Write permission to the keyring in construct_get_dest_keyring() when the default keyring is being used. We don't do the permission check for non-default keyrings because that was already done by the earlier call to lookup_user_key(). Also, request_key_and_link() is currently passed a 'struct key *' rather than a key_ref_t, so the "possessed" bit is unavailable. We also don't do the permission check for the "requestor keyring", to continue to support the use case described by commit 8bbf4976b59f ("KEYS: Alter use of key instantiation link-to-keyring argument") where /sbin/request-key recursively calls request_key() to add keys to the original requestor's destination keyring. (I don't know of any users who actually do that, though...) Fixes: 3e30148c3d52 ("[PATCH] Keys: Make request-key create an authorisation key") Cc: <[email protected]> # v2.6.13+ Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]>
static int call_sbin_request_key(struct key_construction *cons, const char *op, void *aux) { static char const request_key[] = "/sbin/request-key"; const struct cred *cred = current_cred(); key_serial_t prkey, sskey; struct key *key = cons->key, *authkey = cons->authkey, *keyring, *session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; int ret, i; kenter("{%d},{%d},%s", key->serial, authkey->serial, op); ret = install_user_keyrings(); if (ret < 0) goto error_alloc; /* allocate a new session keyring */ sprintf(desc, "_req.%u", key->serial); cred = get_current_cred(); keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL); put_cred(cred); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error_alloc; } /* attach the auth key to the session keyring */ ret = key_link(keyring, authkey); if (ret < 0) goto error_link; /* record the UID and GID */ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid)); sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid)); /* we say which key is under construction */ sprintf(key_str, "%d", key->serial); /* we specify the process's default keyrings */ sprintf(keyring_str[0], "%d", cred->thread_keyring ? cred->thread_keyring->serial : 0); prkey = 0; if (cred->process_keyring) prkey = cred->process_keyring->serial; sprintf(keyring_str[1], "%d", prkey); rcu_read_lock(); session = rcu_dereference(cred->session_keyring); if (!session) session = cred->user->session_keyring; sskey = session->serial; rcu_read_unlock(); sprintf(keyring_str[2], "%d", sskey); /* set up a minimal environment */ i = 0; envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[i] = NULL; /* set up the argument list */ i = 0; argv[i++] = (char *)request_key; argv[i++] = (char *) op; argv[i++] = key_str; argv[i++] = uid_str; argv[i++] = gid_str; argv[i++] = keyring_str[0]; argv[i++] = keyring_str[1]; argv[i++] = keyring_str[2]; argv[i] = NULL; /* do it */ ret = call_usermodehelper_keys(request_key, argv, envp, keyring, UMH_WAIT_PROC); kdebug("usermode -> 0x%x", ret); if (ret >= 0) { /* ret is the exit/wait code */ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) || key_validate(key) < 0) ret = -ENOKEY; else /* ignore any errors from userspace if the key was * instantiated */ ret = 0; } error_link: key_put(keyring); error_alloc: complete_request_key(cons, ret); kleave(" = %d", ret); return ret; }
static int call_sbin_request_key(struct key_construction *cons, const char *op, void *aux) { static char const request_key[] = "/sbin/request-key"; const struct cred *cred = current_cred(); key_serial_t prkey, sskey; struct key *key = cons->key, *authkey = cons->authkey, *keyring, *session; char *argv[9], *envp[3], uid_str[12], gid_str[12]; char key_str[12], keyring_str[3][12]; char desc[20]; int ret, i; kenter("{%d},{%d},%s", key->serial, authkey->serial, op); ret = install_user_keyrings(); if (ret < 0) goto error_alloc; /* allocate a new session keyring */ sprintf(desc, "_req.%u", key->serial); cred = get_current_cred(); keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL); put_cred(cred); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error_alloc; } /* attach the auth key to the session keyring */ ret = key_link(keyring, authkey); if (ret < 0) goto error_link; /* record the UID and GID */ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid)); sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid)); /* we say which key is under construction */ sprintf(key_str, "%d", key->serial); /* we specify the process's default keyrings */ sprintf(keyring_str[0], "%d", cred->thread_keyring ? cred->thread_keyring->serial : 0); prkey = 0; if (cred->process_keyring) prkey = cred->process_keyring->serial; sprintf(keyring_str[1], "%d", prkey); rcu_read_lock(); session = rcu_dereference(cred->session_keyring); if (!session) session = cred->user->session_keyring; sskey = session->serial; rcu_read_unlock(); sprintf(keyring_str[2], "%d", sskey); /* set up a minimal environment */ i = 0; envp[i++] = "HOME=/"; envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[i] = NULL; /* set up the argument list */ i = 0; argv[i++] = (char *)request_key; argv[i++] = (char *) op; argv[i++] = key_str; argv[i++] = uid_str; argv[i++] = gid_str; argv[i++] = keyring_str[0]; argv[i++] = keyring_str[1]; argv[i++] = keyring_str[2]; argv[i] = NULL; /* do it */ ret = call_usermodehelper_keys(request_key, argv, envp, keyring, UMH_WAIT_PROC); kdebug("usermode -> 0x%x", ret); if (ret >= 0) { /* ret is the exit/wait code */ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) || key_validate(key) < 0) ret = -ENOKEY; else /* ignore any errors from userspace if the key was * instantiated */ ret = 0; } error_link: key_put(keyring); error_alloc: complete_request_key(cons, ret); kleave(" = %d", ret); return ret; }
C
linux
0
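The commit message says the fix adds a Write-permission check in construct_get_dest_keyring() when the default request-key keyring is used. A hedged kernel-style fragment of that check follows; key_task_permission(), make_key_ref() and KEY_NEED_WRITE are existing kernel APIs, while the surrounding control flow is an assumption.

```c
/* Hedged sketch: before linking a constructed key into a default
 * destination keyring, require Write permission on that keyring. */
if (dest_keyring) {
	ret = key_task_permission(make_key_ref(dest_keyring, 1), cred,
				  KEY_NEED_WRITE);
	if (ret) {
		key_put(dest_keyring);
		return ERR_PTR(ret);
	}
}
```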
CVE-2018-20856
https://www.cvedetails.com/cve/CVE-2018-20856/
CWE-416
https://github.com/torvalds/linux/commit/54648cf1ec2d7f4b6a71767799c45676a138ca24
54648cf1ec2d7f4b6a71767799c45676a138ca24
block: blk_init_allocated_queue() set q->fq as NULL in the fail case We find the memory use-after-free issue in __blk_drain_queue() on the kernel 4.14. After read the latest kernel 4.18-rc6 we think it has the same problem. Memory is allocated for q->fq in the blk_init_allocated_queue(). If the elevator init function called with error return, it will run into the fail case to free the q->fq. Then the __blk_drain_queue() uses the same memory after the free of the q->fq, it will lead to the unpredictable event. The patch is to set q->fq as NULL in the fail case of blk_init_allocated_queue(). Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery") Cc: <[email protected]> Reviewed-by: Ming Lei <[email protected]> Reviewed-by: Bart Van Assche <[email protected]> Signed-off-by: xiao jin <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
blk_qc_t direct_make_request(struct bio *bio) { struct request_queue *q = bio->bi_disk->queue; bool nowait = bio->bi_opf & REQ_NOWAIT; blk_qc_t ret; if (!generic_make_request_checks(bio)) return BLK_QC_T_NONE; if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) { if (nowait && !blk_queue_dying(q)) bio->bi_status = BLK_STS_AGAIN; else bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return BLK_QC_T_NONE; } ret = q->make_request_fn(q, bio); blk_queue_exit(q); return ret; }
blk_qc_t direct_make_request(struct bio *bio) { struct request_queue *q = bio->bi_disk->queue; bool nowait = bio->bi_opf & REQ_NOWAIT; blk_qc_t ret; if (!generic_make_request_checks(bio)) return BLK_QC_T_NONE; if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) { if (nowait && !blk_queue_dying(q)) bio->bi_status = BLK_STS_AGAIN; else bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return BLK_QC_T_NONE; } ret = q->make_request_fn(q, bio); blk_queue_exit(q); return ret; }
C
linux
0
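The commit message states the fix: set q->fq to NULL in the failure path of blk_init_allocated_queue() after freeing it, so __blk_drain_queue() cannot later touch freed memory. A hedged fragment of that path follows; blk_free_flush_queue() is the real kernel helper, the label name is illustrative.

```c
/* Hedged sketch of the failure path described in the commit message. */
out_free_flush_queue:
	blk_free_flush_queue(q->fq);
	q->fq = NULL;			/* the one-line fix */
	return -ENOMEM;
```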
CVE-2017-7562
https://www.cvedetails.com/cve/CVE-2017-7562/
CWE-287
https://github.com/krb5/krb5/pull/694/commits/50fe4074f188c2d4da0c421e96553acea8378db2
50fe4074f188c2d4da0c421e96553acea8378db2
Fix certauth built-in module returns The PKINIT certauth eku module should never authoritatively authorize a certificate, because an extended key usage does not establish a relationship between the certificate and any specific user; it only establishes that the certificate was created for PKINIT client authentication. Therefore, pkinit_eku_authorize() should return KRB5_PLUGIN_NO_HANDLE on success, not 0. The certauth san module should pass if it does not find any SANs of the types it can match against; the presence of other types of SANs should not cause it to explicitly deny a certificate. Check for an empty result from crypto_retrieve_cert_sans() in verify_client_san(), instead of returning ENOENT from crypto_retrieve_cert_sans() when there are no SANs at all. ticket: 8561
pkinit_server_verify_padata(krb5_context context, krb5_data *req_pkt, krb5_kdc_req * request, krb5_enc_tkt_part * enc_tkt_reply, krb5_pa_data * data, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_kdcpreauth_verify_respond_fn respond, void *arg) { krb5_error_code retval = 0; krb5_data authp_data = {0, 0, NULL}, krb5_authz = {0, 0, NULL}; krb5_pa_pk_as_req *reqp = NULL; krb5_pa_pk_as_req_draft9 *reqp9 = NULL; krb5_auth_pack *auth_pack = NULL; krb5_auth_pack_draft9 *auth_pack9 = NULL; pkinit_kdc_context plgctx = NULL; pkinit_kdc_req_context reqctx = NULL; krb5_checksum cksum = {0, 0, 0, NULL}; krb5_data *der_req = NULL; krb5_data k5data; int is_signed = 1; krb5_pa_data **e_data = NULL; krb5_kdcpreauth_modreq modreq = NULL; char **sp; pkiDebug("pkinit_verify_padata: entered!\n"); if (data == NULL || data->length <= 0 || data->contents == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } if (moddata == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } plgctx = pkinit_find_realm_context(context, moddata, request->server); if (plgctx == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } #ifdef DEBUG_ASN1 print_buffer_bin(data->contents, data->length, "/tmp/kdc_as_req"); #endif /* create a per-request context */ retval = pkinit_init_kdc_req_context(context, &reqctx); if (retval) goto cleanup; reqctx->pa_type = data->pa_type; PADATA_TO_KRB5DATA(data, &k5data); switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: TRACE_PKINIT_SERVER_PADATA_VERIFY(context); retval = k5int_decode_krb5_pa_pk_as_req(&k5data, &reqp); if (retval) { pkiDebug("decode_krb5_pa_pk_as_req failed\n"); goto cleanup; } #ifdef DEBUG_ASN1 print_buffer_bin(reqp->signedAuthPack.data, reqp->signedAuthPack.length, "/tmp/kdc_signed_data"); #endif retval = cms_signeddata_verify(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_CLIENT, plgctx->opts->require_crl_checking, (unsigned char *) reqp->signedAuthPack.data, reqp->signedAuthPack.length, (unsigned char **)&authp_data.data, &authp_data.length, (unsigned char **)&krb5_authz.data, &krb5_authz.length, &is_signed); break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: TRACE_PKINIT_SERVER_PADATA_VERIFY_OLD(context); retval = k5int_decode_krb5_pa_pk_as_req_draft9(&k5data, &reqp9); if (retval) { pkiDebug("decode_krb5_pa_pk_as_req_draft9 failed\n"); goto cleanup; } #ifdef DEBUG_ASN1 print_buffer_bin(reqp9->signedAuthPack.data, reqp9->signedAuthPack.length, "/tmp/kdc_signed_data_draft9"); #endif retval = cms_signeddata_verify(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_DRAFT9, plgctx->opts->require_crl_checking, (unsigned char *) reqp9->signedAuthPack.data, reqp9->signedAuthPack.length, (unsigned char **)&authp_data.data, &authp_data.length, (unsigned char **)&krb5_authz.data, &krb5_authz.length, NULL); break; default: pkiDebug("unrecognized pa_type = %d\n", data->pa_type); retval = EINVAL; goto cleanup; } if (retval) { TRACE_PKINIT_SERVER_PADATA_VERIFY_FAIL(context); goto cleanup; } if (is_signed) { retval = authorize_cert(context, moddata->certauth_modules, plgctx, reqctx, cb, rock, request->client); if (retval) goto cleanup; } else { /* !is_signed */ if (!krb5_principal_compare(context, request->client, krb5_anonymous_principal())) { retval = KRB5KDC_ERR_PREAUTH_FAILED; krb5_set_error_message(context, retval, _("Pkinit request not signed, but client " "not anonymous.")); goto cleanup; } } #ifdef DEBUG_ASN1 print_buffer_bin(authp_data.data, 
authp_data.length, "/tmp/kdc_auth_pack"); #endif OCTETDATA_TO_KRB5DATA(&authp_data, &k5data); switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: retval = k5int_decode_krb5_auth_pack(&k5data, &auth_pack); if (retval) { pkiDebug("failed to decode krb5_auth_pack\n"); goto cleanup; } retval = krb5_check_clockskew(context, auth_pack->pkAuthenticator.ctime); if (retval) goto cleanup; /* check dh parameters */ if (auth_pack->clientPublicValue != NULL) { retval = server_check_dh(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, &auth_pack->clientPublicValue->algorithm.parameters, plgctx->opts->dh_min_bits); if (retval) { pkiDebug("bad dh parameters\n"); goto cleanup; } } else if (!is_signed) { /*Anonymous pkinit requires DH*/ retval = KRB5KDC_ERR_PREAUTH_FAILED; krb5_set_error_message(context, retval, _("Anonymous pkinit without DH public " "value not supported.")); goto cleanup; } der_req = cb->request_body(context, rock); retval = krb5_c_make_checksum(context, CKSUMTYPE_NIST_SHA, NULL, 0, der_req, &cksum); if (retval) { pkiDebug("unable to calculate AS REQ checksum\n"); goto cleanup; } if (cksum.length != auth_pack->pkAuthenticator.paChecksum.length || k5_bcmp(cksum.contents, auth_pack->pkAuthenticator.paChecksum.contents, cksum.length) != 0) { pkiDebug("failed to match the checksum\n"); #ifdef DEBUG_CKSUM pkiDebug("calculating checksum on buf size (%d)\n", req_pkt->length); print_buffer(req_pkt->data, req_pkt->length); pkiDebug("received checksum type=%d size=%d ", auth_pack->pkAuthenticator.paChecksum.checksum_type, auth_pack->pkAuthenticator.paChecksum.length); print_buffer(auth_pack->pkAuthenticator.paChecksum.contents, auth_pack->pkAuthenticator.paChecksum.length); pkiDebug("expected checksum type=%d size=%d ", cksum.checksum_type, cksum.length); print_buffer(cksum.contents, cksum.length); #endif retval = KRB5KDC_ERR_PA_CHECKSUM_MUST_BE_INCLUDED; goto cleanup; } /* check if kdcPkId present and match KDC's subjectIdentifier */ if (reqp->kdcPkId.data != NULL) { int valid_kdcPkId = 0; retval = pkinit_check_kdc_pkid(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, (unsigned char *)reqp->kdcPkId.data, reqp->kdcPkId.length, &valid_kdcPkId); if (retval) goto cleanup; if (!valid_kdcPkId) pkiDebug("kdcPkId in AS_REQ does not match KDC's cert" "RFC says to ignore and proceed\n"); } /* remember the decoded auth_pack for verify_padata routine */ reqctx->rcv_auth_pack = auth_pack; auth_pack = NULL; break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: retval = k5int_decode_krb5_auth_pack_draft9(&k5data, &auth_pack9); if (retval) { pkiDebug("failed to decode krb5_auth_pack_draft9\n"); goto cleanup; } if (auth_pack9->clientPublicValue != NULL) { retval = server_check_dh(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, &auth_pack9->clientPublicValue->algorithm.parameters, plgctx->opts->dh_min_bits); if (retval) { pkiDebug("bad dh parameters\n"); goto cleanup; } } /* remember the decoded auth_pack for verify_padata routine */ reqctx->rcv_auth_pack9 = auth_pack9; auth_pack9 = NULL; break; } if (is_signed && plgctx->auth_indicators != NULL) { /* Assert configured authentication indicators. 
*/ for (sp = plgctx->auth_indicators; *sp != NULL; sp++) { retval = cb->add_auth_indicator(context, rock, *sp); if (retval) goto cleanup; } } /* remember to set the PREAUTH flag in the reply */ enc_tkt_reply->flags |= TKT_FLG_PRE_AUTH; modreq = (krb5_kdcpreauth_modreq)reqctx; reqctx = NULL; cleanup: if (retval && data->pa_type == KRB5_PADATA_PK_AS_REQ) { pkiDebug("pkinit_verify_padata failed: creating e-data\n"); if (pkinit_create_edata(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, plgctx->opts, retval, &e_data)) pkiDebug("pkinit_create_edata failed\n"); } switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: free_krb5_pa_pk_as_req(&reqp); free(cksum.contents); break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: free_krb5_pa_pk_as_req_draft9(&reqp9); } free(authp_data.data); free(krb5_authz.data); if (reqctx != NULL) pkinit_fini_kdc_req_context(context, reqctx); free_krb5_auth_pack(&auth_pack); free_krb5_auth_pack_draft9(context, &auth_pack9); (*respond)(arg, retval, modreq, e_data, NULL); }
pkinit_server_verify_padata(krb5_context context, krb5_data *req_pkt, krb5_kdc_req * request, krb5_enc_tkt_part * enc_tkt_reply, krb5_pa_data * data, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_kdcpreauth_verify_respond_fn respond, void *arg) { krb5_error_code retval = 0; krb5_data authp_data = {0, 0, NULL}, krb5_authz = {0, 0, NULL}; krb5_pa_pk_as_req *reqp = NULL; krb5_pa_pk_as_req_draft9 *reqp9 = NULL; krb5_auth_pack *auth_pack = NULL; krb5_auth_pack_draft9 *auth_pack9 = NULL; pkinit_kdc_context plgctx = NULL; pkinit_kdc_req_context reqctx = NULL; krb5_checksum cksum = {0, 0, 0, NULL}; krb5_data *der_req = NULL; krb5_data k5data; int is_signed = 1; krb5_pa_data **e_data = NULL; krb5_kdcpreauth_modreq modreq = NULL; char **sp; pkiDebug("pkinit_verify_padata: entered!\n"); if (data == NULL || data->length <= 0 || data->contents == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } if (moddata == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } plgctx = pkinit_find_realm_context(context, moddata, request->server); if (plgctx == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } #ifdef DEBUG_ASN1 print_buffer_bin(data->contents, data->length, "/tmp/kdc_as_req"); #endif /* create a per-request context */ retval = pkinit_init_kdc_req_context(context, &reqctx); if (retval) goto cleanup; reqctx->pa_type = data->pa_type; PADATA_TO_KRB5DATA(data, &k5data); switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: TRACE_PKINIT_SERVER_PADATA_VERIFY(context); retval = k5int_decode_krb5_pa_pk_as_req(&k5data, &reqp); if (retval) { pkiDebug("decode_krb5_pa_pk_as_req failed\n"); goto cleanup; } #ifdef DEBUG_ASN1 print_buffer_bin(reqp->signedAuthPack.data, reqp->signedAuthPack.length, "/tmp/kdc_signed_data"); #endif retval = cms_signeddata_verify(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_CLIENT, plgctx->opts->require_crl_checking, (unsigned char *) reqp->signedAuthPack.data, reqp->signedAuthPack.length, (unsigned char **)&authp_data.data, &authp_data.length, (unsigned char **)&krb5_authz.data, &krb5_authz.length, &is_signed); break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: TRACE_PKINIT_SERVER_PADATA_VERIFY_OLD(context); retval = k5int_decode_krb5_pa_pk_as_req_draft9(&k5data, &reqp9); if (retval) { pkiDebug("decode_krb5_pa_pk_as_req_draft9 failed\n"); goto cleanup; } #ifdef DEBUG_ASN1 print_buffer_bin(reqp9->signedAuthPack.data, reqp9->signedAuthPack.length, "/tmp/kdc_signed_data_draft9"); #endif retval = cms_signeddata_verify(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_DRAFT9, plgctx->opts->require_crl_checking, (unsigned char *) reqp9->signedAuthPack.data, reqp9->signedAuthPack.length, (unsigned char **)&authp_data.data, &authp_data.length, (unsigned char **)&krb5_authz.data, &krb5_authz.length, NULL); break; default: pkiDebug("unrecognized pa_type = %d\n", data->pa_type); retval = EINVAL; goto cleanup; } if (retval) { TRACE_PKINIT_SERVER_PADATA_VERIFY_FAIL(context); goto cleanup; } if (is_signed) { retval = authorize_cert(context, moddata->certauth_modules, plgctx, reqctx, cb, rock, request->client); if (retval) goto cleanup; } else { /* !is_signed */ if (!krb5_principal_compare(context, request->client, krb5_anonymous_principal())) { retval = KRB5KDC_ERR_PREAUTH_FAILED; krb5_set_error_message(context, retval, _("Pkinit request not signed, but client " "not anonymous.")); goto cleanup; } } #ifdef DEBUG_ASN1 print_buffer_bin(authp_data.data, 
authp_data.length, "/tmp/kdc_auth_pack"); #endif OCTETDATA_TO_KRB5DATA(&authp_data, &k5data); switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: retval = k5int_decode_krb5_auth_pack(&k5data, &auth_pack); if (retval) { pkiDebug("failed to decode krb5_auth_pack\n"); goto cleanup; } retval = krb5_check_clockskew(context, auth_pack->pkAuthenticator.ctime); if (retval) goto cleanup; /* check dh parameters */ if (auth_pack->clientPublicValue != NULL) { retval = server_check_dh(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, &auth_pack->clientPublicValue->algorithm.parameters, plgctx->opts->dh_min_bits); if (retval) { pkiDebug("bad dh parameters\n"); goto cleanup; } } else if (!is_signed) { /*Anonymous pkinit requires DH*/ retval = KRB5KDC_ERR_PREAUTH_FAILED; krb5_set_error_message(context, retval, _("Anonymous pkinit without DH public " "value not supported.")); goto cleanup; } der_req = cb->request_body(context, rock); retval = krb5_c_make_checksum(context, CKSUMTYPE_NIST_SHA, NULL, 0, der_req, &cksum); if (retval) { pkiDebug("unable to calculate AS REQ checksum\n"); goto cleanup; } if (cksum.length != auth_pack->pkAuthenticator.paChecksum.length || k5_bcmp(cksum.contents, auth_pack->pkAuthenticator.paChecksum.contents, cksum.length) != 0) { pkiDebug("failed to match the checksum\n"); #ifdef DEBUG_CKSUM pkiDebug("calculating checksum on buf size (%d)\n", req_pkt->length); print_buffer(req_pkt->data, req_pkt->length); pkiDebug("received checksum type=%d size=%d ", auth_pack->pkAuthenticator.paChecksum.checksum_type, auth_pack->pkAuthenticator.paChecksum.length); print_buffer(auth_pack->pkAuthenticator.paChecksum.contents, auth_pack->pkAuthenticator.paChecksum.length); pkiDebug("expected checksum type=%d size=%d ", cksum.checksum_type, cksum.length); print_buffer(cksum.contents, cksum.length); #endif retval = KRB5KDC_ERR_PA_CHECKSUM_MUST_BE_INCLUDED; goto cleanup; } /* check if kdcPkId present and match KDC's subjectIdentifier */ if (reqp->kdcPkId.data != NULL) { int valid_kdcPkId = 0; retval = pkinit_check_kdc_pkid(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, (unsigned char *)reqp->kdcPkId.data, reqp->kdcPkId.length, &valid_kdcPkId); if (retval) goto cleanup; if (!valid_kdcPkId) pkiDebug("kdcPkId in AS_REQ does not match KDC's cert" "RFC says to ignore and proceed\n"); } /* remember the decoded auth_pack for verify_padata routine */ reqctx->rcv_auth_pack = auth_pack; auth_pack = NULL; break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: retval = k5int_decode_krb5_auth_pack_draft9(&k5data, &auth_pack9); if (retval) { pkiDebug("failed to decode krb5_auth_pack_draft9\n"); goto cleanup; } if (auth_pack9->clientPublicValue != NULL) { retval = server_check_dh(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, &auth_pack9->clientPublicValue->algorithm.parameters, plgctx->opts->dh_min_bits); if (retval) { pkiDebug("bad dh parameters\n"); goto cleanup; } } /* remember the decoded auth_pack for verify_padata routine */ reqctx->rcv_auth_pack9 = auth_pack9; auth_pack9 = NULL; break; } if (is_signed && plgctx->auth_indicators != NULL) { /* Assert configured authentication indicators. 
*/ for (sp = plgctx->auth_indicators; *sp != NULL; sp++) { retval = cb->add_auth_indicator(context, rock, *sp); if (retval) goto cleanup; } } /* remember to set the PREAUTH flag in the reply */ enc_tkt_reply->flags |= TKT_FLG_PRE_AUTH; modreq = (krb5_kdcpreauth_modreq)reqctx; reqctx = NULL; cleanup: if (retval && data->pa_type == KRB5_PADATA_PK_AS_REQ) { pkiDebug("pkinit_verify_padata failed: creating e-data\n"); if (pkinit_create_edata(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, plgctx->opts, retval, &e_data)) pkiDebug("pkinit_create_edata failed\n"); } switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: free_krb5_pa_pk_as_req(&reqp); free(cksum.contents); break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: free_krb5_pa_pk_as_req_draft9(&reqp9); } free(authp_data.data); free(krb5_authz.data); if (reqctx != NULL) pkinit_fini_kdc_req_context(context, reqctx); free_krb5_auth_pack(&auth_pack); free_krb5_auth_pack_draft9(context, &auth_pack9); (*respond)(arg, retval, modreq, e_data, NULL); }
C
krb5
0
CVE-2018-20855
https://www.cvedetails.com/cve/CVE-2018-20855/
CWE-119
https://github.com/torvalds/linux/commit/0625b4ba1a5d4703c7fb01c497bd6c156908af00
0625b4ba1a5d4703c7fb01c497bd6c156908af00
IB/mlx5: Fix leaking stack memory to userspace mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes were written. Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp") Cc: <[email protected]> Acked-by: Leon Romanovsky <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]>
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) { sig->signature = calc_sig(sig, size); }
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) { sig->signature = calc_sig(sig, size); }
C
linux
0
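The mlx5 record above fixes a stack-memory leak caused by a response structure that was never zero-initialized before being copied out to userspace. A minimal sketch of that class of fix, assuming an illustrative struct layout and a plain memcpy in place of the kernel's copy-to-user path (none of these names are the actual mlx5_ib code):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative response layout; the real mlx5_ib_create_qp_resp differs. */
    struct create_qp_resp {
        uint32_t bfreg_index;
        uint32_t reserved;      /* padding/unused bytes must not leak stack data */
        uint64_t uuar_addr;
    };

    /* Zeroing the whole struct first means any field (or padding) that is not
     * explicitly set is 0 rather than stale stack memory; in the kernel the
     * final copy would be copy_to_user() instead of memcpy(). */
    static int build_resp(void *out, size_t out_len, uint32_t bfreg_index)
    {
        struct create_qp_resp resp = {0};   /* the essential line of the fix */

        resp.bfreg_index = bfreg_index;
        if (out_len < sizeof(resp))
            return -1;
        memcpy(out, &resp, sizeof(resp));
        return 0;
    }

In kernel code the same effect is often obtained with an explicit memset of the response before filling in its fields.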
CVE-2011-2350
https://www.cvedetails.com/cve/CVE-2011-2350/
CWE-20
https://github.com/chromium/chromium/commit/b944f670bb7a8a919daac497a4ea0536c954c201
b944f670bb7a8a919daac497a4ea0536c954c201
[JSC] Implement a helper method createNotEnoughArgumentsError() https://bugs.webkit.org/show_bug.cgi?id=85102 Reviewed by Geoffrey Garen. In bug 84787, kbr@ requested to avoid hard-coding createTypeError(exec, "Not enough arguments") here and there. This patch implements createNotEnoughArgumentsError(exec) and uses it in JSC bindings. c.f. a corresponding bug for V8 bindings is bug 85097. Source/JavaScriptCore: * runtime/Error.cpp: (JSC::createNotEnoughArgumentsError): (JSC): * runtime/Error.h: (JSC): Source/WebCore: Test: bindings/scripts/test/TestObj.idl * bindings/scripts/CodeGeneratorJS.pm: Modified as described above. (GenerateArgumentsCountCheck): * bindings/js/JSDataViewCustom.cpp: Ditto. (WebCore::getDataViewMember): (WebCore::setDataViewMember): * bindings/js/JSDeprecatedPeerConnectionCustom.cpp: (WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection): * bindings/js/JSDirectoryEntryCustom.cpp: (WebCore::JSDirectoryEntry::getFile): (WebCore::JSDirectoryEntry::getDirectory): * bindings/js/JSSharedWorkerCustom.cpp: (WebCore::JSSharedWorkerConstructor::constructJSSharedWorker): * bindings/js/JSWebKitMutationObserverCustom.cpp: (WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver): (WebCore::JSWebKitMutationObserver::observe): * bindings/js/JSWorkerCustom.cpp: (WebCore::JSWorkerConstructor::constructJSWorker): * bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests. (WebCore::jsFloat64ArrayPrototypeFunctionFoo): * bindings/scripts/test/JS/JSTestActiveDOMObject.cpp: (WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction): (WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage): * bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp: (WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction): * bindings/scripts/test/JS/JSTestEventTarget.cpp: (WebCore::jsTestEventTargetPrototypeFunctionItem): (WebCore::jsTestEventTargetPrototypeFunctionAddEventListener): (WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener): (WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent): * bindings/scripts/test/JS/JSTestInterface.cpp: (WebCore::JSTestInterfaceConstructor::constructJSTestInterface): (WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2): * bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp: (WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod): * bindings/scripts/test/JS/JSTestNamedConstructor.cpp: (WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor): * bindings/scripts/test/JS/JSTestObj.cpp: (WebCore::JSTestObjConstructor::constructJSTestObj): (WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg): (WebCore::jsTestObjPrototypeFunctionMethodReturningSequence): (WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows): (WebCore::jsTestObjPrototypeFunctionSerializedValue): (WebCore::jsTestObjPrototypeFunctionIdbKey): (WebCore::jsTestObjPrototypeFunctionOptionsObject): (WebCore::jsTestObjPrototypeFunctionAddEventListener): (WebCore::jsTestObjPrototypeFunctionRemoveEventListener): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg): (WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs): (WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg): 
(WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod1): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod2): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod3): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod4): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod5): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod6): (WebCore::jsTestObjPrototypeFunctionOverloadedMethod7): (WebCore::jsTestObjConstructorFunctionClassMethod2): (WebCore::jsTestObjConstructorFunctionOverloadedMethod11): (WebCore::jsTestObjConstructorFunctionOverloadedMethod12): (WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray): (WebCore::jsTestObjPrototypeFunctionConvert1): (WebCore::jsTestObjPrototypeFunctionConvert2): (WebCore::jsTestObjPrototypeFunctionConvert3): (WebCore::jsTestObjPrototypeFunctionConvert4): (WebCore::jsTestObjPrototypeFunctionConvert5): (WebCore::jsTestObjPrototypeFunctionStrictFunction): * bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp: (WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface): (WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList): git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538
JSTestActiveDOMObject::JSTestActiveDOMObject(Structure* structure, JSDOMGlobalObject* globalObject, PassRefPtr<TestActiveDOMObject> impl) : JSDOMWrapper(structure, globalObject) , m_impl(impl.leakRef()) { }
JSTestActiveDOMObject::JSTestActiveDOMObject(Structure* structure, JSDOMGlobalObject* globalObject, PassRefPtr<TestActiveDOMObject> impl) : JSDOMWrapper(structure, globalObject) , m_impl(impl.leakRef()) { }
C
Chrome
0
CVE-2017-15423
https://www.cvedetails.com/cve/CVE-2017-15423/
CWE-310
https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
Roll src/third_party/boringssl/src 664e99a64..696c13bd6 https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604 BUG=778101 Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c Reviewed-on: https://chromium-review.googlesource.com/747941 Reviewed-by: Avi Drissman <[email protected]> Reviewed-by: David Benjamin <[email protected]> Commit-Queue: Steven Valdez <[email protected]> Cr-Commit-Position: refs/heads/master@{#513774}
void BrowserMainLoop::SynchronouslyFlushStartupTasks() { startup_task_runner_->RunAllTasksNow(); }
void BrowserMainLoop::SynchronouslyFlushStartupTasks() { startup_task_runner_->RunAllTasksNow(); }
C
Chrome
0
CVE-2011-3209
https://www.cvedetails.com/cve/CVE-2011-3209/
CWE-189
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
f8bd2258e2d520dff28c855658bd24bdafb5102d
remove div_long_long_rem x86 is the only arch right now, which provides an optimized for div_long_long_rem and it has the downside that one has to be very careful that the divide doesn't overflow. The API is a little akward, as the arguments for the unsigned divide are signed. The signed version also doesn't handle a negative divisor and produces worse code on 64bit archs. There is little incentive to keep this API alive, so this converts the few users to the new API. Signed-off-by: Roman Zippel <[email protected]> Cc: Ralf Baechle <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: john stultz <[email protected]> Cc: Christoph Lameter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static inline unsigned long slabs_node(struct kmem_cache *s, int node) { return 0; }
static inline unsigned long slabs_node(struct kmem_cache *s, int node) { return 0; }
C
linux
0
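The commit message in the record above notes that the removed x86 helper was easy to misuse because callers had to make sure the divide could not overflow its narrow quotient. A stand-alone C sketch of the safer full-width interface, deliberately not naming any kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Full 64-bit division with an explicit 32-bit remainder; the quotient is
     * kept at 64 bits so large dividends cannot silently overflow it. */
    static uint64_t div64_with_rem(uint64_t dividend, uint32_t divisor,
                                   uint32_t *remainder)
    {
        *remainder = (uint32_t)(dividend % divisor);
        return dividend / divisor;
    }

    int main(void)
    {
        uint64_t ns = 10000000000ULL;          /* 10 seconds in nanoseconds */
        uint32_t rem;
        uint64_t q = div64_with_rem(ns, 1000u, &rem);

        /* With a large enough dividend the quotient exceeds UINT32_MAX, which
         * is exactly the overflow the old helper's callers had to guard
         * against by hand. */
        printf("quotient=%" PRIu64 " remainder=%u\n", q, rem);
        return 0;
    }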
CVE-2018-6159
https://www.cvedetails.com/cve/CVE-2018-6159/
CWE-200
https://github.com/chromium/chromium/commit/01b42e2bc2aac531b17596729ae4e5c223ae7124
01b42e2bc2aac531b17596729ae4e5c223ae7124
Fix timing allow check algorithm for service workers This CL uses the OriginalURLViaServiceWorker() in the timing allow check algorithm if the response WasFetchedViaServiceWorker(). This way, if a service worker changes a same origin request to become cross origin, then the timing allow check algorithm will still fail. resource-timing-worker.js is changed so it avoids an empty Response, which is an odd case in terms of same origin checks. Bug: 837275 Change-Id: I7e497a6fcc2ee14244121b915ca5f5cceded417a Reviewed-on: https://chromium-review.googlesource.com/1038229 Commit-Queue: Nicolás Peña Moreno <[email protected]> Reviewed-by: Yoav Weiss <[email protected]> Reviewed-by: Timothy Dresser <[email protected]> Cr-Commit-Position: refs/heads/master@{#555476}
WebResourceTimingInfo Performance::GenerateResourceTiming( const SecurityOrigin& destination_origin, const ResourceTimingInfo& info, ExecutionContext& context_for_use_counter) { const ResourceResponse& final_response = info.FinalResponse(); WebResourceTimingInfo result; result.name = info.InitialURL().GetString(); result.start_time = info.InitialTime(); result.alpn_negotiated_protocol = final_response.AlpnNegotiatedProtocol(); result.connection_info = final_response.ConnectionInfoString(); result.timing = final_response.GetResourceLoadTiming(); result.finish_time = info.LoadFinishTime(); result.allow_timing_details = PassesTimingAllowCheck( final_response, destination_origin, info.OriginalTimingAllowOrigin(), &context_for_use_counter); const Vector<ResourceResponse>& redirect_chain = info.RedirectChain(); if (!redirect_chain.IsEmpty()) { result.allow_redirect_details = AllowsTimingRedirect(redirect_chain, final_response, destination_origin, &context_for_use_counter); if (ResourceLoadTiming* last_chained_timing = redirect_chain.back().GetResourceLoadTiming()) { result.last_redirect_end_time = TimeTicksInSeconds(last_chained_timing->ReceiveHeadersEnd()); } else { result.allow_redirect_details = false; result.last_redirect_end_time = 0.0; } if (!result.allow_redirect_details) { if (ResourceLoadTiming* final_timing = final_response.GetResourceLoadTiming()) { result.start_time = TimeTicksInSeconds(final_timing->RequestTime()); } } } else { result.allow_redirect_details = false; result.last_redirect_end_time = 0.0; } result.transfer_size = info.TransferSize(); result.encoded_body_size = final_response.EncodedBodyLength(); result.decoded_body_size = final_response.DecodedBodyLength(); result.did_reuse_connection = final_response.ConnectionReused(); result.allow_negative_values = info.NegativeAllowed(); if (result.allow_timing_details) { result.server_timing = PerformanceServerTiming::ParseServerTiming(info); } if (!result.server_timing.empty()) { UseCounter::Count(&context_for_use_counter, WebFeature::kPerformanceServerTiming); } return result; }
WebResourceTimingInfo Performance::GenerateResourceTiming( const SecurityOrigin& destination_origin, const ResourceTimingInfo& info, ExecutionContext& context_for_use_counter) { const ResourceResponse& final_response = info.FinalResponse(); WebResourceTimingInfo result; result.name = info.InitialURL().GetString(); result.start_time = info.InitialTime(); result.alpn_negotiated_protocol = final_response.AlpnNegotiatedProtocol(); result.connection_info = final_response.ConnectionInfoString(); result.timing = final_response.GetResourceLoadTiming(); result.finish_time = info.LoadFinishTime(); result.allow_timing_details = PassesTimingAllowCheck( final_response, destination_origin, info.OriginalTimingAllowOrigin(), &context_for_use_counter); const Vector<ResourceResponse>& redirect_chain = info.RedirectChain(); if (!redirect_chain.IsEmpty()) { result.allow_redirect_details = AllowsTimingRedirect(redirect_chain, final_response, destination_origin, &context_for_use_counter); if (ResourceLoadTiming* last_chained_timing = redirect_chain.back().GetResourceLoadTiming()) { result.last_redirect_end_time = TimeTicksInSeconds(last_chained_timing->ReceiveHeadersEnd()); } else { result.allow_redirect_details = false; result.last_redirect_end_time = 0.0; } if (!result.allow_redirect_details) { if (ResourceLoadTiming* final_timing = final_response.GetResourceLoadTiming()) { result.start_time = TimeTicksInSeconds(final_timing->RequestTime()); } } } else { result.allow_redirect_details = false; result.last_redirect_end_time = 0.0; } result.transfer_size = info.TransferSize(); result.encoded_body_size = final_response.EncodedBodyLength(); result.decoded_body_size = final_response.DecodedBodyLength(); result.did_reuse_connection = final_response.ConnectionReused(); result.allow_negative_values = info.NegativeAllowed(); if (result.allow_timing_details) { result.server_timing = PerformanceServerTiming::ParseServerTiming(info); } if (!result.server_timing.empty()) { UseCounter::Count(&context_for_use_counter, WebFeature::kPerformanceServerTiming); } return result; }
C
Chrome
0
CVE-2015-2686
https://www.cvedetails.com/cve/CVE-2015-2686/
CWE-264
https://github.com/torvalds/linux/commit/4de930efc23b92ddf88ce91c405ee645fe6e27ea
4de930efc23b92ddf88ce91c405ee645fe6e27ea
net: validate the range we feed to iov_iter_init() in sys_sendto/sys_recvfrom Cc: [email protected] # v3.19 Signed-off-by: Al Viro <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; int ret; init_sync_kiocb(&iocb, NULL); ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; }
static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; int ret; init_sync_kiocb(&iocb, NULL); ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; }
C
linux
0
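The record above is about validating the user-supplied range before it is fed to iov_iter_init() in sys_sendto()/sys_recvfrom(). A hedged, userspace-style sketch of the same idea, with illustrative limits rather than the actual net/socket.c checks:

    #include <stddef.h>
    #include <stdint.h>

    /* Validate a user-supplied (base, len) pair before building any iterator
     * over it: the length must be representable by the signed sizes the rest
     * of the stack expects, and the end of the range must not wrap around. */
    static int validate_user_range(const void *base, size_t len)
    {
        uintptr_t start = (uintptr_t)base;

        if (len > (size_t)PTRDIFF_MAX)      /* length too large to reason about */
            return -1;
        if (start + len < start)            /* wrap-around check */
            return -1;
        return 0;
    }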
CVE-2016-9262
https://www.cvedetails.com/cve/CVE-2016-9262/
CWE-190
https://github.com/mdadams/jasper/commit/634ce8e8a5accc0fa05dd2c20d42b4749d4b2735
634ce8e8a5accc0fa05dd2c20d42b4749d4b2735
Made some changes to the I/O stream library for memory streams. There were a number of potential problems due to the possibility of integer overflow. Changed some integral types to the larger types size_t or ssize_t. For example, the function mem_resize now takes the buffer size parameter as a size_t. Added a new function jas_stream_memopen2, which takes a buffer size specified as a size_t instead of an int. This can be used in jas_image_cmpt_create to avoid potential overflow problems. Added a new function jas_deprecated to warn about reliance on deprecated library behavior.
jas_stream_t *jas_stream_tmpfile() { jas_stream_t *stream; jas_stream_fileobj_t *obj; if (!(stream = jas_stream_create())) { return 0; } /* A temporary file stream is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Allocate memory for the underlying temporary file object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = -1; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = obj; /* Choose a file name. */ tmpnam(obj->pathname); /* Open the underlying file. */ if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY, JAS_STREAM_PERMS)) < 0) { jas_stream_destroy(stream); return 0; } /* Unlink the file so that it will disappear if the program terminates abnormally. */ /* Under UNIX, one can unlink an open file and continue to do I/O on it. Not all operating systems support this functionality, however. For example, under Microsoft Windows the unlink operation will fail, since the file is open. */ if (unlink(obj->pathname)) { /* We will try unlinking the file again after it is closed. */ obj->flags |= JAS_STREAM_FILEOBJ_DELONCLOSE; } /* Use full buffering. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); stream->ops_ = &jas_stream_fileops; return stream; }
jas_stream_t *jas_stream_tmpfile() { jas_stream_t *stream; jas_stream_fileobj_t *obj; if (!(stream = jas_stream_create())) { return 0; } /* A temporary file stream is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Allocate memory for the underlying temporary file object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = -1; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = obj; /* Choose a file name. */ tmpnam(obj->pathname); /* Open the underlying file. */ if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY, JAS_STREAM_PERMS)) < 0) { jas_stream_destroy(stream); return 0; } /* Unlink the file so that it will disappear if the program terminates abnormally. */ /* Under UNIX, one can unlink an open file and continue to do I/O on it. Not all operating systems support this functionality, however. For example, under Microsoft Windows the unlink operation will fail, since the file is open. */ if (unlink(obj->pathname)) { /* We will try unlinking the file again after it is closed. */ obj->flags |= JAS_STREAM_FILEOBJ_DELONCLOSE; } /* Use full buffering. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); stream->ops_ = &jas_stream_fileops; return stream; }
C
jasper
0
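The JasPer record above moves memory-stream buffer sizes to size_t to avoid integer overflow when sizes were carried in int. A minimal sketch of the overflow-checked allocation pattern the commit message describes; the helper name and signature here are hypothetical, not JasPer's API:

    #include <stdlib.h>
    #include <stdint.h>

    /* Allocate count elements of size elemsize, refusing to proceed if the
     * total byte count would overflow size_t.  Hypothetical helper, not part
     * of the JasPer API. */
    static void *alloc_array_checked(size_t count, size_t elemsize)
    {
        if (elemsize != 0 && count > SIZE_MAX / elemsize)
            return NULL;                    /* count * elemsize would wrap */
        return malloc(count * elemsize);
    }

Checking the multiplication before it happens is the point: once count * elemsize has wrapped, the undersized allocation looks perfectly valid to every later bounds check.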
null
null
null
https://github.com/chromium/chromium/commit/befb46ae3385fa13975521e9a2281e35805b339e
befb46ae3385fa13975521e9a2281e35805b339e
2009-10-23 Chris Evans <[email protected]> Reviewed by Adam Barth. Added test for bug 27239 (ignore Refresh for view source mode). https://bugs.webkit.org/show_bug.cgi?id=27239 * http/tests/security/view-source-no-refresh.html: Added * http/tests/security/view-source-no-refresh-expected.txt: Added * http/tests/security/resources/view-source-no-refresh.php: Added 2009-10-23 Chris Evans <[email protected]> Reviewed by Adam Barth. Ignore the Refresh header if we're in view source mode. https://bugs.webkit.org/show_bug.cgi?id=27239 Test: http/tests/security/view-source-no-refresh.html * loader/FrameLoader.cpp: ignore Refresh in view-source mode. git-svn-id: svn://svn.chromium.org/blink/trunk@50018 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void FrameLoader::loadURLIntoChildFrame(const KURL& url, const String& referer, Frame* childFrame) { ASSERT(childFrame); HistoryItem* parentItem = history()->currentItem(); FrameLoadType loadType = this->loadType(); FrameLoadType childLoadType = FrameLoadTypeRedirectWithLockedBackForwardList; KURL workingURL = url; if (parentItem && parentItem->children().size() && isBackForwardLoadType(loadType)) { HistoryItem* childItem = parentItem->childItemWithTarget(childFrame->tree()->name()); if (childItem) { workingURL = KURL(ParsedURLString, childItem->originalURLString()); childLoadType = loadType; childFrame->loader()->history()->setProvisionalItem(childItem); } } RefPtr<Archive> subframeArchive = activeDocumentLoader()->popArchiveForSubframe(childFrame->tree()->name()); if (subframeArchive) childFrame->loader()->loadArchive(subframeArchive.release()); else childFrame->loader()->loadURL(workingURL, referer, String(), false, childLoadType, 0, 0); }
void FrameLoader::loadURLIntoChildFrame(const KURL& url, const String& referer, Frame* childFrame) { ASSERT(childFrame); HistoryItem* parentItem = history()->currentItem(); FrameLoadType loadType = this->loadType(); FrameLoadType childLoadType = FrameLoadTypeRedirectWithLockedBackForwardList; KURL workingURL = url; if (parentItem && parentItem->children().size() && isBackForwardLoadType(loadType)) { HistoryItem* childItem = parentItem->childItemWithTarget(childFrame->tree()->name()); if (childItem) { workingURL = KURL(ParsedURLString, childItem->originalURLString()); childLoadType = loadType; childFrame->loader()->history()->setProvisionalItem(childItem); } } RefPtr<Archive> subframeArchive = activeDocumentLoader()->popArchiveForSubframe(childFrame->tree()->name()); if (subframeArchive) childFrame->loader()->loadArchive(subframeArchive.release()); else childFrame->loader()->loadURL(workingURL, referer, String(), false, childLoadType, 0, 0); }
C
Chrome
0
CVE-2017-5019
https://www.cvedetails.com/cve/CVE-2017-5019/
CWE-416
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
Convert FrameHostMsg_DidAddMessageToConsole to Mojo. Note: Since this required changing the test RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually re-introduced https://crbug.com/666714 locally (the bug the test was added for), and reran the test to confirm that it still covers the bug. Bug: 786836 Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270 Commit-Queue: Lowell Manners <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Camille Lamy <[email protected]> Cr-Commit-Position: refs/heads/master@{#653137}
void RenderFrameHostImpl::OnCreateChildFrame( int new_routing_id, service_manager::mojom::InterfaceProviderRequest new_interface_provider_provider_request, blink::mojom::DocumentInterfaceBrokerRequest document_interface_broker_content_request, blink::mojom::DocumentInterfaceBrokerRequest document_interface_broker_blink_request, blink::WebTreeScopeType scope, const std::string& frame_name, const std::string& frame_unique_name, bool is_created_by_script, const base::UnguessableToken& devtools_frame_token, const blink::FramePolicy& frame_policy, const FrameOwnerProperties& frame_owner_properties, const blink::FrameOwnerElementType owner_type) { DCHECK(!frame_unique_name.empty()); DCHECK(new_interface_provider_provider_request.is_pending()); DCHECK(document_interface_broker_content_request.is_pending()); DCHECK(document_interface_broker_blink_request.is_pending()); if (owner_type == blink::FrameOwnerElementType::kNone) { bad_message::ReceivedBadMessage( GetProcess(), bad_message::RFH_CHILD_FRAME_NEEDS_OWNER_ELEMENT_TYPE); } if (!is_active() || !IsCurrent() || !render_frame_created_) return; frame_tree_->AddFrame( frame_tree_node_, GetProcess()->GetID(), new_routing_id, std::move(new_interface_provider_provider_request), std::move(document_interface_broker_content_request), std::move(document_interface_broker_blink_request), scope, frame_name, frame_unique_name, is_created_by_script, devtools_frame_token, frame_policy, frame_owner_properties, was_discarded_, owner_type); }
void RenderFrameHostImpl::OnCreateChildFrame( int new_routing_id, service_manager::mojom::InterfaceProviderRequest new_interface_provider_provider_request, blink::mojom::DocumentInterfaceBrokerRequest document_interface_broker_content_request, blink::mojom::DocumentInterfaceBrokerRequest document_interface_broker_blink_request, blink::WebTreeScopeType scope, const std::string& frame_name, const std::string& frame_unique_name, bool is_created_by_script, const base::UnguessableToken& devtools_frame_token, const blink::FramePolicy& frame_policy, const FrameOwnerProperties& frame_owner_properties, const blink::FrameOwnerElementType owner_type) { DCHECK(!frame_unique_name.empty()); DCHECK(new_interface_provider_provider_request.is_pending()); DCHECK(document_interface_broker_content_request.is_pending()); DCHECK(document_interface_broker_blink_request.is_pending()); if (owner_type == blink::FrameOwnerElementType::kNone) { bad_message::ReceivedBadMessage( GetProcess(), bad_message::RFH_CHILD_FRAME_NEEDS_OWNER_ELEMENT_TYPE); } if (!is_active() || !IsCurrent() || !render_frame_created_) return; frame_tree_->AddFrame( frame_tree_node_, GetProcess()->GetID(), new_routing_id, std::move(new_interface_provider_provider_request), std::move(document_interface_broker_content_request), std::move(document_interface_broker_blink_request), scope, frame_name, frame_unique_name, is_created_by_script, devtools_frame_token, frame_policy, frame_owner_properties, was_discarded_, owner_type); }
C
Chrome
0
CVE-2011-4127
https://www.cvedetails.com/cve/CVE-2011-4127/
CWE-264
https://github.com/torvalds/linux/commit/ec8013beddd717d1740cfefb1a9b900deef85462
ec8013beddd717d1740cfefb1a9b900deef85462
dm: do not forward ioctls from logical volumes to the underlying device A logical volume can map to just part of underlying physical volume. In this case, it must be treated like a partition. Based on a patch from Alasdair G Kergon. Cc: Alasdair G Kergon <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void bypass_pg(struct multipath *m, struct priority_group *pg, int bypassed) { unsigned long flags; spin_lock_irqsave(&m->lock, flags); pg->bypassed = bypassed; m->current_pgpath = NULL; m->current_pg = NULL; spin_unlock_irqrestore(&m->lock, flags); schedule_work(&m->trigger_event); }
static void bypass_pg(struct multipath *m, struct priority_group *pg, int bypassed) { unsigned long flags; spin_lock_irqsave(&m->lock, flags); pg->bypassed = bypassed; m->current_pgpath = NULL; m->current_pg = NULL; spin_unlock_irqrestore(&m->lock, flags); schedule_work(&m->trigger_event); }
C
linux
0
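The device-mapper record above explains that a logical volume mapping only part of an underlying device must be treated like a partition, so ioctls must not be forwarded to the whole device. A simplified sketch of that gate, with illustrative fields rather than the dm-mpath structures: forward only when the mapped extent starts at offset 0 and covers the entire device.

    #include <stdbool.h>
    #include <stdint.h>

    struct mapping {
        uint64_t start_sector;   /* offset of the mapped extent on the device */
        uint64_t len_sectors;    /* length of the mapped extent */
        uint64_t dev_sectors;    /* total size of the underlying device */
    };

    /* Forwarding a device-wide ioctl is only safe when the logical volume is
     * a 1:1 view of the underlying device; anything smaller must be treated
     * like a partition, exactly as the commit message above says. */
    static bool may_forward_ioctl(const struct mapping *m)
    {
        return m->start_sector == 0 && m->len_sectors == m->dev_sectors;
    }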
CVE-2016-3835
https://www.cvedetails.com/cve/CVE-2016-3835/
CWE-200
https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95
7558d03e6498e970b761aa44fff6b2c659202d95
DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers Heap pointers do not point to user virtual addresses in case of secure session. Set them to NULL and add checks to avoid accesing them Bug: 28815329 Bug: 28920116 Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
unsigned venc_dev::venc_stop( void) { struct venc_msg venc_msg; struct v4l2_requestbuffers bufreq; int rc = 0, ret = 0; if (!stopped) { enum v4l2_buf_type cap_type; if (streaming[OUTPUT_PORT]) { cap_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; rc = ioctl(m_nDriver_fd, VIDIOC_STREAMOFF, &cap_type); if (rc) { DEBUG_PRINT_ERROR("Failed to call streamoff on driver: capability: %d, %d", cap_type, rc); } else streaming[OUTPUT_PORT] = false; DEBUG_PRINT_LOW("Releasing registered buffers from driver on o/p port"); bufreq.memory = V4L2_MEMORY_USERPTR; bufreq.count = 0; bufreq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; ret = ioctl(m_nDriver_fd, VIDIOC_REQBUFS, &bufreq); if (ret) { DEBUG_PRINT_ERROR("ERROR: VIDIOC_REQBUFS OUTPUT MPLANE Failed"); return false; } } if (!rc && streaming[CAPTURE_PORT]) { cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; rc = ioctl(m_nDriver_fd, VIDIOC_STREAMOFF, &cap_type); if (rc) { DEBUG_PRINT_ERROR("Failed to call streamoff on driver: capability: %d, %d", cap_type, rc); } else streaming[CAPTURE_PORT] = false; DEBUG_PRINT_LOW("Releasing registered buffers from driver on capture port"); bufreq.memory = V4L2_MEMORY_USERPTR; bufreq.count = 0; bufreq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; ret = ioctl(m_nDriver_fd, VIDIOC_REQBUFS, &bufreq); if (ret) { DEBUG_PRINT_ERROR("ERROR: VIDIOC_REQBUFS CAPTURE MPLANE Failed"); return false; } } if (!rc && !ret) { venc_stop_done(); stopped = 1; /*set flag to re-configure when started again*/ resume_in_stopped = 1; } } return rc; }
unsigned venc_dev::venc_stop( void) { struct venc_msg venc_msg; struct v4l2_requestbuffers bufreq; int rc = 0, ret = 0; if (!stopped) { enum v4l2_buf_type cap_type; if (streaming[OUTPUT_PORT]) { cap_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; rc = ioctl(m_nDriver_fd, VIDIOC_STREAMOFF, &cap_type); if (rc) { DEBUG_PRINT_ERROR("Failed to call streamoff on driver: capability: %d, %d", cap_type, rc); } else streaming[OUTPUT_PORT] = false; DEBUG_PRINT_LOW("Releasing registered buffers from driver on o/p port"); bufreq.memory = V4L2_MEMORY_USERPTR; bufreq.count = 0; bufreq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; ret = ioctl(m_nDriver_fd, VIDIOC_REQBUFS, &bufreq); if (ret) { DEBUG_PRINT_ERROR("ERROR: VIDIOC_REQBUFS OUTPUT MPLANE Failed"); return false; } } if (!rc && streaming[CAPTURE_PORT]) { cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; rc = ioctl(m_nDriver_fd, VIDIOC_STREAMOFF, &cap_type); if (rc) { DEBUG_PRINT_ERROR("Failed to call streamoff on driver: capability: %d, %d", cap_type, rc); } else streaming[CAPTURE_PORT] = false; DEBUG_PRINT_LOW("Releasing registered buffers from driver on capture port"); bufreq.memory = V4L2_MEMORY_USERPTR; bufreq.count = 0; bufreq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; ret = ioctl(m_nDriver_fd, VIDIOC_REQBUFS, &bufreq); if (ret) { DEBUG_PRINT_ERROR("ERROR: VIDIOC_REQBUFS CAPTURE MPLANE Failed"); return false; } } if (!rc && !ret) { venc_stop_done(); stopped = 1; /*set flag to re-configure when started again*/ resume_in_stopped = 1; } } return rc; }
C
Android
0
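The preceding record's commit message says heap pointers do not point to user virtual addresses in secure sessions, so they are set to NULL and checked before any access. A small stand-alone sketch of that guard; the structure and field names are illustrative, not the actual OMX venc types:

    #include <stddef.h>
    #include <string.h>

    struct buffer_info {
        void  *heap_ptr;    /* NULL for secure sessions */
        size_t len;
    };

    /* Only touch the mapped heap pointer when it actually exists; in a secure
     * session the payload stays in protected memory and must not be
     * dereferenced from here. */
    static int scrub_buffer(struct buffer_info *buf)
    {
        if (buf == NULL || buf->heap_ptr == NULL)
            return -1;          /* nothing mappable to touch */
        memset(buf->heap_ptr, 0, buf->len);
        return 0;
    }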
CVE-2015-1265
https://www.cvedetails.com/cve/CVE-2015-1265/
null
https://github.com/chromium/chromium/commit/04ff52bb66284467ccb43d90800013b89ee8db75
04ff52bb66284467ccb43d90800013b89ee8db75
Switching AudioOutputAuthorizationHandler from using AudioManager interface to AudioSystem one. BUG=672468 CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Review-Url: https://codereview.chromium.org/2692203003 Cr-Commit-Position: refs/heads/master@{#450939}
void AudioOutputAuthorizationHandler::GetDeviceParameters( AuthorizationCompletedCallback cb, const std::string& raw_device_id) const { DCHECK_CURRENTLY_ON(BrowserThread::IO); DCHECK(!raw_device_id.empty()); audio_system_->GetOutputStreamParameters( raw_device_id, base::Bind(&AudioOutputAuthorizationHandler::DeviceParametersReceived, weak_factory_.GetWeakPtr(), std::move(cb), false, raw_device_id)); }
void AudioOutputAuthorizationHandler::GetDeviceParameters( AuthorizationCompletedCallback cb, const std::string& raw_device_id) const { DCHECK_CURRENTLY_ON(BrowserThread::IO); DCHECK(!raw_device_id.empty()); base::PostTaskAndReplyWithResult( audio_manager_->GetTaskRunner(), FROM_HERE, base::Bind(&GetDeviceParametersOnDeviceThread, base::Unretained(audio_manager_), raw_device_id), base::Bind(&AudioOutputAuthorizationHandler::DeviceParametersReceived, weak_factory_.GetWeakPtr(), std::move(cb), false, raw_device_id)); }
C
Chrome
1
null
null
null
https://github.com/chromium/chromium/commit/51dfe5e3b332bcea02fb4d4c7493ae841106dd9b
51dfe5e3b332bcea02fb4d4c7493ae841106dd9b
Add ALSA support to volume keys If PulseAudio is running, everything should behave as before, otherwise use ALSA API for adjusting volume. The previous PulseAudioMixer was split into AudioMixerBase and audioMixerPusle, then AudioMixerAlsa was added. BUG=chromium-os:10470 TEST=Volume keys should work even if pulseaudio disabled Review URL: http://codereview.chromium.org/5859003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@71115 0039d316-1c4b-4281-b951-d872f2087c98
AudioMixer::State AudioMixerPulse::GetState() const { AutoLock lock(mixer_state_lock_); if ((mixer_state_ == READY) && (pa_context_get_state(pa_context_) != PA_CONTEXT_READY)) mixer_state_ = IN_ERROR; return mixer_state_; }
PulseAudioMixer::State PulseAudioMixer::CheckState() const { AutoLock lock(mixer_state_lock_); if ((mixer_state_ == READY) && (pa_context_get_state(pa_context_) != PA_CONTEXT_READY)) mixer_state_ = IN_ERROR; return mixer_state_; }
C
Chrome
1
CVE-2019-15148
https://www.cvedetails.com/cve/CVE-2019-15148/
CWE-787
https://github.com/gopro/gpmf-parser/commit/341f12cd5b97ab419e53853ca00176457c9f1681
341f12cd5b97ab419e53853ca00176457c9f1681
fixed many security issues with the too crude mp4 reader
GPMF_ERR GPMF_Reserved(uint32_t key) { if(key == GPMF_KEY_DEVICE) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_DEVICE_ID) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_DEVICE_NAME) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_STREAM) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_STREAM_NAME) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_SI_UNITS) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_UNITS) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_SCALE) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TYPE) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TOTAL_SAMPLES) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TICK) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TOCK) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_EMPTY_PAYLOADS) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_REMARK) return GPMF_ERROR_RESERVED; return GPMF_OK; }
GPMF_ERR GPMF_Reserved(uint32_t key) { if(key == GPMF_KEY_DEVICE) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_DEVICE_ID) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_DEVICE_NAME) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_STREAM) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_STREAM_NAME) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_SI_UNITS) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_UNITS) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_SCALE) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TYPE) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TOTAL_SAMPLES) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TICK) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_TOCK) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_EMPTY_PAYLOADS) return GPMF_ERROR_RESERVED; if(key == GPMF_KEY_REMARK) return GPMF_ERROR_RESERVED; return GPMF_OK; }
C
gpmf-parser
0
CVE-2015-1274
https://www.cvedetails.com/cve/CVE-2015-1274/
CWE-254
https://github.com/chromium/chromium/commit/d27468a832d5316884bd02f459cbf493697fd7e1
d27468a832d5316884bd02f459cbf493697fd7e1
Switch to equalIgnoringASCIICase throughout modules/accessibility BUG=627682 Review-Url: https://codereview.chromium.org/2793913007 Cr-Commit-Position: refs/heads/master@{#461858}
static Element* siblingWithAriaRole(String role, Node* node) { Node* parent = node->parentNode(); if (!parent) return 0; for (Element* sibling = ElementTraversal::firstChild(*parent); sibling; sibling = ElementTraversal::nextSibling(*sibling)) { const AtomicString& siblingAriaRole = AccessibleNode::getProperty(sibling, AOMStringProperty::kRole); if (equalIgnoringASCIICase(siblingAriaRole, role)) return sibling; } return 0; }
static Element* siblingWithAriaRole(String role, Node* node) { Node* parent = node->parentNode(); if (!parent) return 0; for (Element* sibling = ElementTraversal::firstChild(*parent); sibling; sibling = ElementTraversal::nextSibling(*sibling)) { const AtomicString& siblingAriaRole = AccessibleNode::getProperty(sibling, AOMStringProperty::kRole); if (equalIgnoringCase(siblingAriaRole, role)) return sibling; } return 0; }
C
Chrome
1
CVE-2013-4254
https://www.cvedetails.com/cve/CVE-2013-4254/
CWE-20
https://github.com/torvalds/linux/commit/c95eb3184ea1a3a2551df57190c81da695e2144b
c95eb3184ea1a3a2551df57190c81da695e2144b
ARM: 7809/1: perf: fix event validation for software group leaders It is possible to construct an event group with a software event as a group leader and then subsequently add a hardware event to the group. This results in the event group being validated by adding all members of the group to a fake PMU and attempting to allocate each event on their respective PMU. Unfortunately, for software events wthout a corresponding arm_pmu, this results in a kernel crash attempting to dereference the ->get_event_idx function pointer. This patch fixes the problem by checking explicitly for software events and ignoring those in event validation (since they can always be scheduled). We will probably want to revisit this for 3.12, since the validation checks don't appear to work correctly when dealing with multiple hardware PMUs anyway. Cc: <[email protected]> Reported-by: Vince Weaver <[email protected]> Tested-by: Vince Weaver <[email protected]> Tested-by: Mark Rutland <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Russell King <[email protected]>
int armpmu_register(struct arm_pmu *armpmu, int type) { armpmu_init(armpmu); pm_runtime_enable(&armpmu->plat_device->dev); pr_info("enabled with %s PMU driver, %d counters available\n", armpmu->name, armpmu->num_events); return perf_pmu_register(&armpmu->pmu, armpmu->name, type); }
int armpmu_register(struct arm_pmu *armpmu, int type) { armpmu_init(armpmu); pm_runtime_enable(&armpmu->plat_device->dev); pr_info("enabled with %s PMU driver, %d counters available\n", armpmu->name, armpmu->num_events); return perf_pmu_register(&armpmu->pmu, armpmu->name, type); }
C
linux
0
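The ARM perf record above explains that group validation crashed when a software group leader, which has no arm_pmu, was pushed through the hardware validation path, and that the fix is to treat software events as always schedulable. A hedged sketch of that shape, using simplified stand-in types rather than the arch/arm code:

    #include <stdbool.h>

    enum pmu_type { PMU_SOFTWARE, PMU_HARDWARE };

    struct event {
        enum pmu_type type;
        int hw_idx;              /* only meaningful for hardware events */
    };

    /* Software events can always be scheduled, so validation must not try to
     * allocate a hardware counter for them; attempting that is what
     * dereferenced a missing callback in the original bug. */
    static bool validate_event(const struct event *ev, int hw_counters_left)
    {
        if (ev->type == PMU_SOFTWARE)
            return true;                 /* always fits; skip hw allocation */
        return hw_counters_left > 0;     /* simplified hardware check */
    }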
null
null
null
https://github.com/chromium/chromium/commit/f2f703241635fa96fa630b83afcc9a330cc21b7e
f2f703241635fa96fa630b83afcc9a330cc21b7e
CrOS Shelf: Get rid of 'split view' mode for shelf background In the new UI, "maximized" and "split view" are treated the same in specs, so there is no more need for a separate "split view" mode. This folds it into the "maximized" mode. Note that the only thing that _seems_ different in shelf_background_animator is ShelfBackgroundAnimator::kMaxAlpha (255) vs kShelfTranslucentMaximizedWindow (254), which should be virtually impossible to distinguish. This CL therefore does not have any visual effect (and doesn't directly fix the linked bug, but is relevant). Bug: 899289 Change-Id: I60947338176ac15ca016b1ba4edf13d16362cb24 Reviewed-on: https://chromium-review.googlesource.com/c/1469741 Commit-Queue: Xiyuan Xia <[email protected]> Reviewed-by: Xiyuan Xia <[email protected]> Auto-Submit: Manu Cornet <[email protected]> Cr-Commit-Position: refs/heads/master@{#631752}
bool TriggerAutoHideTimeout() const { ShelfLayoutManager* layout_manager = GetShelfLayoutManager(); if (!layout_manager->auto_hide_timer_.IsRunning()) return false; layout_manager->auto_hide_timer_.FireNow(); return true; }
bool TriggerAutoHideTimeout() const { ShelfLayoutManager* layout_manager = GetShelfLayoutManager(); if (!layout_manager->auto_hide_timer_.IsRunning()) return false; layout_manager->auto_hide_timer_.FireNow(); return true; }
C
Chrome
0
CVE-2016-1586
https://www.cvedetails.com/cve/CVE-2016-1586/
CWE-20
https://git.launchpad.net/oxide/commit/?id=29014da83e5fc358d6bff0f574e9ed45e61a35ac
29014da83e5fc358d6bff0f574e9ed45e61a35ac
null
WebContextGetter::~WebContextGetter() {}
WebContextGetter::~WebContextGetter() {}
CPP
launchpad
0
CVE-2012-5517
https://www.cvedetails.com/cve/CVE-2012-5517/
null
https://github.com/torvalds/linux/commit/08dff7b7d629807dbb1f398c68dd9cd58dd657a1
08dff7b7d629807dbb1f398c68dd9cd58dd657a1
mm/hotplug: correctly add new zone to all other nodes' zone lists When online_pages() is called to add new memory to an empty zone, it rebuilds all zone lists by calling build_all_zonelists(). But there's a bug which prevents the new zone to be added to other nodes' zone lists. online_pages() { build_all_zonelists() ..... node_set_state(zone_to_nid(zone), N_HIGH_MEMORY) } Here the node of the zone is put into N_HIGH_MEMORY state after calling build_all_zonelists(), but build_all_zonelists() only adds zones from nodes in N_HIGH_MEMORY state to the fallback zone lists. build_all_zonelists() ->__build_all_zonelists() ->build_zonelists() ->find_next_best_node() ->for_each_node_state(n, N_HIGH_MEMORY) So memory in the new zone will never be used by other nodes, and it may cause strange behavor when system is under memory pressure. So put node into N_HIGH_MEMORY state before calling build_all_zonelists(). Signed-off-by: Jianguo Wu <[email protected]> Signed-off-by: Jiang Liu <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Rusty Russell <[email protected]> Cc: Yinghai Lu <[email protected]> Cc: Tony Luck <[email protected]> Cc: KAMEZAWA Hiroyuki <[email protected]> Cc: KOSAKI Motohiro <[email protected]> Cc: David Rientjes <[email protected]> Cc: Keping Chen <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) { long offlined = 0; int ret; ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, check_pages_isolated_cb); if (ret < 0) offlined = (long)ret; return offlined; }
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) { long offlined = 0; int ret; ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, check_pages_isolated_cb); if (ret < 0) offlined = (long)ret; return offlined; }
C
linux
0
CVE-2018-13304
https://www.cvedetails.com/cve/CVE-2018-13304/
CWE-617
https://github.com/FFmpeg/FFmpeg/commit/bd27a9364ca274ca97f1df6d984e88a0700fb235
bd27a9364ca274ca97f1df6d984e88a0700fb235
avcodec/mpeg4videodec: Remove use of FF_PROFILE_MPEG4_SIMPLE_STUDIO as indicator of studio profile The profile field is changed by code inside and outside the decoder, its not a reliable indicator of the internal codec state. Maintaining it consistency with studio_profile is messy. Its easier to just avoid it and use only studio_profile Fixes: assertion failure Fixes: ffmpeg_crash_9.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <[email protected]>
static void guess_dc(ERContext *s, int16_t *dc, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4); uint32_t (*dist)[4] = av_malloc_array(stride, h*sizeof(uint32_t)*4); if(!col || !dist) { av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n"); goto fail; } for(b_y=0; b_y<h; b_y++){ int color= 1024; int distance= -1; for(b_x=0; b_x<w; b_x++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][1]= color; dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999; } color= 1024; distance= -1; for(b_x=w-1; b_x>=0; b_x--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][0]= color; dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999; } } for(b_x=0; b_x<w; b_x++){ int color= 1024; int distance= -1; for(b_y=0; b_y<h; b_y++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][3]= color; dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999; } color= 1024; distance= -1; for(b_y=h-1; b_y>=0; b_y--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][2]= color; dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999; } } for (b_y = 0; b_y < h; b_y++) { for (b_x = 0; b_x < w; b_x++) { int mb_index, error, j; int64_t guess, weight_sum; mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride; error = s->error_status_table[mb_index]; if (IS_INTER(s->cur_pic.mb_type[mb_index])) continue; // inter if (!(error & ER_DC_ERROR)) continue; // dc-ok weight_sum = 0; guess = 0; for (j = 0; j < 4; j++) { int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1); guess += weight*(int64_t)col[b_x + b_y*stride][j]; weight_sum += weight; } guess = (guess + weight_sum / 2) / weight_sum; dc[b_x + b_y * stride] = guess; } } fail: av_freep(&col); av_freep(&dist); }
static void guess_dc(ERContext *s, int16_t *dc, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4); uint32_t (*dist)[4] = av_malloc_array(stride, h*sizeof(uint32_t)*4); if(!col || !dist) { av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n"); goto fail; } for(b_y=0; b_y<h; b_y++){ int color= 1024; int distance= -1; for(b_x=0; b_x<w; b_x++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][1]= color; dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999; } color= 1024; distance= -1; for(b_x=w-1; b_x>=0; b_x--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][0]= color; dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999; } } for(b_x=0; b_x<w; b_x++){ int color= 1024; int distance= -1; for(b_y=0; b_y<h; b_y++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][3]= color; dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999; } color= 1024; distance= -1; for(b_y=h-1; b_y>=0; b_y--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][2]= color; dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999; } } for (b_y = 0; b_y < h; b_y++) { for (b_x = 0; b_x < w; b_x++) { int mb_index, error, j; int64_t guess, weight_sum; mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride; error = s->error_status_table[mb_index]; if (IS_INTER(s->cur_pic.mb_type[mb_index])) continue; // inter if (!(error & ER_DC_ERROR)) continue; // dc-ok weight_sum = 0; guess = 0; for (j = 0; j < 4; j++) { int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1); guess += weight*(int64_t)col[b_x + b_y*stride][j]; weight_sum += weight; } guess = (guess + weight_sum / 2) / weight_sum; dc[b_x + b_y * stride] = guess; } } fail: av_freep(&col); av_freep(&dist); }
C
FFmpeg
0
CVE-2012-1601
https://www.cvedetails.com/cve/CVE-2012-1601/
CWE-399
https://github.com/torvalds/linux/commit/9c895160d25a76c21b65bad141b08e8d4f99afef
9c895160d25a76c21b65bad141b08e8d4f99afef
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings (cherry picked from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e) If some vcpus are created before KVM_CREATE_IRQCHIP, then irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading to potential NULL pointer dereferences. Fix by: - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP This is somewhat long winded because vcpu->arch.apic is created without kvm->lock held. Based on earlier patch by Michael Ellerman. Signed-off-by: Michael Ellerman <[email protected]> Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p_exit_data; p_exit_data = kvm_get_exit_data(vcpu); return p_exit_data->exit_reason; }
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu) { struct exit_ctl_data *p_exit_data; p_exit_data = kvm_get_exit_data(vcpu); return p_exit_data->exit_reason; }
C
linux
0
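The KVM record above keeps irqchip_in_kernel() and the per-vcpu apic pointer consistent by rejecting KVM_CREATE_IRQCHIP once vcpus exist and rejecting vcpus without an apic once the irqchip exists. A toy sketch of that two-way check, with stand-in fields rather than the KVM structures:

    #include <stdbool.h>

    struct vm_state {
        int  nr_vcpus;
        bool irqchip_in_kernel;
    };

    /* Creating the in-kernel irqchip after vcpus already exist would leave
     * those vcpus without an apic, so reject it. */
    static bool can_create_irqchip(const struct vm_state *vm)
    {
        return vm->nr_vcpus == 0;
    }

    /* A vcpu created after the irqchip must come with an apic, otherwise code
     * that assumes irqchip_in_kernel() implies a valid apic can dereference
     * NULL later. */
    static bool can_create_vcpu(const struct vm_state *vm, bool vcpu_has_apic)
    {
        return !vm->irqchip_in_kernel || vcpu_has_apic;
    }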
null
null
null
https://github.com/chromium/chromium/commit/93dd81929416a0170935e6eeac03d10aed60df18
93dd81929416a0170935e6eeac03d10aed60df18
Implement NPN_RemoveProperty https://bugs.webkit.org/show_bug.cgi?id=43315 Reviewed by Sam Weinig. WebKit2: * WebProcess/Plugins/NPJSObject.cpp: (WebKit::NPJSObject::removeProperty): Try to remove the property. (WebKit::NPJSObject::npClass): Add NP_RemoveProperty. (WebKit::NPJSObject::NP_RemoveProperty): Call NPJSObject::removeProperty. * WebProcess/Plugins/Netscape/NetscapeBrowserFuncs.cpp: (WebKit::NPN_RemoveProperty): Call the NPClass::removeProperty function. WebKitTools: * DumpRenderTree/DumpRenderTree.xcodeproj/project.pbxproj: Add NPRuntimeRemoveProperty.cpp * DumpRenderTree/TestNetscapePlugIn/PluginTest.cpp: (PluginTest::NPN_GetStringIdentifier): (PluginTest::NPN_GetIntIdentifier): (PluginTest::NPN_RemoveProperty): Add NPN_ helpers. * DumpRenderTree/TestNetscapePlugIn/PluginTest.h: Support more NPClass functions. * DumpRenderTree/TestNetscapePlugIn/Tests/NPRuntimeRemoveProperty.cpp: Added. (NPRuntimeRemoveProperty::NPRuntimeRemoveProperty): Test for NPN_RemoveProperty. (NPRuntimeRemoveProperty::TestObject::hasMethod): (NPRuntimeRemoveProperty::TestObject::invoke): Add a testRemoveProperty method. (NPRuntimeRemoveProperty::NPP_GetValue): Return the test object. * DumpRenderTree/TestNetscapePlugIn/win/TestNetscapePlugin.vcproj: * DumpRenderTree/qt/TestNetscapePlugin/TestNetscapePlugin.pro: * GNUmakefile.am: Add NPRuntimeRemoveProperty.cpp LayoutTests: Add a test for NPN_RemoveProperty. * plugins/npruntime/remove-property-expected.txt: Added. * plugins/npruntime/remove-property.html: Added. git-svn-id: svn://svn.chromium.org/blink/trunk@64444 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static NPError NPN_GetURLNotify(NPP npp, const char* url, const char* target, void* notifyData) { if (!url) return NPERR_GENERIC_ERROR; RefPtr<NetscapePlugin> plugin = NetscapePlugin::fromNPP(npp); plugin->loadURL("GET", makeURLString(url), target, HTTPHeaderMap(), Vector<char>(), true, notifyData); return NPERR_NO_ERROR; }
static NPError NPN_GetURLNotify(NPP npp, const char* url, const char* target, void* notifyData) { if (!url) return NPERR_GENERIC_ERROR; RefPtr<NetscapePlugin> plugin = NetscapePlugin::fromNPP(npp); plugin->loadURL("GET", makeURLString(url), target, HTTPHeaderMap(), Vector<char>(), true, notifyData); return NPERR_NO_ERROR; }
C
Chrome
0
CVE-2010-1166
https://www.cvedetails.com/cve/CVE-2010-1166/
CWE-189
https://cgit.freedesktop.org/xorg/xserver/commit/?id=d2f813f7db
d2f813f7db157fc83abc4b3726821c36ee7e40b1
null
fbFetch_r5g6b5 (const FbBits *bits, int x, int width, CARD32 *buffer, miIndexedPtr indexed) { const CARD16 *pixel = (const CARD16 *)bits + x; const CARD16 *end = pixel + width; while (pixel < end) { CARD32 p = READ(pixel++); CARD32 r = (((p) << 3) & 0xf8) | (((p) << 5) & 0xfc00) | (((p) << 8) & 0xf80000); r |= (r >> 5) & 0x70007; r |= (r >> 6) & 0x300; WRITE(buffer++, 0xff000000 | r); } }
fbFetch_r5g6b5 (const FbBits *bits, int x, int width, CARD32 *buffer, miIndexedPtr indexed) { const CARD16 *pixel = (const CARD16 *)bits + x; const CARD16 *end = pixel + width; while (pixel < end) { CARD32 p = READ(pixel++); CARD32 r = (((p) << 3) & 0xf8) | (((p) << 5) & 0xfc00) | (((p) << 8) & 0xf80000); r |= (r >> 5) & 0x70007; r |= (r >> 6) & 0x300; WRITE(buffer++, 0xff000000 | r); } }
C
xserver
0
CVE-2016-4478
https://www.cvedetails.com/cve/CVE-2016-4478/
CWE-119
https://github.com/atheme/atheme/commit/87580d767868360d2fed503980129504da84b63e
87580d767868360d2fed503980129504da84b63e
Do not copy more bytes than were allocated
char *xmlrpc_normalizeBuffer(const char *buf) { char *newbuf; int i, len, j = 0; len = strlen(buf); newbuf = (char *)smalloc(sizeof(char) * len + 1); for (i = 0; i < len; i++) { switch (buf[i]) { /* ctrl char */ case 1: break; /* Bold ctrl char */ case 2: break; /* Color ctrl char */ case 3: /* If the next character is a digit, its also removed */ if (isdigit((unsigned char)buf[i + 1])) { i++; /* not the best way to remove colors * which are two digit but no worse then * how the Unreal does with +S - TSL */ if (isdigit((unsigned char)buf[i + 1])) { i++; } /* Check for background color code * and remove it as well */ if (buf[i + 1] == ',') { i++; if (isdigit((unsigned char)buf[i + 1])) { i++; } /* not the best way to remove colors * which are two digit but no worse then * how the Unreal does with +S - TSL */ if (isdigit((unsigned char)buf[i + 1])) { i++; } } } break; /* tabs char */ case 9: break; /* line feed char */ case 10: break; /* carrage returns char */ case 13: break; /* Reverse ctrl char */ case 22: break; /* Underline ctrl char */ case 31: break; /* A valid char gets copied into the new buffer */ default: /* All valid <32 characters are handled above. */ if (buf[i] > 31) { newbuf[j] = buf[i]; j++; } } } /* Terminate the string */ newbuf[j] = 0; return (newbuf); }
char *xmlrpc_normalizeBuffer(const char *buf) { char *newbuf; int i, len, j = 0; len = strlen(buf); newbuf = (char *)smalloc(sizeof(char) * len + 1); for (i = 0; i < len; i++) { switch (buf[i]) { /* ctrl char */ case 1: break; /* Bold ctrl char */ case 2: break; /* Color ctrl char */ case 3: /* If the next character is a digit, its also removed */ if (isdigit((unsigned char)buf[i + 1])) { i++; /* not the best way to remove colors * which are two digit but no worse then * how the Unreal does with +S - TSL */ if (isdigit((unsigned char)buf[i + 1])) { i++; } /* Check for background color code * and remove it as well */ if (buf[i + 1] == ',') { i++; if (isdigit((unsigned char)buf[i + 1])) { i++; } /* not the best way to remove colors * which are two digit but no worse then * how the Unreal does with +S - TSL */ if (isdigit((unsigned char)buf[i + 1])) { i++; } } } break; /* tabs char */ case 9: break; /* line feed char */ case 10: break; /* carrage returns char */ case 13: break; /* Reverse ctrl char */ case 22: break; /* Underline ctrl char */ case 31: break; /* A valid char gets copied into the new buffer */ default: /* All valid <32 characters are handled above. */ if (buf[i] > 31) { newbuf[j] = buf[i]; j++; } } } /* Terminate the string */ newbuf[j] = 0; return (newbuf); }
C
atheme
0
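The atheme record above carries the commit message "Do not copy more bytes than were allocated". A generic illustration of that rule, with a hypothetical helper that is not the atheme fix itself: the copy length is derived from the destination allocation, never from the source alone.

    #include <string.h>
    #include <stdlib.h>

    /* Copy src into a freshly allocated buffer of dst_len bytes, clamping the
     * amount copied to what was actually allocated (minus the terminator). */
    static char *copy_bounded(const char *src, size_t dst_len)
    {
        char *dst;
        size_t n;

        if (dst_len == 0)
            return NULL;
        dst = malloc(dst_len);
        if (dst == NULL)
            return NULL;
        n = strlen(src);
        if (n > dst_len - 1)
            n = dst_len - 1;        /* never copy more than was allocated */
        memcpy(dst, src, n);
        dst[n] = '\0';
        return dst;
    }

strlcpy-style interfaces encode the same rule: the destination size, not the source length, bounds the copy.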
CVE-2018-6063
https://www.cvedetails.com/cve/CVE-2018-6063/
CWE-787
https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608
673ce95d481ea9368c4d4d43ac756ba1d6d9e608
Correct mojo::WrapSharedMemoryHandle usage Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which were assuming that the call actually has any control over the memory protection applied to a handle when mapped. Where fixing usage is infeasible for this CL, TODOs are added to annotate follow-up work. Also updates the API and documentation to (hopefully) improve clarity and avoid similar mistakes from being made in the future. BUG=792900 Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477 Reviewed-on: https://chromium-review.googlesource.com/818282 Reviewed-by: Wei Li <[email protected]> Reviewed-by: Lei Zhang <[email protected]> Reviewed-by: John Abd-El-Malek <[email protected]> Reviewed-by: Daniel Cheng <[email protected]> Reviewed-by: Sadrul Chowdhury <[email protected]> Reviewed-by: Yuzhu Shen <[email protected]> Reviewed-by: Robert Sesek <[email protected]> Commit-Queue: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#530268}
Core::~Core() { if (node_controller_ && node_controller_->io_task_runner()) { scoped_refptr<base::TaskRunner> io_task_runner = node_controller_->io_task_runner(); io_task_runner->PostTask(FROM_HERE, base::Bind(&Core::PassNodeControllerToIOThread, base::Passed(&node_controller_))); } base::trace_event::MemoryDumpManager::GetInstance() ->UnregisterAndDeleteDumpProviderSoon(std::move(handles_)); }
Core::~Core() { if (node_controller_ && node_controller_->io_task_runner()) { scoped_refptr<base::TaskRunner> io_task_runner = node_controller_->io_task_runner(); io_task_runner->PostTask(FROM_HERE, base::Bind(&Core::PassNodeControllerToIOThread, base::Passed(&node_controller_))); } base::trace_event::MemoryDumpManager::GetInstance() ->UnregisterAndDeleteDumpProviderSoon(std::move(handles_)); }
C
Chrome
0
CVE-2013-0918
https://www.cvedetails.com/cve/CVE-2013-0918/
CWE-264
https://github.com/chromium/chromium/commit/0a57375ad73780e61e1770a9d88b0529b0dbd33b
0a57375ad73780e61e1770a9d88b0529b0dbd33b
Let the browser handle external navigations from DevTools. BUG=180555 Review URL: https://chromiumcodereview.appspot.com/12531004 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@186793 0039d316-1c4b-4281-b951-d872f2087c98
void RenderViewImpl::CreateFrameTree(WebKit::WebFrame* frame, base::DictionaryValue* frame_tree) { DCHECK(false); NavigateToSwappedOutURL(frame); string16 name; if (frame_tree->GetString(kFrameTreeNodeNameKey, &name) && !name.empty()) frame->setName(name); int remote_id; if (frame_tree->GetInteger(kFrameTreeNodeIdKey, &remote_id)) active_frame_id_map_.insert(std::pair<int, int>(frame->identifier(), remote_id)); base::ListValue* children; if (!frame_tree->GetList(kFrameTreeNodeSubtreeKey, &children)) return; base::DictionaryValue* child; for (size_t i = 0; i < children->GetSize(); ++i) { if (!children->GetDictionary(i, &child)) continue; WebElement element = frame->document().createElement("iframe"); element.setAttribute("width", "0"); element.setAttribute("height", "0"); element.setAttribute("frameBorder", "0"); if (frame->document().body().appendChild(element)) { WebFrame* subframe = WebFrame::fromFrameOwnerElement(element); if (subframe) CreateFrameTree(subframe, child); } else { LOG(ERROR) << "Failed to append created iframe element."; } } }
void RenderViewImpl::CreateFrameTree(WebKit::WebFrame* frame, base::DictionaryValue* frame_tree) { DCHECK(false); NavigateToSwappedOutURL(frame); string16 name; if (frame_tree->GetString(kFrameTreeNodeNameKey, &name) && !name.empty()) frame->setName(name); int remote_id; if (frame_tree->GetInteger(kFrameTreeNodeIdKey, &remote_id)) active_frame_id_map_.insert(std::pair<int, int>(frame->identifier(), remote_id)); base::ListValue* children; if (!frame_tree->GetList(kFrameTreeNodeSubtreeKey, &children)) return; base::DictionaryValue* child; for (size_t i = 0; i < children->GetSize(); ++i) { if (!children->GetDictionary(i, &child)) continue; WebElement element = frame->document().createElement("iframe"); element.setAttribute("width", "0"); element.setAttribute("height", "0"); element.setAttribute("frameBorder", "0"); if (frame->document().body().appendChild(element)) { WebFrame* subframe = WebFrame::fromFrameOwnerElement(element); if (subframe) CreateFrameTree(subframe, child); } else { LOG(ERROR) << "Failed to append created iframe element."; } } }
C
Chrome
0
CVE-2018-6040
https://www.cvedetails.com/cve/CVE-2018-6040/
CWE-732
https://github.com/chromium/chromium/commit/209f225b2d51334eaf69ffdf002e25eaa1e0d448
209f225b2d51334eaf69ffdf002e25eaa1e0d448
Fixed bug where PlzNavigate CSP in a iframe did not get the inherited CSP When inheriting the CSP from a parent document to a local-scheme CSP, it does not always get propagated to the PlzNavigate CSP. This means that PlzNavigate CSP checks (like `frame-src`) would be ran against a blank policy instead of the proper inherited policy. Bug: 778658 Change-Id: I61bb0d432e1cea52f199e855624cb7b3078f56a9 Reviewed-on: https://chromium-review.googlesource.com/765969 Commit-Queue: Andy Paicu <[email protected]> Reviewed-by: Mike West <[email protected]> Cr-Commit-Position: refs/heads/master@{#518245}
void Document::InitContentSecurityPolicy( ContentSecurityPolicy* csp, const ContentSecurityPolicy* policy_to_inherit) { SetContentSecurityPolicy(csp ? csp : ContentSecurityPolicy::Create()); GetContentSecurityPolicy()->BindToExecutionContext(this); if (policy_to_inherit) { GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit); } else if (frame_) { Frame* inherit_from = frame_->Tree().Parent() ? frame_->Tree().Parent() : frame_->Client()->Opener(); if (inherit_from && frame_ != inherit_from) { DCHECK(inherit_from->GetSecurityContext() && inherit_from->GetSecurityContext()->GetContentSecurityPolicy()); policy_to_inherit = inherit_from->GetSecurityContext()->GetContentSecurityPolicy(); if (url_.IsEmpty() || url_.ProtocolIsAbout() || url_.ProtocolIsData() || url_.ProtocolIs("blob") || url_.ProtocolIs("filesystem")) { GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit); } } } if (policy_to_inherit && IsPluginDocument()) GetContentSecurityPolicy()->CopyPluginTypesFrom(policy_to_inherit); }
void Document::InitContentSecurityPolicy( ContentSecurityPolicy* csp, const ContentSecurityPolicy* policy_to_inherit) { SetContentSecurityPolicy(csp ? csp : ContentSecurityPolicy::Create()); if (policy_to_inherit) { GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit); } else if (frame_) { Frame* inherit_from = frame_->Tree().Parent() ? frame_->Tree().Parent() : frame_->Client()->Opener(); if (inherit_from && frame_ != inherit_from) { DCHECK(inherit_from->GetSecurityContext() && inherit_from->GetSecurityContext()->GetContentSecurityPolicy()); policy_to_inherit = inherit_from->GetSecurityContext()->GetContentSecurityPolicy(); if (url_.IsEmpty() || url_.ProtocolIsAbout() || url_.ProtocolIsData() || url_.ProtocolIs("blob") || url_.ProtocolIs("filesystem")) { GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit); } } } if (policy_to_inherit && IsPluginDocument()) GetContentSecurityPolicy()->CopyPluginTypesFrom(policy_to_inherit); GetContentSecurityPolicy()->BindToExecutionContext(this); }
C
Chrome
1
CVE-2018-18352
https://www.cvedetails.com/cve/CVE-2018-18352/
CWE-732
https://github.com/chromium/chromium/commit/a9cbaa7a40e2b2723cfc2f266c42f4980038a949
a9cbaa7a40e2b2723cfc2f266c42f4980038a949
Simplify "WouldTaintOrigin" concept in media/blink Currently WebMediaPlayer has three predicates: - DidGetOpaqueResponseFromServiceWorker - HasSingleSecurityOrigin - DidPassCORSAccessCheck . These are used to determine whether the response body is available for scripts. They are known to be confusing, and actually MediaElementAudioSourceHandler::WouldTaintOrigin misuses them. This CL merges the three predicates to one, WouldTaintOrigin, to remove the confusion. Now the "response type" concept is available and we don't need a custom CORS check, so this CL removes BaseAudioContext::WouldTaintOrigin. This CL also renames URLData::has_opaque_data_ and its (direct and indirect) data accessors to match the spec. Bug: 849942, 875153 Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a Reviewed-on: https://chromium-review.googlesource.com/c/1238098 Reviewed-by: Fredrik Hubinette <[email protected]> Reviewed-by: Kinuko Yasuda <[email protected]> Reviewed-by: Raymond Toy <[email protected]> Commit-Queue: Yutaka Hirano <[email protected]> Cr-Commit-Position: refs/heads/master@{#598258}
void HTMLMediaElement::DurationChanged(double duration, bool request_seek) { BLINK_MEDIA_LOG << "durationChanged(" << (void*)this << ", " << duration << ", " << BoolString(request_seek) << ")"; if (duration_ == duration) return; BLINK_MEDIA_LOG << "durationChanged(" << (void*)this << ") : " << duration_ << " -> " << duration; duration_ = duration; ScheduleEvent(EventTypeNames::durationchange); if (GetLayoutObject()) GetLayoutObject()->UpdateFromElement(); if (request_seek) Seek(duration); }
void HTMLMediaElement::DurationChanged(double duration, bool request_seek) { BLINK_MEDIA_LOG << "durationChanged(" << (void*)this << ", " << duration << ", " << BoolString(request_seek) << ")"; if (duration_ == duration) return; BLINK_MEDIA_LOG << "durationChanged(" << (void*)this << ") : " << duration_ << " -> " << duration; duration_ = duration; ScheduleEvent(EventTypeNames::durationchange); if (GetLayoutObject()) GetLayoutObject()->UpdateFromElement(); if (request_seek) Seek(duration); }
C
Chrome
0
CVE-2011-2918
https://www.cvedetails.com/cve/CVE-2011-2918/
CWE-399
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <[email protected]> Cc: Michael Cree <[email protected]> Cc: Will Deacon <[email protected]> Cc: Deng-Cheng Zhu <[email protected]> Cc: Anton Blanchard <[email protected]> Cc: Eric B Munson <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Paul Mundt <[email protected]> Cc: David S. Miller <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jason Wessel <[email protected]> Cc: Don Zickus <[email protected]> Link: http://lkml.kernel.org/n/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static void perf_ctx_lock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { raw_spin_lock(&cpuctx->ctx.lock); if (ctx) raw_spin_lock(&ctx->lock); }
static void perf_ctx_lock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx) { raw_spin_lock(&cpuctx->ctx.lock); if (ctx) raw_spin_lock(&ctx->lock); }
C
linux
0
CVE-2016-4558
https://www.cvedetails.com/cve/CVE-2016-4558/
null
https://github.com/torvalds/linux/commit/92117d8443bc5afacc8d5ba82e541946310f106e
92117d8443bc5afacc8d5ba82e541946310f106e
bpf: fix refcnt overflow On a system with >32Gbyte of phyiscal memory and infinite RLIMIT_MEMLOCK, the malicious application may overflow 32-bit bpf program refcnt. It's also possible to overflow map refcnt on 1Tb system. Impose 32k hard limit which means that the same bpf program or map cannot be shared by more than 32k processes. Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs") Reported-by: Jann Horn <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int bpf_map_new_fd(struct bpf_map *map) { return anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC); }
int bpf_map_new_fd(struct bpf_map *map) { return anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC); }
C
linux
0
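The refcnt fix described in the commit message above works by capping the count far below the 32-bit wrap point rather than trying to detect overflow after the fact. A minimal sketch of that pattern in C, using an illustrative struct and limit rather than the kernel's actual bpf_map/bpf_prog internals (a real kernel version would use atomic_t and return -EBUSY):

    #include <stdbool.h>

    #define REFCNT_HARD_LIMIT 32768          /* illustrative cap, well below UINT_MAX */

    struct object {
            unsigned int refcnt;             /* would be an atomic type in kernel code */
    };

    /* Take a reference, refusing once the cap is reached so the counter
     * can never wrap to zero and cause a use-after-free. */
    static bool object_get(struct object *obj)
    {
            if (obj->refcnt >= REFCNT_HARD_LIMIT)
                    return false;            /* caller treats this as "too many users" */
            obj->refcnt++;
            return true;
    }

Refusing the extra reference is deliberately preferred over saturating silently: the caller gets an error instead of an object whose lifetime accounting has already been corrupted.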
CVE-2013-2168
https://www.cvedetails.com/cve/CVE-2013-2168/
CWE-20
https://cgit.freedesktop.org/dbus/dbus/commit/?id=954d75b2b64e4799f360d2a6bf9cff6d9fee37e7
954d75b2b64e4799f360d2a6bf9cff6d9fee37e7
null
static DWORD WINAPI dump_thread_proc(LPVOID lpParameter) { dump_backtrace_for_thread((HANDLE)lpParameter); return 0; }
static DWORD WINAPI dump_thread_proc(LPVOID lpParameter) { dump_backtrace_for_thread((HANDLE)lpParameter); return 0; }
C
dbus
0
CVE-2012-3412
https://www.cvedetails.com/cve/CVE-2012-3412/
CWE-189
https://github.com/torvalds/linux/commit/68cb695ccecf949d48949e72f8ce591fdaaa325c
68cb695ccecf949d48949e72f8ce591fdaaa325c
sfc: Fix maximum number of TSO segments and minimum TX queue size [ Upstream commit 7e6d06f0de3f74ca929441add094518ae332257c ] Currently an skb requiring TSO may not fit within a minimum-size TX queue. The TX queue selected for the skb may stall and trigger the TX watchdog repeatedly (since the problem skb will be retried after the TX reset). This issue is designated as CVE-2012-3412. Set the maximum number of TSO segments for our devices to 100. This should make no difference to behaviour unless the actual MSS is less than about 700. Increase the minimum TX queue size accordingly to allow for 2 worst-case skbs, so that there will definitely be space to add an skb after we wake a queue. To avoid invalidating existing configurations, change efx_ethtool_set_ringparam() to fix up values that are too small rather than returning -EINVAL. Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: David S. Miller <[email protected]> Signed-off-by: Ben Hutchings <[email protected]>
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { struct efx_nic *efx = netdev_priv(net_dev); struct mii_ioctl_data *data = if_mii(ifr); EFX_ASSERT_RESET_SERIALISED(efx); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && (data->phy_id & 0xfc00) == 0x0400) data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; return mdio_mii_ioctl(&efx->mdio, data, cmd); }
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { struct efx_nic *efx = netdev_priv(net_dev); struct mii_ioctl_data *data = if_mii(ifr); EFX_ASSERT_RESET_SERIALISED(efx); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && (data->phy_id & 0xfc00) == 0x0400) data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; return mdio_mii_ioctl(&efx->mdio, data, cmd); }
C
linux
0
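The commit message above sizes the minimum TX ring so that two worst-case TSO packets always fit, guaranteeing space to enqueue another skb after a stalled queue is woken. A minimal sketch of that sizing arithmetic, with illustrative constants and helper names rather than the sfc driver's real macros:

    /* Worst-case DMA descriptors one TSO skb can consume: one per segment
     * for payload plus a couple of extra slots for headers/alignment.
     * The constants are illustrative, not the driver's actual values. */
    #define MAX_TSO_SEGS        100
    #define EXTRA_DESCS_PER_SKB   2

    static unsigned int max_descs_per_skb(void)
    {
            return MAX_TSO_SEGS + EXTRA_DESCS_PER_SKB;
    }

    /* Minimum ring entries: room for two worst-case skbs. */
    static unsigned int min_txq_entries(void)
    {
            return 2 * max_descs_per_skb();
    }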
CVE-2015-1465
https://www.cvedetails.com/cve/CVE-2015-1465/
CWE-17
https://github.com/torvalds/linux/commit/df4d92549f23e1c037e83323aff58a21b3de7fe0
df4d92549f23e1c037e83323aff58a21b3de7fe0
ipv4: try to cache dst_entries which would cause a redirect Not caching dst_entries which cause redirects could be exploited by hosts on the same subnet, causing a severe DoS attack. This effect aggravated since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()"). Lookups causing redirects will be allocated with DST_NOCACHE set which will force dst_release to free them via RCU. Unfortunately waiting for RCU grace period just takes too long, we can end up with >1M dst_entries waiting to be released and the system will run OOM. rcuos threads cannot catch up under high softirq load. Attaching the flag to emit a redirect later on to the specific skb allows us to cache those dst_entries thus reducing the pressure on allocation and deallocation. This issue was discovered by Marcelo Leitner. Cc: Julian Anastasov <[email protected]> Signed-off-by: Marcelo Leitner <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: Julian Anastasov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { }
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { }
C
linux
0
CVE-2011-2491
https://www.cvedetails.com/cve/CVE-2011-2491/
CWE-399
https://github.com/torvalds/linux/commit/0b760113a3a155269a3fba93a409c640031dd68f
0b760113a3a155269a3fba93a409c640031dd68f
NLM: Don't hang forever on NLM unlock requests If the NLM daemon is killed on the NFS server, we can currently end up hanging forever on an 'unlock' request, instead of aborting. Basically, if the rpcbind request fails, or the server keeps returning garbage, we really want to quit instead of retrying. Tested-by: Vasily Averin <[email protected]> Signed-off-by: Trond Myklebust <[email protected]> Cc: [email protected]
static void rpc_task_set_debuginfo(struct rpc_task *task) { static atomic_t rpc_pid; task->tk_pid = atomic_inc_return(&rpc_pid); }
static void rpc_task_set_debuginfo(struct rpc_task *task) { static atomic_t rpc_pid; task->tk_pid = atomic_inc_return(&rpc_pid); }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/957973753ec4159003ff7930d946b7e89c7e09f3
957973753ec4159003ff7930d946b7e89c7e09f3
Make NotifyHeadersComplete the last call in the function. BUG=82903 Review URL: http://codereview.chromium.org/7038017 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85719 0039d316-1c4b-4281-b951-d872f2087c98
bool BlobURLRequestJob::DispatchReadFile(const BlobData::Item& item) { if (stream_ != NULL) return ReadFile(item); base::FileUtilProxy::CreateOrOpen( file_thread_proxy_, item.file_path(), kFileOpenFlags, callback_factory_.NewCallback(&BlobURLRequestJob::DidOpen)); SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0)); return false; }
bool BlobURLRequestJob::DispatchReadFile(const BlobData::Item& item) { if (stream_ != NULL) return ReadFile(item); base::FileUtilProxy::CreateOrOpen( file_thread_proxy_, item.file_path(), kFileOpenFlags, callback_factory_.NewCallback(&BlobURLRequestJob::DidOpen)); SetStatus(net::URLRequestStatus(net::URLRequestStatus::IO_PENDING, 0)); return false; }
C
Chrome
0
CVE-2016-1618
https://www.cvedetails.com/cve/CVE-2016-1618/
CWE-310
https://github.com/chromium/chromium/commit/0d151e09e13a704e9738ea913d117df7282e6c7d
0d151e09e13a704e9738ea913d117df7282e6c7d
Add assertions that the empty Platform::cryptographicallyRandomValues() overrides are not being used. These implementations are not safe and look scary if not accompanied by an assertion. Also one of the comments was incorrect. BUG=552749 Review URL: https://codereview.chromium.org/1419293005 Cr-Commit-Position: refs/heads/master@{#359229}
virtual void SetUp() { blink::Platform::initialize(&m_proxyPlatform); m_globalMemoryCache = replaceMemoryCacheForTesting(MemoryCache::create()); m_fetcher = ResourceFetcher::create(MockFetchContext::create()); }
virtual void SetUp() { blink::Platform::initialize(&m_proxyPlatform); m_globalMemoryCache = replaceMemoryCacheForTesting(MemoryCache::create()); m_fetcher = ResourceFetcher::create(MockFetchContext::create()); }
C
Chrome
0
CVE-2016-3751
https://www.cvedetails.com/cve/CVE-2016-3751/
null
https://android.googlesource.com/platform/external/libpng/+/9d4853418ab2f754c2b63e091c29c5529b8b86ca
9d4853418ab2f754c2b63e091c29c5529b8b86ca
DO NOT MERGE Update libpng to 1.6.20 BUG:23265085 Change-Id: I85199805636d771f3597b691b63bc0bf46084833 (cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
image_transform_png_set_gray_to_rgb_mod(const image_transform *this, image_pixel *that, png_const_structp pp, const transform_display *display) { /* NOTE: we can actually pend the tRNS processing at this point because we * can correctly recognize the original pixel value even though we have * mapped the one gray channel to the three RGB ones, but in fact libpng * doesn't do this, so we don't either. */ if ((that->colour_type & PNG_COLOR_MASK_COLOR) == 0 && that->have_tRNS) image_pixel_add_alpha(that, &display->this, 0/*!for background*/); /* Simply expand the bit depth and alter the colour type as required. */ if (that->colour_type == PNG_COLOR_TYPE_GRAY) { /* RGB images have a bit depth at least equal to '8' */ if (that->bit_depth < 8) that->sample_depth = that->bit_depth = 8; /* And just changing the colour type works here because the green and blue * channels are being maintained in lock-step with the red/gray: */ that->colour_type = PNG_COLOR_TYPE_RGB; } else if (that->colour_type == PNG_COLOR_TYPE_GRAY_ALPHA) that->colour_type = PNG_COLOR_TYPE_RGB_ALPHA; this->next->mod(this->next, that, pp, display); }
image_transform_png_set_gray_to_rgb_mod(PNG_CONST image_transform *this, image_pixel *that, png_const_structp pp, PNG_CONST transform_display *display) { /* NOTE: we can actually pend the tRNS processing at this point because we * can correctly recognize the original pixel value even though we have * mapped the one gray channel to the three RGB ones, but in fact libpng * doesn't do this, so we don't either. */ if ((that->colour_type & PNG_COLOR_MASK_COLOR) == 0 && that->have_tRNS) image_pixel_add_alpha(that, &display->this); /* Simply expand the bit depth and alter the colour type as required. */ if (that->colour_type == PNG_COLOR_TYPE_GRAY) { /* RGB images have a bit depth at least equal to '8' */ if (that->bit_depth < 8) that->sample_depth = that->bit_depth = 8; /* And just changing the colour type works here because the green and blue * channels are being maintained in lock-step with the red/gray: */ that->colour_type = PNG_COLOR_TYPE_RGB; } else if (that->colour_type == PNG_COLOR_TYPE_GRAY_ALPHA) that->colour_type = PNG_COLOR_TYPE_RGB_ALPHA; this->next->mod(this->next, that, pp, display); }
C
Android
1
CVE-2012-2816
https://www.cvedetails.com/cve/CVE-2012-2816/
null
https://github.com/chromium/chromium/commit/cd0bd79d6ebdb72183e6f0833673464cc10b3600
cd0bd79d6ebdb72183e6f0833673464cc10b3600
Convert plugin and GPU process to brokered handle duplication. BUG=119250 Review URL: https://chromiumcodereview.appspot.com/9958034 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
void GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped( const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params) { TRACE_EVENT0("renderer", "GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped"); base::ScopedClosureRunner scoped_completion_runner( base::Bind(&AcceleratedSurfaceBuffersSwappedCompleted, host_id_, params.route_id, true)); gfx::PluginWindowHandle handle = GpuSurfaceTracker::Get()->GetSurfaceWindowHandle(params.surface_id); if (!handle) return; scoped_refptr<AcceleratedPresenter> presenter( AcceleratedPresenter::GetForWindow(handle)); if (!presenter) return; scoped_completion_runner.Release(); presenter->AsyncPresentAndAcknowledge( params.size, params.surface_handle, base::Bind(&AcceleratedSurfaceBuffersSwappedCompleted, host_id_, params.route_id)); }
void GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped( const GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params& params) { TRACE_EVENT0("renderer", "GpuProcessHost::OnAcceleratedSurfaceBuffersSwapped"); base::ScopedClosureRunner scoped_completion_runner( base::Bind(&AcceleratedSurfaceBuffersSwappedCompleted, host_id_, params.route_id, true)); gfx::PluginWindowHandle handle = GpuSurfaceTracker::Get()->GetSurfaceWindowHandle(params.surface_id); if (!handle) return; scoped_refptr<AcceleratedPresenter> presenter( AcceleratedPresenter::GetForWindow(handle)); if (!presenter) return; scoped_completion_runner.Release(); presenter->AsyncPresentAndAcknowledge( params.size, params.surface_handle, base::Bind(&AcceleratedSurfaceBuffersSwappedCompleted, host_id_, params.route_id)); }
C
Chrome
0
CVE-2012-1179
https://www.cvedetails.com/cve/CVE-2012-1179/
CWE-264
https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850
4a1d704194a441bf83c636004a479e01360ec850
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream. In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 
143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. 
====== end quote ======= [[email protected]: checkpatch fixes] Reported-by: Ulrich Obergfell <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Dave Jones <[email protected]> Acked-by: Larry Woodman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: Mark Salter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) { struct pt_regs *regs32; regs32 = save_v86_state(regs16); regs32->ax = retval; __asm__ __volatile__("movl %0,%%esp\n\t" "movl %1,%%ebp\n\t" "jmp resume_userspace" : : "r" (regs32), "r" (current_thread_info())); }
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval) { struct pt_regs *regs32; regs32 = save_v86_state(regs16); regs32->ax = retval; __asm__ __volatile__("movl %0,%%esp\n\t" "movl %1,%%ebp\n\t" "jmp resume_userspace" : : "r" (regs32), "r" (current_thread_info())); }
C
linux
0
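The fix described in the commit message above takes one snapshot of the racy page-table entry into a local variable, with only a compiler barrier, and then makes every decision on that snapshot so a concurrent huge-page fault cannot change the value between checks. A minimal sketch of the same read-once pattern, with an illustrative scalar type and bit test standing in for the real pmd helpers:

    /* Sketch of the "read once into a local" pattern; the type and the
     * checks are illustrative, not the kernel's pmd API. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    typedef unsigned long entry_val_t;

    static int entry_is_none_or_bad(volatile entry_val_t *shared)
    {
            entry_val_t snapshot = *shared;  /* single read of the racy value */

            barrier();                       /* keep the compiler from re-reading *shared */

            if (snapshot == 0)               /* "none" case in this sketch */
                    return 1;
            if (snapshot & 1UL)              /* stand-in for the "bad"/huge check */
                    return 1;
            return 0;
    }

Every later test uses only the local copy, so the result is consistent even if the shared entry changes underneath.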
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 [email protected] Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void nullableStringAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder()); V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue); imp->setNullableStringAttribute(cppValue); }
static void nullableStringAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info) { TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder()); V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue); imp->setNullableStringAttribute(cppValue); }
C
Chrome
0
CVE-2013-2906
https://www.cvedetails.com/cve/CVE-2013-2906/
CWE-362
https://github.com/chromium/chromium/commit/c2364e0ce42878a2177c6f4cf7adb3c715b777c1
c2364e0ce42878a2177c6f4cf7adb3c715b777c1
[OriginChip] Re-enable the chip as necessary when switching tabs. BUG=369500 Review URL: https://codereview.chromium.org/292493003 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@271161 0039d316-1c4b-4281-b951-d872f2087c98
void RecordPercentageMatchHistogram(const base::string16& old_text, const base::string16& new_text, bool url_replacement_active, content::PageTransition transition, int omnibox_width) { size_t avg_length = (old_text.length() + new_text.length()) / 2; int percent = 0; if (!old_text.empty() && !new_text.empty()) { size_t shorter_length = std::min(old_text.length(), new_text.length()); base::string16::const_iterator end(old_text.begin() + shorter_length); base::string16::const_iterator mismatch( std::mismatch(old_text.begin(), end, new_text.begin()).first); size_t matching_characters = mismatch - old_text.begin(); percent = static_cast<float>(matching_characters) / avg_length * 100; } std::string histogram_name; if (url_replacement_active) { if (transition == content::PAGE_TRANSITION_TYPED) { histogram_name = "InstantExtended.PercentageMatchV2_QuerytoURL"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } else { histogram_name = "InstantExtended.PercentageMatchV2_QuerytoQuery"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } } else { if (transition == content::PAGE_TRANSITION_TYPED) { histogram_name = "InstantExtended.PercentageMatchV2_URLtoURL"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } else { histogram_name = "InstantExtended.PercentageMatchV2_URLtoQuery"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } } std::string suffix = "large"; for (size_t i = 0; i < arraysize(kPercentageMatchHistogramWidthBuckets); ++i) { if (omnibox_width < kPercentageMatchHistogramWidthBuckets[i]) { suffix = base::IntToString(kPercentageMatchHistogramWidthBuckets[i]); break; } } base::HistogramBase* counter = base::LinearHistogram::FactoryGet( histogram_name + "_" + suffix, 1, 101, 102, base::Histogram::kUmaTargetedHistogramFlag); counter->Add(percent); }
void RecordPercentageMatchHistogram(const base::string16& old_text, const base::string16& new_text, bool url_replacement_active, content::PageTransition transition, int omnibox_width) { size_t avg_length = (old_text.length() + new_text.length()) / 2; int percent = 0; if (!old_text.empty() && !new_text.empty()) { size_t shorter_length = std::min(old_text.length(), new_text.length()); base::string16::const_iterator end(old_text.begin() + shorter_length); base::string16::const_iterator mismatch( std::mismatch(old_text.begin(), end, new_text.begin()).first); size_t matching_characters = mismatch - old_text.begin(); percent = static_cast<float>(matching_characters) / avg_length * 100; } std::string histogram_name; if (url_replacement_active) { if (transition == content::PAGE_TRANSITION_TYPED) { histogram_name = "InstantExtended.PercentageMatchV2_QuerytoURL"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } else { histogram_name = "InstantExtended.PercentageMatchV2_QuerytoQuery"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } } else { if (transition == content::PAGE_TRANSITION_TYPED) { histogram_name = "InstantExtended.PercentageMatchV2_URLtoURL"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } else { histogram_name = "InstantExtended.PercentageMatchV2_URLtoQuery"; UMA_HISTOGRAM_PERCENTAGE(histogram_name, percent); } } std::string suffix = "large"; for (size_t i = 0; i < arraysize(kPercentageMatchHistogramWidthBuckets); ++i) { if (omnibox_width < kPercentageMatchHistogramWidthBuckets[i]) { suffix = base::IntToString(kPercentageMatchHistogramWidthBuckets[i]); break; } } base::HistogramBase* counter = base::LinearHistogram::FactoryGet( histogram_name + "_" + suffix, 1, 101, 102, base::Histogram::kUmaTargetedHistogramFlag); counter->Add(percent); }
C
Chrome
0
CVE-2013-2902
https://www.cvedetails.com/cve/CVE-2013-2902/
CWE-399
https://github.com/chromium/chromium/commit/87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
Removed pinch viewport scroll offset distribution The associated change in Blink makes the pinch viewport a proper ScrollableArea meaning the normal path for synchronizing layer scroll offsets is used. This is a 2 sided patch, the other CL: https://codereview.chromium.org/199253002/ BUG=349941 Review URL: https://codereview.chromium.org/210543002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260105 0039d316-1c4b-4281-b951-d872f2087c98
static void PostCopyCallbackToMainThread( scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner, scoped_ptr<CopyOutputRequest> request, scoped_ptr<CopyOutputResult> result) { main_thread_task_runner->PostTask(FROM_HERE, base::Bind(&RunCopyCallbackOnMainThread, base::Passed(&request), base::Passed(&result))); }
static void PostCopyCallbackToMainThread( scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner, scoped_ptr<CopyOutputRequest> request, scoped_ptr<CopyOutputResult> result) { main_thread_task_runner->PostTask(FROM_HERE, base::Bind(&RunCopyCallbackOnMainThread, base::Passed(&request), base::Passed(&result))); }
C
Chrome
0
CVE-2016-5219
https://www.cvedetails.com/cve/CVE-2016-5219/
CWE-416
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
a4150b688a754d3d10d2ca385155b1c95d77d6ae
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM This makes the query of GL_COMPLETION_STATUS_KHR to programs much cheaper by minimizing the round-trip to the GPU thread. Bug: 881152, 957001 Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630 Commit-Queue: Kenneth Russell <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Reviewed-by: Geoff Lang <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#657568}
error::Error GLES2DecoderPassthroughImpl::DoUniform2uiv( GLint location, GLsizei count, const volatile GLuint* v) { api()->glUniform2uivFn(location, count, const_cast<const GLuint*>(v)); return error::kNoError; }
error::Error GLES2DecoderPassthroughImpl::DoUniform2uiv( GLint location, GLsizei count, const volatile GLuint* v) { api()->glUniform2uivFn(location, count, const_cast<const GLuint*>(v)); return error::kNoError; }
C
Chrome
0
CVE-2016-2464
https://www.cvedetails.com/cve/CVE-2016-2464/
CWE-20
https://android.googlesource.com/platform/external/libvpx/+/cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
external/libvpx/libwebm: Update snapshot Update libwebm snapshot. This update contains security fixes from upstream. Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b BUG=23167726 Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207 (cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { if (!pReader) return E_FILE_FORMAT_INVALID; long long total, available; long status = pReader->Length(&total, &available); if (status < 0) // error return status; pos = 0; long long end = (available >= 1024) ? 1024 : available; for (;;) { unsigned char b = 0; while (pos < end) { status = pReader->Read(pos, 1, &b); if (status < 0) // error return status; if (b == 0x1A) break; ++pos; } if (b != 0x1A) { if (pos >= 1024) return E_FILE_FORMAT_INVALID; // don't bother looking anymore if ((total >= 0) && ((total - available) < 5)) return E_FILE_FORMAT_INVALID; return available + 5; // 5 = 4-byte ID + 1st byte of size } if ((total >= 0) && ((total - pos) < 5)) return E_FILE_FORMAT_INVALID; if ((available - pos) < 5) return pos + 5; // try again later long len; const long long result = ReadUInt(pReader, pos, len); if (result < 0) // error return result; if (result == 0x0A45DFA3) { // EBML Header ID pos += len; // consume ID break; } ++pos; // throw away just the 0x1A byte, and try again } long len; long long result = GetUIntLength(pReader, pos, len); if (result < 0) // error return result; if (result > 0) // need more data return result; if (len < 1 || len > 8) return E_FILE_FORMAT_INVALID; if ((total >= 0) && ((total - pos) < len)) return E_FILE_FORMAT_INVALID; if ((available - pos) < len) return pos + len; // try again later result = ReadUInt(pReader, pos, len); if (result < 0) // error return result; pos += len; // consume size field if ((total >= 0) && ((total - pos) < result)) return E_FILE_FORMAT_INVALID; if ((available - pos) < result) return pos + result; end = pos + result; Init(); while (pos < end) { long long id, size; status = ParseElementHeader(pReader, pos, end, id, size); if (status < 0) // error return status; if (size == 0) // weird return E_FILE_FORMAT_INVALID; if (id == 0x0286) { // version m_version = UnserializeUInt(pReader, pos, size); if (m_version <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x02F7) { // read version m_readVersion = UnserializeUInt(pReader, pos, size); if (m_readVersion <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x02F2) { // max id length m_maxIdLength = UnserializeUInt(pReader, pos, size); if (m_maxIdLength <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x02F3) { // max size length m_maxSizeLength = UnserializeUInt(pReader, pos, size); if (m_maxSizeLength <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x0282) { // doctype if (m_docType) return E_FILE_FORMAT_INVALID; status = UnserializeString(pReader, pos, size, m_docType); if (status) // error return status; } else if (id == 0x0287) { // doctype version m_docTypeVersion = UnserializeUInt(pReader, pos, size); if (m_docTypeVersion <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x0285) { // doctype read version m_docTypeReadVersion = UnserializeUInt(pReader, pos, size); if (m_docTypeReadVersion <= 0) return E_FILE_FORMAT_INVALID; } pos += size; } if (pos != end) return E_FILE_FORMAT_INVALID; return 0; }
long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { assert(pReader); long long total, available; long status = pReader->Length(&total, &available); if (status < 0) // error return status; pos = 0; long long end = (available >= 1024) ? 1024 : available; for (;;) { unsigned char b = 0; while (pos < end) { status = pReader->Read(pos, 1, &b); if (status < 0) // error return status; if (b == 0x1A) break; ++pos; } if (b != 0x1A) { if (pos >= 1024) return E_FILE_FORMAT_INVALID; // don't bother looking anymore if ((total >= 0) && ((total - available) < 5)) return E_FILE_FORMAT_INVALID; return available + 5; // 5 = 4-byte ID + 1st byte of size } if ((total >= 0) && ((total - pos) < 5)) return E_FILE_FORMAT_INVALID; if ((available - pos) < 5) return pos + 5; // try again later long len; const long long result = ReadUInt(pReader, pos, len); if (result < 0) // error return result; if (result == 0x0A45DFA3) { // EBML Header ID pos += len; // consume ID break; } ++pos; // throw away just the 0x1A byte, and try again } long len; long long result = GetUIntLength(pReader, pos, len); if (result < 0) // error return result; if (result > 0) // need more data return result; assert(len > 0); assert(len <= 8); if ((total >= 0) && ((total - pos) < len)) return E_FILE_FORMAT_INVALID; if ((available - pos) < len) return pos + len; // try again later result = ReadUInt(pReader, pos, len); if (result < 0) // error return result; pos += len; // consume size field if ((total >= 0) && ((total - pos) < result)) return E_FILE_FORMAT_INVALID; if ((available - pos) < result) return pos + result; end = pos + result; Init(); while (pos < end) { long long id, size; status = ParseElementHeader(pReader, pos, end, id, size); if (status < 0) // error return status; if (size == 0) // weird return E_FILE_FORMAT_INVALID; if (id == 0x0286) { // version m_version = UnserializeUInt(pReader, pos, size); if (m_version <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x02F7) { // read version m_readVersion = UnserializeUInt(pReader, pos, size); if (m_readVersion <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x02F2) { // max id length m_maxIdLength = UnserializeUInt(pReader, pos, size); if (m_maxIdLength <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x02F3) { // max size length m_maxSizeLength = UnserializeUInt(pReader, pos, size); if (m_maxSizeLength <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x0282) { // doctype if (m_docType) return E_FILE_FORMAT_INVALID; status = UnserializeString(pReader, pos, size, m_docType); if (status) // error return status; } else if (id == 0x0287) { // doctype version m_docTypeVersion = UnserializeUInt(pReader, pos, size); if (m_docTypeVersion <= 0) return E_FILE_FORMAT_INVALID; } else if (id == 0x0285) { // doctype read version m_docTypeReadVersion = UnserializeUInt(pReader, pos, size); if (m_docTypeReadVersion <= 0) return E_FILE_FORMAT_INVALID; } pos += size; } assert(pos == end); return 0; }
C
Android
1
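The libwebm change shown in this record converts assertions on attacker-controlled header fields (the reader pointer, the size-field length, the end-of-header position) into explicit E_FILE_FORMAT_INVALID returns, since assert() compiles away in release builds and is not input validation. A small self-contained sketch of that validation style, with illustrative names and error codes:

    #include <stddef.h>

    #define PARSE_OK                0
    #define PARSE_FORMAT_INVALID  (-2)

    /* Reject malformed input with explicit error returns instead of
     * asserting, mirroring the EBMLHeader::Parse() change above. */
    static int check_field_length(const unsigned char *buf, size_t buflen, size_t len)
    {
            if (buf == NULL)                 /* was: assert(buf) */
                    return PARSE_FORMAT_INVALID;
            if (len < 1 || len > 8)          /* was: assert(len >= 1 && len <= 8) */
                    return PARSE_FORMAT_INVALID;
            if (len > buflen)                /* never read past the supplied buffer */
                    return PARSE_FORMAT_INVALID;
            return PARSE_OK;
    }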
CVE-2014-0131
https://www.cvedetails.com/cve/CVE-2014-0131/
CWE-416
https://github.com/torvalds/linux/commit/1fd819ecb90cc9b822cd84d3056ddba315d3340f
1fd819ecb90cc9b822cd84d3056ddba315d3340f
skbuff: skb_segment: orphan frags before copying skb_segment copies frags around, so we need to copy them carefully to avoid accessing user memory after reporting completion to userspace through a callback. skb_segment doesn't normally happen on datapath: TSO needs to be disabled - so disabling zero copy in this case does not look like a big deal. Signed-off-by: Michael S. Tsirkin <[email protected]> Acked-by: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize) { int i, delta, len = from->len; *fragstolen = false; if (skb_cloned(to)) return false; if (len <= skb_tailroom(to)) { BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } if (skb_has_frag_list(to) || skb_has_frag_list(from)) return false; if (skb_headlen(from) != 0) { struct page *page; unsigned int offset; if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) return false; if (skb_head_is_locked(from)) return false; delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); page = virt_to_head_page(from->head); offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; } else { if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) return false; delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); } WARN_ON_ONCE(delta < len); memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0; /* if the skb is not cloned this does nothing * since we set nr_frags to 0. */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i); to->truesize += delta; to->len += len; to->data_len += len; *delta_truesize = delta; return true; }
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta_truesize) { int i, delta, len = from->len; *fragstolen = false; if (skb_cloned(to)) return false; if (len <= skb_tailroom(to)) { BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } if (skb_has_frag_list(to) || skb_has_frag_list(from)) return false; if (skb_headlen(from) != 0) { struct page *page; unsigned int offset; if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) return false; if (skb_head_is_locked(from)) return false; delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); page = virt_to_head_page(from->head); offset = from->data - (unsigned char *)page_address(page); skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, page, offset, skb_headlen(from)); *fragstolen = true; } else { if (skb_shinfo(to)->nr_frags + skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) return false; delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); } WARN_ON_ONCE(delta < len); memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, skb_shinfo(from)->frags, skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0; /* if the skb is not cloned this does nothing * since we set nr_frags to 0. */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i); to->truesize += delta; to->len += len; to->data_len += len; *delta_truesize = delta; return true; }
C
linux
0
CVE-2019-15938
https://www.cvedetails.com/cve/CVE-2019-15938/
CWE-119
https://git.pengutronix.de/cgit/barebox/commit/fs/nfs.c?h=next&id=574ce994016107ad8ab0f845a785f28d7eaa5208
574ce994016107ad8ab0f845a785f28d7eaa5208
null
static int decode_filename(struct xdr_stream *xdr, char *name, u32 *length) { __be32 *p; u32 count; p = xdr_inline_decode(xdr, 4); if (!p) goto out_overflow; count = ntoh32(net_read_uint32(p)); if (count > 255) goto out_nametoolong; p = xdr_inline_decode(xdr, count); if (!p) goto out_overflow; memcpy(name, p, count); name[count] = 0; *length = count; return 0; out_nametoolong: pr_err("%s: returned a too long filename: %u\n", __func__, count); return -ENAMETOOLONG; out_overflow: pr_err("%s: premature end of packet\n", __func__); return -EIO; }
static int decode_filename(struct xdr_stream *xdr, char *name, u32 *length) { __be32 *p; u32 count; p = xdr_inline_decode(xdr, 4); if (!p) goto out_overflow; count = ntoh32(net_read_uint32(p)); if (count > 255) goto out_nametoolong; p = xdr_inline_decode(xdr, count); if (!p) goto out_overflow; memcpy(name, p, count); name[count] = 0; *length = count; return 0; out_nametoolong: pr_err("%s: returned a too long filename: %u\n", __func__, count); return -ENAMETOOLONG; out_overflow: pr_err("%s: premature end of packet\n", __func__); return -EIO; }
C
pengutronix
0
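decode_filename() in this record rejects any on-the-wire count larger than 255 before the memcpy() into the fixed-size name buffer, rather than discovering the overflow afterwards. A minimal sketch of the same check-before-copy pattern, with an assumed destination capacity and an illustrative function name:

    #include <string.h>

    #define NAME_MAX_LEN 255                 /* assumed capacity of dst, as in decode_filename() */

    /* Copy a length-prefixed name from untrusted wire data into a buffer
     * of NAME_MAX_LEN + 1 bytes. Return 0 on success, -1 on error. */
    static int copy_wire_name(char *dst, const unsigned char *src, unsigned int count)
    {
            if (count > NAME_MAX_LEN)
                    return -1;               /* would overflow dst */
            memcpy(dst, src, count);
            dst[count] = '\0';
            return 0;
    }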
CVE-2015-1352
https://www.cvedetails.com/cve/CVE-2015-1352/
null
https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e
124fb22a13fafa3648e4e15b4f207c7096d8155e
null
PHP_FUNCTION(pg_field_is_null) { php_pgsql_data_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_PG_DATA_ISNULL); }
PHP_FUNCTION(pg_field_is_null) { php_pgsql_data_info(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_PG_DATA_ISNULL); }
C
php
0
CVE-2017-5061
https://www.cvedetails.com/cve/CVE-2017-5061/
CWE-362
https://github.com/chromium/chromium/commit/5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
(Reland) Discard compositor frames from unloaded web content This is a reland of https://codereview.chromium.org/2707243005/ with a small change to fix an uninitialized memory error that fails on MSAN bots. BUG=672847 [email protected], [email protected] CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation Review-Url: https://codereview.chromium.org/2731283003 Cr-Commit-Position: refs/heads/master@{#454954}
bool CheckStep(T next, T* var) { int expected = next - 1; EXPECT_EQ(expected, *var); bool correct = expected == *var; *var = next; return correct; }
bool CheckStep(T next, T* var) { int expected = next - 1; EXPECT_EQ(expected, *var); bool correct = expected == *var; *var = next; return correct; }
C
Chrome
0
CVE-2015-6791
https://www.cvedetails.com/cve/CVE-2015-6791/
null
https://github.com/chromium/chromium/commit/7e995b26a5a503adefc0ad40435f7e16a45434c2
7e995b26a5a503adefc0ad40435f7e16a45434c2
Add a fake DriveFS launcher client. Using DriveFS requires building and deploying ChromeOS. Add a client for the fake DriveFS launcher to allow the use of a real DriveFS from a ChromeOS chroot to be used with a target_os="chromeos" build of chrome. This connects to the fake DriveFS launcher using mojo over a unix domain socket named by a command-line flag, using the launcher to create DriveFS instances. Bug: 848126 Change-Id: I22dcca154d41bda196dd7c1782bb503f6bcba5b1 Reviewed-on: https://chromium-review.googlesource.com/1098434 Reviewed-by: Xiyuan Xia <[email protected]> Commit-Queue: Sam McNally <[email protected]> Cr-Commit-Position: refs/heads/master@{#567513}
bool IsZipArchiverPackerEnabled() { return !base::CommandLine::ForCurrentProcess()->HasSwitch( kDisableZipArchiverPacker); }
bool IsZipArchiverPackerEnabled() { return !base::CommandLine::ForCurrentProcess()->HasSwitch( kDisableZipArchiverPacker); }
C
Chrome
0
CVE-2019-15922
https://www.cvedetails.com/cve/CVE-2019-15922/
CWE-476
https://github.com/torvalds/linux/commit/58ccd2d31e502c37e108b285bf3d343eb00c235b
58ccd2d31e502c37e108b285bf3d343eb00c235b
paride/pf: Fix potential NULL pointer dereference Syzkaller report this: pf: pf version 1.04, major 47, cluster 64, nice 0 pf: No ATAPI disk detected kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN PTI CPU: 0 PID: 9887 Comm: syz-executor.0 Tainted: G C 5.1.0-rc3+ #8 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 RIP: 0010:pf_init+0x7af/0x1000 [pf] Code: 46 77 d2 48 89 d8 48 c1 e8 03 80 3c 28 00 74 08 48 89 df e8 03 25 a6 d2 4c 8b 23 49 8d bc 24 80 05 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 74 05 e8 e6 24 a6 d2 49 8b bc 24 80 05 00 00 e8 79 34 RSP: 0018:ffff8881abcbf998 EFLAGS: 00010202 RAX: 00000000000000b0 RBX: ffffffffc1e4a8a8 RCX: ffffffffaec50788 RDX: 0000000000039b10 RSI: ffffc9000153c000 RDI: 0000000000000580 RBP: dffffc0000000000 R08: ffffed103ee44e59 R09: ffffed103ee44e59 R10: 0000000000000001 R11: ffffed103ee44e58 R12: 0000000000000000 R13: ffffffffc1e4b028 R14: 0000000000000000 R15: 0000000000000020 FS: 00007f1b78a91700(0000) GS:ffff8881f7200000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f6d72b207f8 CR3: 00000001d5790004 CR4: 00000000007606f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? 0xffffffffc1e50000 do_one_initcall+0xbc/0x47d init/main.c:901 do_init_module+0x1b5/0x547 kernel/module.c:3456 load_module+0x6405/0x8c10 kernel/module.c:3804 __do_sys_finit_module+0x162/0x190 kernel/module.c:3898 do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x462e99 Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f1b78a90c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99 RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003 RBP: 00007f1b78a90c70 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f1b78a916bc R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004 Modules linked in: pf(+) paride gpio_tps65218 tps65218 i2c_cht_wc ati_remote dc395x act_meta_skbtcindex act_ife ife ecdh_generic rc_xbox_dvd sky81452_regulator v4l2_fwnode leds_blinkm snd_usb_hiface comedi(C) aes_ti slhc cfi_cmdset_0020 mtd cfi_util sx8654 mdio_gpio of_mdio fixed_phy mdio_bitbang libphy alcor_pci matrix_keymap hid_uclogic usbhid scsi_transport_fc videobuf2_v4l2 videobuf2_dma_sg snd_soc_pcm179x_spi snd_soc_pcm179x_codec i2c_demux_pinctrl mdev snd_indigodj isl6405 mii enc28j60 cmac adt7316_i2c(C) adt7316(C) fmc_trivial fmc nf_reject_ipv4 authenc rc_dtt200u rtc_ds1672 dvb_usb_dibusb_mc dvb_usb_dibusb_mc_common dib3000mc dibx000_common dvb_usb_dibusb_common dvb_usb dvb_core videobuf2_common videobuf2_vmalloc videobuf2_memops regulator_haptic adf7242 mac802154 ieee802154 s5h1409 da9034_ts snd_intel8x0m wmi cx24120 usbcore sdhci_cadence sdhci_pltfm sdhci mmc_core joydev i2c_algo_bit scsi_transport_iscsi iscsi_boot_sysfs ves1820 lockd grace nfs_acl auth_rpcgss sunrp c ip_vs snd_soc_adau7002 snd_cs4281 snd_rawmidi gameport snd_opl3_lib snd_seq_device snd_hwdep snd_ac97_codec ad7418 hid_primax hid snd_soc_cs4265 snd_soc_core snd_pcm_dmaengine snd_pcm snd_timer ac97_bus snd_compress snd 
soundcore ti_adc108s102 eeprom_93cx6 i2c_algo_pca mlxreg_hotplug st_pressure st_sensors industrialio_triggered_buffer kfifo_buf industrialio v4l2_common videodev media snd_soc_adau_utils rc_pinnacle_grey rc_core pps_gpio leds_lm3692x nandcore ledtrig_pattern iptable_security iptable_raw iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun mousedev ppdev tpm kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel ide_pci_generic aes_x86_64 piix crypto_simd input_leds psmouse cryp td glue_helper ide_core intel_agp serio_raw intel_gtt agpgart ata_generic i2c_piix4 pata_acpi parport_pc parport rtc_cmos floppy sch_fq_codel ip_tables x_tables sha1_ssse3 sha1_generic ipv6 [last unloaded: paride] Dumping ftrace buffer: (ftrace buffer empty) ---[ end trace 7a818cf5f210d79e ]--- If alloc_disk fails in pf_init_units, pf->disk will be NULL, however in pf_detect and pf_exit, it's not check this before free.It may result a NULL pointer dereference. Also when register_blkdev failed, blk_cleanup_queue() and blk_mq_free_tag_set() should be called to free resources. Reported-by: Hulk Robot <[email protected]> Fixes: 6ce59025f118 ("paride/pf: cleanup queues when detection fails") Signed-off-by: YueHaibing <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static inline int read_reg(struct pf_unit *pf, int reg) { return pi_read_regr(pf->pi, 0, reg); }
static inline int read_reg(struct pf_unit *pf, int reg) { return pi_read_regr(pf->pi, 0, reg); }
C
linux
0
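The paride/pf commit message above describes a teardown path that used pf->disk without checking whether alloc_disk() had succeeded. The following minimal C sketch is not the kernel code; it only illustrates the same defensive pattern with hypothetical names: a cleanup loop that skips entries whose allocation failed, so a partially initialized array does not turn into a NULL pointer dereference.

#include <stdio.h>
#include <stdlib.h>

#define NUNITS 4

struct unit {
    char *disk;                       /* NULL when allocation failed, like pf->disk */
};

static struct unit units[NUNITS];

static void init_units(void)
{
    for (int i = 0; i < NUNITS; i++)
        units[i].disk = (i == 2) ? NULL : malloc(32);   /* unit 2 simulates an alloc failure */
}

static void cleanup_units(void)
{
    for (int i = 0; i < NUNITS; i++) {
        if (!units[i].disk)           /* the check the fix adds: skip failed units */
            continue;
        free(units[i].disk);
        units[i].disk = NULL;
    }
}

int main(void)
{
    init_units();
    cleanup_units();
    puts("cleanup finished without touching NULL entries");
    return 0;
}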
CVE-2015-6763
https://www.cvedetails.com/cve/CVE-2015-6763/
null
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did the similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <[email protected]> Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Avi Drissman <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#542517}
IntRect FrameSelection::ComputeRectToScroll( RevealExtentOption reveal_extent_option) { const VisibleSelection& selection = ComputeVisibleSelectionInDOMTree(); if (selection.IsCaret()) return AbsoluteCaretBounds(); DCHECK(selection.IsRange()); if (reveal_extent_option == kRevealExtent) return AbsoluteCaretBoundsOf(CreateVisiblePosition(selection.Extent())); layout_selection_->SetHasPendingSelection(); return layout_selection_->AbsoluteSelectionBounds(); }
IntRect FrameSelection::ComputeRectToScroll( RevealExtentOption reveal_extent_option) { const VisibleSelection& selection = ComputeVisibleSelectionInDOMTree(); if (selection.IsCaret()) return AbsoluteCaretBounds(); DCHECK(selection.IsRange()); if (reveal_extent_option == kRevealExtent) return AbsoluteCaretBoundsOf(CreateVisiblePosition(selection.Extent())); layout_selection_->SetHasPendingSelection(); return layout_selection_->AbsoluteSelectionBounds(); }
C
Chrome
0
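The MacViews commit message above describes a race in which one view disables the platform's secure text input just after another view enabled it. As a hedged illustration only (none of these names are Chromium's), the C sketch below shows a common way to make such a global mode robust: reference-count enable/disable so the mode stays on while at least one holder still wants it, instead of letting a stale owner's late disable win.

#include <assert.h>
#include <stdio.h>

static int secure_input_refcount;     /* guards a hypothetical global "secure input" mode */

static void secure_input_enable(void)
{
    if (secure_input_refcount++ == 0)
        puts("secure input: ON");
}

static void secure_input_disable(void)
{
    assert(secure_input_refcount > 0);
    if (--secure_input_refcount == 0)
        puts("secure input: OFF");
}

int main(void)
{
    secure_input_enable();   /* new password field gains focus */
    secure_input_enable();   /* old secure owner has not released yet */
    secure_input_disable();  /* old owner releases; mode stays ON for the new field */
    secure_input_disable();  /* last owner releases; mode really turns OFF */
    return 0;
}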
CVE-2011-1927
https://www.cvedetails.com/cve/CVE-2011-1927/
null
https://github.com/torvalds/linux/commit/64f3b9e203bd06855072e295557dca1485a2ecba
64f3b9e203bd06855072e295557dca1485a2ecba
net: ip_expire() must revalidate route Commit 4a94445c9a5c (net: Use ip_route_input_noref() in input path) added a bug in IP defragmentation handling, in case timeout is fired. When a frame is defragmented, we use last skb dst field when building final skb. Its dst is valid, since we are in rcu read section. But if a timeout occurs, we take first queued fragment to build one ICMP TIME EXCEEDED message. Problem is all queued skb have weak dst pointers, since we escaped RCU critical section after their queueing. icmp_send() might dereference a now freed (and possibly reused) part of memory. Calling skb_dst_drop() and ip_route_input_noref() to revalidate route is the only possible choice. Reported-by: Denys Fedoryshchenko <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
int ip_frag_nqueues(struct net *net) { return net->ipv4.frags.nqueues; }
int ip_frag_nqueues(struct net *net) { return net->ipv4.frags.nqueues; }
C
linux
0
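The ip_expire() commit message above comes down to one rule: a route pointer obtained inside an earlier RCU read section cannot be trusted later; it must be dropped and re-resolved (revalidated) at the point of use. The standalone C sketch below mimics that discipline with a hypothetical keyed table; it is not kernel code.

#include <stdio.h>
#include <string.h>

struct route {
    char dev[16];
    int valid;
};

static struct route table[4];         /* hypothetical routing table indexed by key */

/* Re-resolve at the point of use instead of trusting a stale cached pointer. */
static struct route *route_lookup(int key)
{
    if (key < 0 || key >= 4 || !table[key].valid)
        return NULL;
    return &table[key];
}

static void send_time_exceeded(int key)
{
    struct route *rt = route_lookup(key);   /* revalidate before use */
    if (!rt) {
        puts("route gone: drop the reply instead of dereferencing stale memory");
        return;
    }
    printf("replying via %s\n", rt->dev);
}

int main(void)
{
    strcpy(table[1].dev, "eth0");
    table[1].valid = 1;
    send_time_exceeded(1);
    table[1].valid = 0;               /* route torn down while the fragment queue waited */
    send_time_exceeded(1);
    return 0;
}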
CVE-2015-1213
https://www.cvedetails.com/cve/CVE-2015-1213/
CWE-119
https://github.com/chromium/chromium/commit/faaa2fd0a05f1622d9a8806da118d4f3b602e707
faaa2fd0a05f1622d9a8806da118d4f3b602e707
[Blink>Media] Allow autoplay muted on Android by default There was a mistake causing autoplay muted is shipped on Android but it will be disabled if the chromium embedder doesn't specify content setting for "AllowAutoplay" preference. This CL makes the AllowAutoplay preference true by default so that it is allowed by embedders (including AndroidWebView) unless they explicitly disable it. Intent to ship: https://groups.google.com/a/chromium.org/d/msg/blink-dev/Q1cnzNI2GpI/AL_eyUNABgAJ BUG=689018 Review-Url: https://codereview.chromium.org/2677173002 Cr-Commit-Position: refs/heads/master@{#448423}
bool HTMLMediaElement::hasPendingActivity() const { if (m_shouldDelayLoadEvent) return true; if (m_networkState == kNetworkLoading) return true; { AutoReset<bool> scope(&m_officialPlaybackPositionNeedsUpdate, false); if (couldPlayIfEnoughData()) return true; } if (m_seeking) return true; if (m_mediaSource) return true; if (m_asyncEventQueue->hasPendingEvents()) return true; return false; }
bool HTMLMediaElement::hasPendingActivity() const { if (m_shouldDelayLoadEvent) return true; if (m_networkState == kNetworkLoading) return true; { AutoReset<bool> scope(&m_officialPlaybackPositionNeedsUpdate, false); if (couldPlayIfEnoughData()) return true; } if (m_seeking) return true; if (m_mediaSource) return true; if (m_asyncEventQueue->hasPendingEvents()) return true; return false; }
C
Chrome
0
CVE-2014-5139
https://www.cvedetails.com/cve/CVE-2014-5139/
null
https://git.openssl.org/gitweb/?p=openssl.git;a=commit;h=80bd7b41b30af6ee96f519e629463583318de3b0
80bd7b41b30af6ee96f519e629463583318de3b0
null
static int tls12_find_id(int nid, tls12_lookup *table, size_t tlen) { size_t i; for (i = 0; i < tlen; i++) { if (table[i].nid == nid) return table[i].id; } return -1; }
static int tls12_find_id(int nid, tls12_lookup *table, size_t tlen) { size_t i; for (i = 0; i < tlen; i++) { if (table[i].nid == nid) return table[i].id; } return -1; }
C
openssl
0
CVE-2015-1265
https://www.cvedetails.com/cve/CVE-2015-1265/
null
https://github.com/chromium/chromium/commit/8ea5693d5cf304e56174bb6b65412f04209904db
8ea5693d5cf304e56174bb6b65412f04209904db
Move Editor::Transpose() out of Editor class This patch moves |Editor::Transpose()| out of |Editor| class as preparation of expanding it into |ExecutTranspose()| in "EditorCommand.cpp" to make |Editor| class simpler for improving code health. Following patch will expand |Transpose()| into |ExecutTranspose()|. Bug: 672405 Change-Id: Icde253623f31813d2b4517c4da7d4798bd5fadf6 Reviewed-on: https://chromium-review.googlesource.com/583880 Reviewed-by: Xiaocheng Hu <[email protected]> Commit-Queue: Yoshifumi Inoue <[email protected]> Cr-Commit-Position: refs/heads/master@{#489518}
static bool ExecuteForeColor(LocalFrame& frame, Event*, EditorCommandSource source, const String& value) { return ExecuteApplyStyle(frame, source, InputEvent::InputType::kNone, CSSPropertyColor, value); }
static bool ExecuteForeColor(LocalFrame& frame, Event*, EditorCommandSource source, const String& value) { return ExecuteApplyStyle(frame, source, InputEvent::InputType::kNone, CSSPropertyColor, value); }
C
Chrome
0
CVE-2012-2844
https://www.cvedetails.com/cve/CVE-2012-2844/
null
https://github.com/chromium/chromium/commit/46afbe7f7f55280947e9c06c429a68983ba9d8dd
46afbe7f7f55280947e9c06c429a68983ba9d8dd
[EFL][WK2] Add --window-size command line option to EFL MiniBrowser https://bugs.webkit.org/show_bug.cgi?id=100942 Patch by Mikhail Pozdnyakov <[email protected]> on 2012-11-05 Reviewed by Kenneth Rohde Christiansen. Added window-size (-s) command line option to EFL MiniBrowser. * MiniBrowser/efl/main.c: (window_create): (parse_window_size): (elm_main): git-svn-id: svn://svn.chromium.org/blink/trunk@133450 bbb929c8-8fbe-4397-9dbb-9b2b20218538
on_auth_ok(void *user_data, Evas_Object *obj, void *event_info) { AuthData *auth_data = (AuthData *)user_data; const char *username = elm_entry_entry_get(auth_data->username_entry); const char *password = elm_entry_entry_get(auth_data->password_entry); ewk_auth_request_authenticate(auth_data->request, username, password); auth_popup_close(auth_data); }
on_auth_ok(void *user_data, Evas_Object *obj, void *event_info) { AuthData *auth_data = (AuthData *)user_data; const char *username = elm_entry_entry_get(auth_data->username_entry); const char *password = elm_entry_entry_get(auth_data->password_entry); ewk_auth_request_authenticate(auth_data->request, username, password); auth_popup_close(auth_data); }
C
Chrome
0
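The MiniBrowser commit message above adds a --window-size (-s) option, whose core is parsing a "WIDTHxHEIGHT" string. The snippet below is a hedged, self-contained C version of that kind of parser, not the actual parse_window_size(); it uses sscanf and rejects malformed or non-positive sizes.

#include <stdio.h>

/* Hypothetical stand-in for a parse_window_size()-style helper. */
static int parse_window_size(const char *arg, int *width, int *height)
{
    int w, h;
    char trailing;

    if (!arg || sscanf(arg, "%dx%d%c", &w, &h, &trailing) != 2)
        return 0;                     /* not exactly "WxH" */
    if (w <= 0 || h <= 0)
        return 0;
    *width = w;
    *height = h;
    return 1;
}

int main(void)
{
    int w, h;
    const char *samples[] = { "800x600", "1280x720", "800x", "0x100", "800x600x2" };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        if (parse_window_size(samples[i], &w, &h))
            printf("%-10s -> %d by %d\n", samples[i], w, h);
        else
            printf("%-10s -> rejected\n", samples[i]);
    }
    return 0;
}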
CVE-2016-2860
https://www.cvedetails.com/cve/CVE-2016-2860/
CWE-284
http://git.openafs.org/?p=openafs.git;a=commitdiff;h=396240cf070a806b91fea81131d034e1399af1e0
396240cf070a806b91fea81131d034e1399af1e0
null
listSuperGroups(struct rx_call *call, afs_int32 aid, prlist *alist, afs_int32 *over, afs_int32 *cid) { afs_int32 code; struct ubik_trans *tt; afs_int32 temp; struct prentry tentry; alist->prlist_len = 0; alist->prlist_val = (afs_int32 *) 0; code = Initdb(); if (code != PRSUCCESS) goto done; code = ubik_BeginTransReadAny(dbase, UBIK_READTRANS, &tt); if (code) goto done; code = ubik_SetLock(tt, 1, 1, LOCKREAD); if (code) ABORT_WITH(tt, code); code = WhoIsThis(call, tt, cid); if (code) ABORT_WITH(tt, PRPERM); temp = FindByID(tt, aid); if (!temp) ABORT_WITH(tt, PRNOENT); code = pr_ReadEntry(tt, 0, temp, &tentry); if (code) ABORT_WITH(tt, code); if (!AccessOK(tt, *cid, &tentry, PRP_MEMBER_MEM, PRP_MEMBER_ANY)) ABORT_WITH(tt, PRPERM); code = GetSGList(tt, &tentry, alist); *over = 0; if (code == PRTOOMANY) *over = 1; else if (code != PRSUCCESS) ABORT_WITH(tt, code); code = ubik_EndTrans(tt); done: return code; }
listSuperGroups(struct rx_call *call, afs_int32 aid, prlist *alist, afs_int32 *over, afs_int32 *cid) { afs_int32 code; struct ubik_trans *tt; afs_int32 temp; struct prentry tentry; alist->prlist_len = 0; alist->prlist_val = (afs_int32 *) 0; code = Initdb(); if (code != PRSUCCESS) goto done; code = ubik_BeginTransReadAny(dbase, UBIK_READTRANS, &tt); if (code) goto done; code = ubik_SetLock(tt, 1, 1, LOCKREAD); if (code) ABORT_WITH(tt, code); code = WhoIsThis(call, tt, cid); if (code) ABORT_WITH(tt, PRPERM); temp = FindByID(tt, aid); if (!temp) ABORT_WITH(tt, PRNOENT); code = pr_ReadEntry(tt, 0, temp, &tentry); if (code) ABORT_WITH(tt, code); if (!AccessOK(tt, *cid, &tentry, PRP_MEMBER_MEM, PRP_MEMBER_ANY)) ABORT_WITH(tt, PRPERM); code = GetSGList(tt, &tentry, alist); *over = 0; if (code == PRTOOMANY) *over = 1; else if (code != PRSUCCESS) ABORT_WITH(tt, code); code = ubik_EndTrans(tt); done: return code; }
C
openafs
0
CVE-2011-2804
https://www.cvedetails.com/cve/CVE-2011-2804/
CWE-399
https://github.com/chromium/chromium/commit/dc7b094a338c6c521f918f478e993f0f74bbea0d
dc7b094a338c6c521f918f478e993f0f74bbea0d
Remove use of libcros from InputMethodLibrary. BUG=chromium-os:16238 TEST==confirm that input methods work as before on the netbook. Also confirm that the chrome builds and works on the desktop as before. Review URL: http://codereview.chromium.org/7003086 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89142 0039d316-1c4b-4281-b951-d872f2087c98
void StopInputMethodDaemon() { if (!initialized_successfully_) return; should_launch_ime_ = false; if (ibus_daemon_process_handle_ != base::kNullProcessHandle) { const base::ProcessId pid = base::GetProcId(ibus_daemon_process_handle_); if (!ibus_controller_->StopInputMethodProcess()) { LOG(ERROR) << "StopInputMethodProcess IPC failed. Sending SIGTERM to " << "PID " << pid; base::KillProcess(ibus_daemon_process_handle_, -1, false /* wait */); } VLOG(1) << "ibus-daemon (PID=" << pid << ") is terminated"; ibus_daemon_process_handle_ = base::kNullProcessHandle; } }
void StopInputMethodDaemon() { if (!initialized_successfully_) return; should_launch_ime_ = false; if (ibus_daemon_process_handle_ != base::kNullProcessHandle) { const base::ProcessId pid = base::GetProcId(ibus_daemon_process_handle_); if (!chromeos::StopInputMethodProcess(input_method_status_connection_)) { LOG(ERROR) << "StopInputMethodProcess IPC failed. Sending SIGTERM to " << "PID " << pid; base::KillProcess(ibus_daemon_process_handle_, -1, false /* wait */); } VLOG(1) << "ibus-daemon (PID=" << pid << ") is terminated"; ibus_daemon_process_handle_ = base::kNullProcessHandle; } }
C
Chrome
1
CVE-2013-1788
https://www.cvedetails.com/cve/CVE-2013-1788/
CWE-119
https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=8b6dc55e530b2f5ede6b9dfb64aafdd1d5836492
8b6dc55e530b2f5ede6b9dfb64aafdd1d5836492
null
void Splash::arbitraryTransformMask(SplashImageMaskSource src, void *srcData, int srcWidth, int srcHeight, SplashCoord *mat, GBool glyphMode) { SplashBitmap *scaledMask; SplashClipResult clipRes, clipRes2; SplashPipe pipe; int scaledWidth, scaledHeight, t0, t1; SplashCoord r00, r01, r10, r11, det, ir00, ir01, ir10, ir11; SplashCoord vx[4], vy[4]; int xMin, yMin, xMax, yMax; ImageSection section[3]; int nSections; int y, xa, xb, x, i, xx, yy; vx[0] = mat[4]; vy[0] = mat[5]; vx[1] = mat[2] + mat[4]; vy[1] = mat[3] + mat[5]; vx[2] = mat[0] + mat[2] + mat[4]; vy[2] = mat[1] + mat[3] + mat[5]; vx[3] = mat[0] + mat[4]; vy[3] = mat[1] + mat[5]; xMin = imgCoordMungeLowerC(vx[0], glyphMode); xMax = imgCoordMungeUpperC(vx[0], glyphMode); yMin = imgCoordMungeLowerC(vy[0], glyphMode); yMax = imgCoordMungeUpperC(vy[0], glyphMode); for (i = 1; i < 4; ++i) { t0 = imgCoordMungeLowerC(vx[i], glyphMode); if (t0 < xMin) { xMin = t0; } t0 = imgCoordMungeUpperC(vx[i], glyphMode); if (t0 > xMax) { xMax = t0; } t1 = imgCoordMungeLowerC(vy[i], glyphMode); if (t1 < yMin) { yMin = t1; } t1 = imgCoordMungeUpperC(vy[i], glyphMode); if (t1 > yMax) { yMax = t1; } } clipRes = state->clip->testRect(xMin, yMin, xMax - 1, yMax - 1); opClipRes = clipRes; if (clipRes == splashClipAllOutside) { return; } if (mat[0] >= 0) { t0 = imgCoordMungeUpperC(mat[0] + mat[4], glyphMode) - imgCoordMungeLowerC(mat[4], glyphMode); } else { t0 = imgCoordMungeUpperC(mat[4], glyphMode) - imgCoordMungeLowerC(mat[0] + mat[4], glyphMode); } if (mat[1] >= 0) { t1 = imgCoordMungeUpperC(mat[1] + mat[5], glyphMode) - imgCoordMungeLowerC(mat[5], glyphMode); } else { t1 = imgCoordMungeUpperC(mat[5], glyphMode) - imgCoordMungeLowerC(mat[1] + mat[5], glyphMode); } scaledWidth = t0 > t1 ? t0 : t1; if (mat[2] >= 0) { t0 = imgCoordMungeUpperC(mat[2] + mat[4], glyphMode) - imgCoordMungeLowerC(mat[4], glyphMode); } else { t0 = imgCoordMungeUpperC(mat[4], glyphMode) - imgCoordMungeLowerC(mat[2] + mat[4], glyphMode); } if (mat[3] >= 0) { t1 = imgCoordMungeUpperC(mat[3] + mat[5], glyphMode) - imgCoordMungeLowerC(mat[5], glyphMode); } else { t1 = imgCoordMungeUpperC(mat[5], glyphMode) - imgCoordMungeLowerC(mat[3] + mat[5], glyphMode); } scaledHeight = t0 > t1 ? t0 : t1; if (scaledWidth == 0) { scaledWidth = 1; } if (scaledHeight == 0) { scaledHeight = 1; } r00 = mat[0] / scaledWidth; r01 = mat[1] / scaledWidth; r10 = mat[2] / scaledHeight; r11 = mat[3] / scaledHeight; det = r00 * r11 - r01 * r10; if (splashAbs(det) < 1e-6) { return; } ir00 = r11 / det; ir01 = -r01 / det; ir10 = -r10 / det; ir11 = r00 / det; scaledMask = scaleMask(src, srcData, srcWidth, srcHeight, scaledWidth, scaledHeight); if (scaledMask->data == NULL) { error(errInternal, -1, "scaledMask->data is NULL in Splash::arbitraryTransformMask"); delete scaledMask; return; } i = (vy[2] <= vy[3]) ? 
2 : 3; if (vy[1] <= vy[i]) { i = 1; } if (vy[0] < vy[i] || (i != 3 && vy[0] == vy[i])) { i = 0; } if (vy[i] == vy[(i+1) & 3]) { section[0].y0 = imgCoordMungeLowerC(vy[i], glyphMode); section[0].y1 = imgCoordMungeUpperC(vy[(i+2) & 3], glyphMode) - 1; if (vx[i] < vx[(i+1) & 3]) { section[0].ia0 = i; section[0].ia1 = (i+3) & 3; section[0].ib0 = (i+1) & 3; section[0].ib1 = (i+2) & 3; } else { section[0].ia0 = (i+1) & 3; section[0].ia1 = (i+2) & 3; section[0].ib0 = i; section[0].ib1 = (i+3) & 3; } nSections = 1; } else { section[0].y0 = imgCoordMungeLowerC(vy[i], glyphMode); section[2].y1 = imgCoordMungeUpperC(vy[(i+2) & 3], glyphMode) - 1; section[0].ia0 = section[0].ib0 = i; section[2].ia1 = section[2].ib1 = (i+2) & 3; if (vx[(i+1) & 3] < vx[(i+3) & 3]) { section[0].ia1 = section[2].ia0 = (i+1) & 3; section[0].ib1 = section[2].ib0 = (i+3) & 3; } else { section[0].ia1 = section[2].ia0 = (i+3) & 3; section[0].ib1 = section[2].ib0 = (i+1) & 3; } if (vy[(i+1) & 3] < vy[(i+3) & 3]) { section[1].y0 = imgCoordMungeLowerC(vy[(i+1) & 3], glyphMode); section[2].y0 = imgCoordMungeUpperC(vy[(i+3) & 3], glyphMode); if (vx[(i+1) & 3] < vx[(i+3) & 3]) { section[1].ia0 = (i+1) & 3; section[1].ia1 = (i+2) & 3; section[1].ib0 = i; section[1].ib1 = (i+3) & 3; } else { section[1].ia0 = i; section[1].ia1 = (i+3) & 3; section[1].ib0 = (i+1) & 3; section[1].ib1 = (i+2) & 3; } } else { section[1].y0 = imgCoordMungeLowerC(vy[(i+3) & 3], glyphMode); section[2].y0 = imgCoordMungeUpperC(vy[(i+1) & 3], glyphMode); if (vx[(i+1) & 3] < vx[(i+3) & 3]) { section[1].ia0 = i; section[1].ia1 = (i+1) & 3; section[1].ib0 = (i+3) & 3; section[1].ib1 = (i+2) & 3; } else { section[1].ia0 = (i+3) & 3; section[1].ia1 = (i+2) & 3; section[1].ib0 = i; section[1].ib1 = (i+1) & 3; } } section[0].y1 = section[1].y0 - 1; section[1].y1 = section[2].y0 - 1; nSections = 3; } for (i = 0; i < nSections; ++i) { section[i].xa0 = vx[section[i].ia0]; section[i].ya0 = vy[section[i].ia0]; section[i].xa1 = vx[section[i].ia1]; section[i].ya1 = vy[section[i].ia1]; section[i].xb0 = vx[section[i].ib0]; section[i].yb0 = vy[section[i].ib0]; section[i].xb1 = vx[section[i].ib1]; section[i].yb1 = vy[section[i].ib1]; section[i].dxdya = (section[i].xa1 - section[i].xa0) / (section[i].ya1 - section[i].ya0); section[i].dxdyb = (section[i].xb1 - section[i].xb0) / (section[i].yb1 - section[i].yb0); } pipeInit(&pipe, 0, 0, state->fillPattern, NULL, (Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse); if (vectorAntialias) { drawAAPixelInit(); } if (nSections == 1) { if (section[0].y0 == section[0].y1) { ++section[0].y1; clipRes = opClipRes = splashClipPartial; } } else { if (section[0].y0 == section[2].y1) { ++section[1].y1; clipRes = opClipRes = splashClipPartial; } } for (i = 0; i < nSections; ++i) { for (y = section[i].y0; y <= section[i].y1; ++y) { xa = imgCoordMungeLowerC(section[i].xa0 + ((SplashCoord)y + 0.5 - section[i].ya0) * section[i].dxdya, glyphMode); xb = imgCoordMungeUpperC(section[i].xb0 + ((SplashCoord)y + 0.5 - section[i].yb0) * section[i].dxdyb, glyphMode); if (xa == xb) { ++xb; } if (clipRes != splashClipAllInside) { clipRes2 = state->clip->testSpan(xa, xb - 1, y); } else { clipRes2 = clipRes; } for (x = xa; x < xb; ++x) { xx = splashFloor(((SplashCoord)x + 0.5 - mat[4]) * ir00 + ((SplashCoord)y + 0.5 - mat[5]) * ir10); yy = splashFloor(((SplashCoord)x + 0.5 - mat[4]) * ir01 + ((SplashCoord)y + 0.5 - mat[5]) * ir11); if (xx < 0) { xx = 0; } else if (xx >= scaledWidth) { xx = scaledWidth - 1; } if (yy < 0) { yy = 0; } else if (yy >= 
scaledHeight) { yy = scaledHeight - 1; } pipe.shape = scaledMask->data[yy * scaledWidth + xx]; if (vectorAntialias && clipRes2 != splashClipAllInside) { drawAAPixel(&pipe, x, y); } else { drawPixel(&pipe, x, y, clipRes2 == splashClipAllInside); } } } } delete scaledMask; }
void Splash::arbitraryTransformMask(SplashImageMaskSource src, void *srcData, int srcWidth, int srcHeight, SplashCoord *mat, GBool glyphMode) { SplashBitmap *scaledMask; SplashClipResult clipRes, clipRes2; SplashPipe pipe; int scaledWidth, scaledHeight, t0, t1; SplashCoord r00, r01, r10, r11, det, ir00, ir01, ir10, ir11; SplashCoord vx[4], vy[4]; int xMin, yMin, xMax, yMax; ImageSection section[3]; int nSections; int y, xa, xb, x, i, xx, yy; vx[0] = mat[4]; vy[0] = mat[5]; vx[1] = mat[2] + mat[4]; vy[1] = mat[3] + mat[5]; vx[2] = mat[0] + mat[2] + mat[4]; vy[2] = mat[1] + mat[3] + mat[5]; vx[3] = mat[0] + mat[4]; vy[3] = mat[1] + mat[5]; xMin = imgCoordMungeLowerC(vx[0], glyphMode); xMax = imgCoordMungeUpperC(vx[0], glyphMode); yMin = imgCoordMungeLowerC(vy[0], glyphMode); yMax = imgCoordMungeUpperC(vy[0], glyphMode); for (i = 1; i < 4; ++i) { t0 = imgCoordMungeLowerC(vx[i], glyphMode); if (t0 < xMin) { xMin = t0; } t0 = imgCoordMungeUpperC(vx[i], glyphMode); if (t0 > xMax) { xMax = t0; } t1 = imgCoordMungeLowerC(vy[i], glyphMode); if (t1 < yMin) { yMin = t1; } t1 = imgCoordMungeUpperC(vy[i], glyphMode); if (t1 > yMax) { yMax = t1; } } clipRes = state->clip->testRect(xMin, yMin, xMax - 1, yMax - 1); opClipRes = clipRes; if (clipRes == splashClipAllOutside) { return; } if (mat[0] >= 0) { t0 = imgCoordMungeUpperC(mat[0] + mat[4], glyphMode) - imgCoordMungeLowerC(mat[4], glyphMode); } else { t0 = imgCoordMungeUpperC(mat[4], glyphMode) - imgCoordMungeLowerC(mat[0] + mat[4], glyphMode); } if (mat[1] >= 0) { t1 = imgCoordMungeUpperC(mat[1] + mat[5], glyphMode) - imgCoordMungeLowerC(mat[5], glyphMode); } else { t1 = imgCoordMungeUpperC(mat[5], glyphMode) - imgCoordMungeLowerC(mat[1] + mat[5], glyphMode); } scaledWidth = t0 > t1 ? t0 : t1; if (mat[2] >= 0) { t0 = imgCoordMungeUpperC(mat[2] + mat[4], glyphMode) - imgCoordMungeLowerC(mat[4], glyphMode); } else { t0 = imgCoordMungeUpperC(mat[4], glyphMode) - imgCoordMungeLowerC(mat[2] + mat[4], glyphMode); } if (mat[3] >= 0) { t1 = imgCoordMungeUpperC(mat[3] + mat[5], glyphMode) - imgCoordMungeLowerC(mat[5], glyphMode); } else { t1 = imgCoordMungeUpperC(mat[5], glyphMode) - imgCoordMungeLowerC(mat[3] + mat[5], glyphMode); } scaledHeight = t0 > t1 ? t0 : t1; if (scaledWidth == 0) { scaledWidth = 1; } if (scaledHeight == 0) { scaledHeight = 1; } r00 = mat[0] / scaledWidth; r01 = mat[1] / scaledWidth; r10 = mat[2] / scaledHeight; r11 = mat[3] / scaledHeight; det = r00 * r11 - r01 * r10; if (splashAbs(det) < 1e-6) { return; } ir00 = r11 / det; ir01 = -r01 / det; ir10 = -r10 / det; ir11 = r00 / det; scaledMask = scaleMask(src, srcData, srcWidth, srcHeight, scaledWidth, scaledHeight); if (scaledMask->data == NULL) { error(errInternal, -1, "scaledMask->data is NULL in Splash::arbitraryTransformMask"); delete scaledMask; return; } i = (vy[2] <= vy[3]) ? 
2 : 3; if (vy[1] <= vy[i]) { i = 1; } if (vy[0] < vy[i] || (i != 3 && vy[0] == vy[i])) { i = 0; } if (vy[i] == vy[(i+1) & 3]) { section[0].y0 = imgCoordMungeLowerC(vy[i], glyphMode); section[0].y1 = imgCoordMungeUpperC(vy[(i+2) & 3], glyphMode) - 1; if (vx[i] < vx[(i+1) & 3]) { section[0].ia0 = i; section[0].ia1 = (i+3) & 3; section[0].ib0 = (i+1) & 3; section[0].ib1 = (i+2) & 3; } else { section[0].ia0 = (i+1) & 3; section[0].ia1 = (i+2) & 3; section[0].ib0 = i; section[0].ib1 = (i+3) & 3; } nSections = 1; } else { section[0].y0 = imgCoordMungeLowerC(vy[i], glyphMode); section[2].y1 = imgCoordMungeUpperC(vy[(i+2) & 3], glyphMode) - 1; section[0].ia0 = section[0].ib0 = i; section[2].ia1 = section[2].ib1 = (i+2) & 3; if (vx[(i+1) & 3] < vx[(i+3) & 3]) { section[0].ia1 = section[2].ia0 = (i+1) & 3; section[0].ib1 = section[2].ib0 = (i+3) & 3; } else { section[0].ia1 = section[2].ia0 = (i+3) & 3; section[0].ib1 = section[2].ib0 = (i+1) & 3; } if (vy[(i+1) & 3] < vy[(i+3) & 3]) { section[1].y0 = imgCoordMungeLowerC(vy[(i+1) & 3], glyphMode); section[2].y0 = imgCoordMungeUpperC(vy[(i+3) & 3], glyphMode); if (vx[(i+1) & 3] < vx[(i+3) & 3]) { section[1].ia0 = (i+1) & 3; section[1].ia1 = (i+2) & 3; section[1].ib0 = i; section[1].ib1 = (i+3) & 3; } else { section[1].ia0 = i; section[1].ia1 = (i+3) & 3; section[1].ib0 = (i+1) & 3; section[1].ib1 = (i+2) & 3; } } else { section[1].y0 = imgCoordMungeLowerC(vy[(i+3) & 3], glyphMode); section[2].y0 = imgCoordMungeUpperC(vy[(i+1) & 3], glyphMode); if (vx[(i+1) & 3] < vx[(i+3) & 3]) { section[1].ia0 = i; section[1].ia1 = (i+1) & 3; section[1].ib0 = (i+3) & 3; section[1].ib1 = (i+2) & 3; } else { section[1].ia0 = (i+3) & 3; section[1].ia1 = (i+2) & 3; section[1].ib0 = i; section[1].ib1 = (i+1) & 3; } } section[0].y1 = section[1].y0 - 1; section[1].y1 = section[2].y0 - 1; nSections = 3; } for (i = 0; i < nSections; ++i) { section[i].xa0 = vx[section[i].ia0]; section[i].ya0 = vy[section[i].ia0]; section[i].xa1 = vx[section[i].ia1]; section[i].ya1 = vy[section[i].ia1]; section[i].xb0 = vx[section[i].ib0]; section[i].yb0 = vy[section[i].ib0]; section[i].xb1 = vx[section[i].ib1]; section[i].yb1 = vy[section[i].ib1]; section[i].dxdya = (section[i].xa1 - section[i].xa0) / (section[i].ya1 - section[i].ya0); section[i].dxdyb = (section[i].xb1 - section[i].xb0) / (section[i].yb1 - section[i].yb0); } pipeInit(&pipe, 0, 0, state->fillPattern, NULL, (Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse); if (vectorAntialias) { drawAAPixelInit(); } if (nSections == 1) { if (section[0].y0 == section[0].y1) { ++section[0].y1; clipRes = opClipRes = splashClipPartial; } } else { if (section[0].y0 == section[2].y1) { ++section[1].y1; clipRes = opClipRes = splashClipPartial; } } for (i = 0; i < nSections; ++i) { for (y = section[i].y0; y <= section[i].y1; ++y) { xa = imgCoordMungeLowerC(section[i].xa0 + ((SplashCoord)y + 0.5 - section[i].ya0) * section[i].dxdya, glyphMode); xb = imgCoordMungeUpperC(section[i].xb0 + ((SplashCoord)y + 0.5 - section[i].yb0) * section[i].dxdyb, glyphMode); if (xa == xb) { ++xb; } if (clipRes != splashClipAllInside) { clipRes2 = state->clip->testSpan(xa, xb - 1, y); } else { clipRes2 = clipRes; } for (x = xa; x < xb; ++x) { xx = splashFloor(((SplashCoord)x + 0.5 - mat[4]) * ir00 + ((SplashCoord)y + 0.5 - mat[5]) * ir10); yy = splashFloor(((SplashCoord)x + 0.5 - mat[4]) * ir01 + ((SplashCoord)y + 0.5 - mat[5]) * ir11); if (xx < 0) { xx = 0; } else if (xx >= scaledWidth) { xx = scaledWidth - 1; } if (yy < 0) { yy = 0; } else if (yy >= 
scaledHeight) { yy = scaledHeight - 1; } pipe.shape = scaledMask->data[yy * scaledWidth + xx]; if (vectorAntialias && clipRes2 != splashClipAllInside) { drawAAPixel(&pipe, x, y); } else { drawPixel(&pipe, x, y, clipRes2 == splashClipAllInside); } } } } delete scaledMask; }
CPP
poppler
0
CVE-2016-5219
https://www.cvedetails.com/cve/CVE-2016-5219/
CWE-416
https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae
a4150b688a754d3d10d2ca385155b1c95d77d6ae
Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM This makes the query of GL_COMPLETION_STATUS_KHR to programs much cheaper by minimizing the round-trip to the GPU thread. Bug: 881152, 957001 Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630 Commit-Queue: Kenneth Russell <[email protected]> Reviewed-by: Kentaro Hara <[email protected]> Reviewed-by: Geoff Lang <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#657568}
error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM( uint32_t immediate_data_size, const volatile void* cmd_data) { const volatile gles2::cmds::RequestExtensionCHROMIUM& c = *static_cast<const volatile gles2::cmds::RequestExtensionCHROMIUM*>( cmd_data); Bucket* bucket = GetBucket(c.bucket_id); if (!bucket || bucket->size() == 0) { return error::kInvalidArguments; } std::string feature_str; if (!bucket->GetAsString(&feature_str)) { return error::kInvalidArguments; } feature_str = feature_str + " "; bool desire_standard_derivatives = false; bool desire_frag_depth = false; bool desire_draw_buffers = false; bool desire_shader_texture_lod = false; bool desire_multi_draw = false; bool desire_multi_draw_instanced = false; if (feature_info_->context_type() == CONTEXT_TYPE_WEBGL1) { desire_standard_derivatives = feature_str.find("GL_OES_standard_derivatives ") != std::string::npos; desire_frag_depth = feature_str.find("GL_EXT_frag_depth ") != std::string::npos; desire_draw_buffers = feature_str.find("GL_EXT_draw_buffers ") != std::string::npos; desire_shader_texture_lod = feature_str.find("GL_EXT_shader_texture_lod ") != std::string::npos; } if (feature_info_->IsWebGLContext()) { desire_multi_draw = feature_str.find("GL_WEBGL_multi_draw ") != std::string::npos; desire_multi_draw_instanced = feature_str.find("GL_WEBGL_multi_draw_instanced ") != std::string::npos; } if (desire_standard_derivatives != derivatives_explicitly_enabled_ || desire_frag_depth != frag_depth_explicitly_enabled_ || desire_draw_buffers != draw_buffers_explicitly_enabled_ || desire_shader_texture_lod != shader_texture_lod_explicitly_enabled_ || desire_multi_draw != multi_draw_explicitly_enabled_ || desire_multi_draw_instanced != multi_draw_instanced_explicitly_enabled_) { derivatives_explicitly_enabled_ |= desire_standard_derivatives; frag_depth_explicitly_enabled_ |= desire_frag_depth; draw_buffers_explicitly_enabled_ |= desire_draw_buffers; shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod; multi_draw_explicitly_enabled_ |= desire_multi_draw; multi_draw_instanced_explicitly_enabled_ |= desire_multi_draw_instanced; DestroyShaderTranslator(); } if (feature_str.find("GL_CHROMIUM_color_buffer_float_rgba ") != std::string::npos) { feature_info_->EnableCHROMIUMColorBufferFloatRGBA(); } if (feature_str.find("GL_CHROMIUM_color_buffer_float_rgb ") != std::string::npos) { feature_info_->EnableCHROMIUMColorBufferFloatRGB(); } if (feature_str.find("GL_EXT_color_buffer_float ") != std::string::npos) { feature_info_->EnableEXTColorBufferFloat(); } if (feature_str.find("GL_EXT_color_buffer_half_float ") != std::string::npos) { feature_info_->EnableEXTColorBufferHalfFloat(); } if (feature_str.find("GL_OES_texture_float_linear ") != std::string::npos) { feature_info_->EnableOESTextureFloatLinear(); } if (feature_str.find("GL_OES_texture_half_float_linear ") != std::string::npos) { feature_info_->EnableOESTextureHalfFloatLinear(); } if (feature_str.find("GL_EXT_float_blend ") != std::string::npos) { feature_info_->EnableEXTFloatBlend(); } UpdateCapabilities(); return error::kNoError; }
error::Error GLES2DecoderImpl::HandleRequestExtensionCHROMIUM( uint32_t immediate_data_size, const volatile void* cmd_data) { const volatile gles2::cmds::RequestExtensionCHROMIUM& c = *static_cast<const volatile gles2::cmds::RequestExtensionCHROMIUM*>( cmd_data); Bucket* bucket = GetBucket(c.bucket_id); if (!bucket || bucket->size() == 0) { return error::kInvalidArguments; } std::string feature_str; if (!bucket->GetAsString(&feature_str)) { return error::kInvalidArguments; } feature_str = feature_str + " "; bool desire_standard_derivatives = false; bool desire_frag_depth = false; bool desire_draw_buffers = false; bool desire_shader_texture_lod = false; bool desire_multi_draw = false; bool desire_multi_draw_instanced = false; if (feature_info_->context_type() == CONTEXT_TYPE_WEBGL1) { desire_standard_derivatives = feature_str.find("GL_OES_standard_derivatives ") != std::string::npos; desire_frag_depth = feature_str.find("GL_EXT_frag_depth ") != std::string::npos; desire_draw_buffers = feature_str.find("GL_EXT_draw_buffers ") != std::string::npos; desire_shader_texture_lod = feature_str.find("GL_EXT_shader_texture_lod ") != std::string::npos; } if (feature_info_->IsWebGLContext()) { desire_multi_draw = feature_str.find("GL_WEBGL_multi_draw ") != std::string::npos; desire_multi_draw_instanced = feature_str.find("GL_WEBGL_multi_draw_instanced ") != std::string::npos; } if (desire_standard_derivatives != derivatives_explicitly_enabled_ || desire_frag_depth != frag_depth_explicitly_enabled_ || desire_draw_buffers != draw_buffers_explicitly_enabled_ || desire_shader_texture_lod != shader_texture_lod_explicitly_enabled_ || desire_multi_draw != multi_draw_explicitly_enabled_ || desire_multi_draw_instanced != multi_draw_instanced_explicitly_enabled_) { derivatives_explicitly_enabled_ |= desire_standard_derivatives; frag_depth_explicitly_enabled_ |= desire_frag_depth; draw_buffers_explicitly_enabled_ |= desire_draw_buffers; shader_texture_lod_explicitly_enabled_ |= desire_shader_texture_lod; multi_draw_explicitly_enabled_ |= desire_multi_draw; multi_draw_instanced_explicitly_enabled_ |= desire_multi_draw_instanced; DestroyShaderTranslator(); } if (feature_str.find("GL_CHROMIUM_color_buffer_float_rgba ") != std::string::npos) { feature_info_->EnableCHROMIUMColorBufferFloatRGBA(); } if (feature_str.find("GL_CHROMIUM_color_buffer_float_rgb ") != std::string::npos) { feature_info_->EnableCHROMIUMColorBufferFloatRGB(); } if (feature_str.find("GL_EXT_color_buffer_float ") != std::string::npos) { feature_info_->EnableEXTColorBufferFloat(); } if (feature_str.find("GL_EXT_color_buffer_half_float ") != std::string::npos) { feature_info_->EnableEXTColorBufferHalfFloat(); } if (feature_str.find("GL_OES_texture_float_linear ") != std::string::npos) { feature_info_->EnableOESTextureFloatLinear(); } if (feature_str.find("GL_OES_texture_half_float_linear ") != std::string::npos) { feature_info_->EnableOESTextureHalfFloatLinear(); } if (feature_str.find("GL_EXT_float_blend ") != std::string::npos) { feature_info_->EnableEXTFloatBlend(); } UpdateCapabilities(); return error::kNoError; }
C
Chrome
0
CVE-2017-18255
https://www.cvedetails.com/cve/CVE-2017-18255/
CWE-190
https://github.com/torvalds/linux/commit/1572e45a924f254d9570093abde46430c3172e3d
1572e45a924f254d9570093abde46430c3172e3d
perf/core: Fix the perf_cpu_time_max_percent check Use "proc_dointvec_minmax" instead of "proc_dointvec" to check the input value from user-space. If not, we can set a big value and some vars will overflow like "sysctl_perf_event_sample_rate" which will cause a lot of unexpected problems. Signed-off-by: Tan Xiaojun <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: <[email protected]> Cc: <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
__perf_event_account_interrupt(struct perf_event *event, int throttle) { struct hw_perf_event *hwc = &event->hw; int ret = 0; u64 seq; seq = __this_cpu_read(perf_throttled_seq); if (seq != hwc->interrupts_seq) { hwc->interrupts_seq = seq; hwc->interrupts = 1; } else { hwc->interrupts++; if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) { __this_cpu_inc(perf_throttled_count); tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(event, 0); ret = 1; } } if (event->attr.freq) { u64 now = perf_clock(); s64 delta = now - hwc->freq_time_stamp; hwc->freq_time_stamp = now; if (delta > 0 && delta < 2*TICK_NSEC) perf_adjust_period(event, delta, hwc->last_period, true); } return ret; }
__perf_event_account_interrupt(struct perf_event *event, int throttle) { struct hw_perf_event *hwc = &event->hw; int ret = 0; u64 seq; seq = __this_cpu_read(perf_throttled_seq); if (seq != hwc->interrupts_seq) { hwc->interrupts_seq = seq; hwc->interrupts = 1; } else { hwc->interrupts++; if (unlikely(throttle && hwc->interrupts >= max_samples_per_tick)) { __this_cpu_inc(perf_throttled_count); tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS); hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(event, 0); ret = 1; } } if (event->attr.freq) { u64 now = perf_clock(); s64 delta = now - hwc->freq_time_stamp; hwc->freq_time_stamp = now; if (delta > 0 && delta < 2*TICK_NSEC) perf_adjust_period(event, delta, hwc->last_period, true); } return ret; }
C
linux
0
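The perf commit message above swaps proc_dointvec for proc_dointvec_minmax so that user input is range-checked before it can overflow values derived from it. The short C program below demonstrates the same idea in a hedged, self-contained form with illustrative names: a setter accepts only values inside [min, max], so downstream computations always start from a validated value.

#include <limits.h>
#include <stdio.h>

static int cpu_time_max_percent = 25;  /* illustrative knob meant to stay within 0..100 */

static int set_percent_checked(int requested, int min, int max)
{
    if (requested < min || requested > max)
        return -1;                      /* reject instead of silently storing */
    cpu_time_max_percent = requested;
    return 0;
}

int main(void)
{
    int inputs[] = { 30, 100, 250, -5, INT_MAX };

    for (unsigned i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
        if (set_percent_checked(inputs[i], 0, 100) == 0)
            printf("accepted %d, knob now %d\n", inputs[i], cpu_time_max_percent);
        else
            printf("rejected %d, knob stays %d\n", inputs[i], cpu_time_max_percent);
    }
    return 0;
}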
CVE-2015-6768
https://www.cvedetails.com/cve/CVE-2015-6768/
CWE-264
https://github.com/chromium/chromium/commit/4c8b008f055f79e622344627fed7f820375a4f01
4c8b008f055f79e622344627fed7f820375a4f01
Change Document::detach() to RELEASE_ASSERT all subframes are gone. BUG=556724,577105 Review URL: https://codereview.chromium.org/1667573002 Cr-Commit-Position: refs/heads/master@{#373642}
ScriptedIdleTaskController& Document::ensureScriptedIdleTaskController() { if (!m_scriptedIdleTaskController) m_scriptedIdleTaskController = ScriptedIdleTaskController::create(this); return *m_scriptedIdleTaskController; }
ScriptedIdleTaskController& Document::ensureScriptedIdleTaskController() { if (!m_scriptedIdleTaskController) m_scriptedIdleTaskController = ScriptedIdleTaskController::create(this); return *m_scriptedIdleTaskController; }
C
Chrome
0
CVE-2017-5112
https://www.cvedetails.com/cve/CVE-2017-5112/
CWE-119
https://github.com/chromium/chromium/commit/f6ac1dba5e36f338a490752a2cbef3339096d9fe
f6ac1dba5e36f338a490752a2cbef3339096d9fe
Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later. BUG=740603 TEST=new conformance test [email protected],[email protected] Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4 Reviewed-on: https://chromium-review.googlesource.com/570840 Reviewed-by: Antoine Labour <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Commit-Queue: Zhenyao Mo <[email protected]> Cr-Commit-Position: refs/heads/master@{#486518}
void WebGLRenderingContextBase::texSubImage2D( ExecutionContext* execution_context, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLenum format, GLenum type, HTMLCanvasElement* canvas, ExceptionState& exception_state) { TexImageHelperHTMLCanvasElement( execution_context->GetSecurityOrigin(), kTexSubImage2D, target, level, 0, format, type, xoffset, yoffset, 0, canvas, GetTextureSourceSize(canvas), 1, 0, exception_state); }
void WebGLRenderingContextBase::texSubImage2D( ExecutionContext* execution_context, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLenum format, GLenum type, HTMLCanvasElement* canvas, ExceptionState& exception_state) { TexImageHelperHTMLCanvasElement( execution_context->GetSecurityOrigin(), kTexSubImage2D, target, level, 0, format, type, xoffset, yoffset, 0, canvas, GetTextureSourceSize(canvas), 1, 0, exception_state); }
C
Chrome
0
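The WebGL commit message above resets the ES3 pixel-pack parameters and PIXEL_PACK_BUFFER binding before an internal ReadPixels() and restores them afterwards. The C sketch below shows that save/override/restore pattern on a hypothetical global pack state; it illustrates the technique only and is not the Chromium DrawingBuffer code.

#include <stdio.h>

struct pack_state {
    int alignment;
    int row_length;
    int bound_buffer;
};

static struct pack_state g_pack = { 8, 1024, 42 };   /* whatever the caller had set */

static void internal_read_pixels(void)
{
    struct pack_state saved = g_pack;  /* save user state */

    g_pack.alignment = 4;              /* force known defaults for the internal readback */
    g_pack.row_length = 0;
    g_pack.bound_buffer = 0;

    printf("internal readback with alignment=%d row_length=%d buffer=%d\n",
           g_pack.alignment, g_pack.row_length, g_pack.bound_buffer);

    g_pack = saved;                    /* restore, so user state is untouched afterwards */
}

int main(void)
{
    internal_read_pixels();
    printf("caller still sees alignment=%d row_length=%d buffer=%d\n",
           g_pack.alignment, g_pack.row_length, g_pack.bound_buffer);
    return 0;
}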
CVE-2018-18347
https://www.cvedetails.com/cve/CVE-2018-18347/
CWE-20
https://github.com/chromium/chromium/commit/0aa576040704401ae28ea73b862d0b5d84262d51
0aa576040704401ae28ea73b862d0b5d84262d51
Don't preserve NavigationEntry for failed navigations with invalid URLs. The formatting logic may rewrite such URLs into an unsafe state. This is a first step before preventing navigations to invalid URLs entirely. Bug: 850824 Change-Id: I71743bfb4b610d55ce901ee8902125f934a2bb23 Reviewed-on: https://chromium-review.googlesource.com/c/1252942 Reviewed-by: Alex Moshchuk <[email protected]> Commit-Queue: Charlie Reis <[email protected]> Cr-Commit-Position: refs/heads/master@{#597304}
NavigatorImpl::NavigatorImpl(NavigationControllerImpl* navigation_controller, NavigatorDelegate* delegate) : controller_(navigation_controller), delegate_(delegate) {}
NavigatorImpl::NavigatorImpl(NavigationControllerImpl* navigation_controller, NavigatorDelegate* delegate) : controller_(navigation_controller), delegate_(delegate) {}
C
Chrome
0
CVE-2013-2884
https://www.cvedetails.com/cve/CVE-2013-2884/
CWE-399
https://github.com/chromium/chromium/commit/4ac8bc08e3306f38a5ab3e551aef6ad43753579c
4ac8bc08e3306f38a5ab3e551aef6ad43753579c
Set Attr.ownerDocument in Element#setAttributeNode() Attr objects can move across documents by setAttributeNode(). So It needs to reset ownerDocument through TreeScopeAdoptr::adoptIfNeeded(). BUG=248950 TEST=set-attribute-node-from-iframe.html Review URL: https://chromiumcodereview.appspot.com/17583003 git-svn-id: svn://svn.chromium.org/blink/trunk@152938 bbb929c8-8fbe-4397-9dbb-9b2b20218538
KURL Element::getURLAttribute(const QualifiedName& name) const { #if !ASSERT_DISABLED if (elementData()) { if (const Attribute* attribute = getAttributeItem(name)) ASSERT(isURLAttribute(*attribute)); } #endif return document()->completeURL(stripLeadingAndTrailingHTMLSpaces(getAttribute(name))); }
KURL Element::getURLAttribute(const QualifiedName& name) const { #if !ASSERT_DISABLED if (elementData()) { if (const Attribute* attribute = getAttributeItem(name)) ASSERT(isURLAttribute(*attribute)); } #endif return document()->completeURL(stripLeadingAndTrailingHTMLSpaces(getAttribute(name))); }
C
Chrome
0
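The Blink commit message above fixes setAttributeNode() so that an Attr moving between documents has its ownerDocument updated through TreeScopeAdopter::adoptIfNeeded(). The hedged C sketch below captures the underlying invariant with hypothetical structs: when a node is inserted under a different owner, the insertion path must re-point the node's owner field rather than leave it referring to the old document.

#include <stdio.h>

struct document {
    const char *name;
};

struct attr_node {
    const char *value;
    struct document *owner;            /* must always match the tree it lives in */
};

/* "adopt if needed": fix up ownership when a node crosses documents. */
static void adopt_if_needed(struct attr_node *node, struct document *new_owner)
{
    if (node->owner != new_owner)
        node->owner = new_owner;
}

static void set_attribute_node(struct document *doc, struct attr_node *node)
{
    adopt_if_needed(node, doc);        /* the step the fix adds */
    printf("attr \"%s\" now owned by %s\n", node->value, node->owner->name);
}

int main(void)
{
    struct document main_doc = { "main document" };
    struct document iframe_doc = { "iframe document" };
    struct attr_node attr = { "style=red", &iframe_doc };  /* created in the iframe */

    set_attribute_node(&main_doc, &attr);                  /* moved across documents */
    return 0;
}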
CVE-2014-3610
https://www.cvedetails.com/cve/CVE-2014-3610/
CWE-264
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architecutres, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to canonical value before writing instead of injecting a #GP. Some references from Intel and AMD manuals: According to Intel SDM description of WRMSR instruction #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to AMD manual instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static int mwait_interception(struct vcpu_svm *svm) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return nop_interception(svm); }
static int mwait_interception(struct vcpu_svm *svm) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return nop_interception(svm); }
C
linux
0
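The KVM commit message above injects #GP when a non-canonical address is written to MSRs such as IA32_LSTAR or IA32_FS_BASE. For 48-bit x86-64 virtual addresses, canonical means bits 63..47 are all copies of bit 47. The function below is a hedged, standalone C version of that check for the 48-bit case only; it is not the kernel helper, which also has to handle other address widths.

#include <stdint.h>
#include <stdio.h>

/* Canonical for 48-bit virtual addresses: the top 17 bits are all zero or all one. */
static int is_canonical_48(uint64_t va)
{
    uint64_t top = va >> 47;
    return top == 0 || top == 0x1ffff;
}

int main(void)
{
    uint64_t samples[] = {
        0x00007fffffffffffULL,   /* highest canonical user address */
        0xffff800000000000ULL,   /* lowest canonical kernel address */
        0x0000800000000000ULL,   /* non-canonical: bit 47 set, upper bits clear */
        0xdead000000000000ULL,   /* non-canonical garbage */
    };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%#018llx -> %s\n", (unsigned long long)samples[i],
               is_canonical_48(samples[i]) ? "canonical (write allowed)"
                                           : "non-canonical (inject #GP)");
    return 0;
}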
CVE-2012-2875
https://www.cvedetails.com/cve/CVE-2012-2875/
null
https://github.com/chromium/chromium/commit/d345af9ed62ee5f431be327967f41c3cc3fe936a
d345af9ed62ee5f431be327967f41c3cc3fe936a
[BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API https://bugs.webkit.org/show_bug.cgi?id=105143 RIM PR 171941 Reviewed by Rob Buis. Internally reviewed by George Staikos. Source/WebCore: TouchPoint instances now provide document coordinates for the viewport and content position of the touch event. The pixel coordinates stored in the TouchPoint should no longer be needed in WebKit. Also adapt to new method names and encapsulation of TouchPoint data members. No change in behavior, no new tests. * platform/blackberry/PlatformTouchPointBlackBerry.cpp: (WebCore::PlatformTouchPoint::PlatformTouchPoint): Source/WebKit/blackberry: TouchPoint instances now provide document coordinates for the viewport and content position of the touch event. The pixel coordinates stored in the TouchPoint should no longer be needed in WebKit. One exception is when passing events to a full screen plugin. Also adapt to new method names and encapsulation of TouchPoint data members. * Api/WebPage.cpp: (BlackBerry::WebKit::WebPage::touchEvent): (BlackBerry::WebKit::WebPage::touchPointAsMouseEvent): (BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin): (BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin): * WebKitSupport/InputHandler.cpp: (BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint): * WebKitSupport/InputHandler.h: (InputHandler): * WebKitSupport/TouchEventHandler.cpp: (BlackBerry::WebKit::TouchEventHandler::doFatFingers): (BlackBerry::WebKit::TouchEventHandler::handleTouchPoint): * WebKitSupport/TouchEventHandler.h: (TouchEventHandler): Tools: Adapt to new method names and encapsulation of TouchPoint data members. * DumpRenderTree/blackberry/EventSender.cpp: (addTouchPointCallback): (updateTouchPointCallback): (touchEndCallback): (releaseTouchPointCallback): (sendTouchEvent): git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void WebPagePrivate::selectionChanged(Frame* frame) { m_inputHandler->selectionChanged(); m_page->focusController()->setFocusedFrame(frame); }
void WebPagePrivate::selectionChanged(Frame* frame) { m_inputHandler->selectionChanged(); m_page->focusController()->setFocusedFrame(frame); }
C
Chrome
0
CVE-2017-5032
https://www.cvedetails.com/cve/CVE-2017-5032/
CWE-787
https://github.com/chromium/chromium/commit/9c90f2cec381a0460e3879eb8efd14bac4488dbe
9c90f2cec381a0460e3879eb8efd14bac4488dbe
Ignore updatePipBounds before initial bounds is set When PIP enter/exit transition happens, window state change and initial bounds change are committed in the same commit. However, as state change is applied first in OnPreWidgetCommit and the bounds is update later, if updatePipBounds is called between the gap, it ends up returning a wrong bounds based on the previous bounds. Currently, there are two callstacks that end up triggering updatePipBounds between the gap: (i) The state change causes OnWindowAddedToLayout and updatePipBounds is called in OnWMEvent, (ii) updatePipBounds is called in UpdatePipState to prevent it from being placed under some system ui. As it doesn't make sense to call updatePipBounds before the first bounds is not set, this CL adds a boolean to defer updatePipBounds. Bug: b130782006 Test: Got VLC into PIP and confirmed it was placed at the correct position. Change-Id: I5b9f3644bfb2533fd3f905bc09d49708a5d08a90 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1578719 Commit-Queue: Kazuki Takise <[email protected]> Auto-Submit: Kazuki Takise <[email protected]> Reviewed-by: Mitsuru Oshima <[email protected]> Cr-Commit-Position: refs/heads/master@{#668724}
ash::NonClientFrameViewAsh* ClientControlledShellSurface::GetFrameView() { return static_cast<ash::NonClientFrameViewAsh*>( widget_->non_client_view()->frame_view()); }
ash::NonClientFrameViewAsh* ClientControlledShellSurface::GetFrameView() { return static_cast<ash::NonClientFrameViewAsh*>( widget_->non_client_view()->frame_view()); }
C
Chrome
0
CVE-2017-5120
https://www.cvedetails.com/cve/CVE-2017-5120/
null
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
b7277af490d28ac7f802c015bb0ff31395768556
bindings: Support "attribute FrozenArray<T>?" Adds a quick hack to support a case of "attribute FrozenArray<T>?". Bug: 1028047 Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866 Reviewed-by: Hitoshi Yoshida <[email protected]> Commit-Queue: Yuki Shiino <[email protected]> Cr-Commit-Position: refs/heads/master@{#718676}
void V8TestObject::RaisesExceptionXPathNSResolverVoidMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info) { RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_raisesExceptionXPathNSResolverVoidMethod"); test_object_v8_internal::RaisesExceptionXPathNSResolverVoidMethodMethod(info); }
void V8TestObject::RaisesExceptionXPathNSResolverVoidMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info) { RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_raisesExceptionXPathNSResolverVoidMethod"); test_object_v8_internal::RaisesExceptionXPathNSResolverVoidMethodMethod(info); }
C
Chrome
0
CVE-2015-6779
https://www.cvedetails.com/cve/CVE-2015-6779/
CWE-264
https://github.com/chromium/chromium/commit/1eefa26e1795192c5a347a1e1e7a99e88c47f9c4
1eefa26e1795192c5a347a1e1e7a99e88c47f9c4
This patch implements a mechanism for more granular link URL permissions (filtering on scheme/host). This fixes the bug that allowed PDFs to have working links to any "chrome://" URLs. BUG=528505,226927 Review URL: https://codereview.chromium.org/1362433002 Cr-Commit-Position: refs/heads/master@{#351705}
bool GetGuestCallback(content::WebContents** guest_out, content::WebContents* guest) { EXPECT_FALSE(*guest_out); *guest_out = guest; return false; }
bool GetGuestCallback(content::WebContents** guest_out, content::WebContents* guest) { EXPECT_FALSE(*guest_out); *guest_out = guest; return false; }
C
Chrome
0
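The commit message above introduces filtering of PDF link URLs by scheme/host so that links into privileged chrome:// pages stop working. As a hedged illustration rather than the Chromium implementation, the C snippet below checks a URL against a small scheme allowlist and rejects everything else by default.

#include <stdio.h>
#include <string.h>

/* Allow only ordinary web schemes; privileged schemes such as chrome:// are denied. */
static int is_link_scheme_allowed(const char *url)
{
    static const char *const allowed[] = { "http://", "https://", "mailto:" };

    for (unsigned i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++)
        if (strncmp(url, allowed[i], strlen(allowed[i])) == 0)
            return 1;
    return 0;                          /* default deny */
}

int main(void)
{
    const char *links[] = {
        "https://example.org/doc.pdf",
        "chrome://settings",
        "javascript:alert(1)",
    };

    for (unsigned i = 0; i < sizeof(links) / sizeof(links[0]); i++)
        printf("%-30s -> %s\n", links[i],
               is_link_scheme_allowed(links[i]) ? "allowed" : "blocked");
    return 0;
}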
CVE-2014-3173
https://www.cvedetails.com/cve/CVE-2014-3173/
CWE-119
https://github.com/chromium/chromium/commit/ee7579229ff7e9e5ae28bf53aea069251499d7da
ee7579229ff7e9e5ae28bf53aea069251499d7da
Framebuffer clear() needs to consider the situation some draw buffers are disabled. This is when we expose DrawBuffers extension. BUG=376951 TEST=the attached test case, webgl conformance [email protected],[email protected] Review URL: https://codereview.chromium.org/315283002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@275338 0039d316-1c4b-4281-b951-d872f2087c98
TextureRef* CreateTexture( GLuint client_id, GLuint service_id) { return texture_manager()->CreateTexture(client_id, service_id); }
TextureRef* CreateTexture( GLuint client_id, GLuint service_id) { return texture_manager()->CreateTexture(client_id, service_id); }
C
Chrome
0
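The GPU commit message above notes that Framebuffer clear() must account for draw buffers that are disabled once the DrawBuffers extension is exposed. The hedged C sketch below shows the shape of that logic on a hypothetical framebuffer: only buffers that are actually enabled are cleared, and disabled ones keep their contents.

#include <stdio.h>

#define MAX_DRAW_BUFFERS 4

struct framebuffer {
    int enabled[MAX_DRAW_BUFFERS];     /* 0 plays the role of GL_NONE */
    unsigned color[MAX_DRAW_BUFFERS];
};

static void clear_color_buffers(struct framebuffer *fb, unsigned clear_value)
{
    for (int i = 0; i < MAX_DRAW_BUFFERS; i++) {
        if (!fb->enabled[i])           /* the case the fix adds handling for */
            continue;
        fb->color[i] = clear_value;
    }
}

int main(void)
{
    struct framebuffer fb = {
        .enabled = { 1, 0, 1, 0 },
        .color   = { 0x11, 0x22, 0x33, 0x44 },
    };

    clear_color_buffers(&fb, 0x00);

    for (int i = 0; i < MAX_DRAW_BUFFERS; i++)
        printf("buffer %d: %s, value %#x\n", i,
               fb.enabled[i] ? "enabled " : "disabled", fb.color[i]);
    return 0;
}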
CVE-2017-15423
https://www.cvedetails.com/cve/CVE-2017-15423/
CWE-310
https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
Roll src/third_party/boringssl/src 664e99a64..696c13bd6 https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604 BUG=778101 Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c Reviewed-on: https://chromium-review.googlesource.com/747941 Reviewed-by: Avi Drissman <[email protected]> Reviewed-by: David Benjamin <[email protected]> Commit-Queue: Steven Valdez <[email protected]> Cr-Commit-Position: refs/heads/master@{#513774}
RenderThreadImpl::CreateExternalBeginFrameSource(int routing_id) { return std::make_unique<CompositorExternalBeginFrameSource>( compositor_message_filter_.get(), sync_message_filter(), routing_id); }
RenderThreadImpl::CreateExternalBeginFrameSource(int routing_id) { return std::make_unique<CompositorExternalBeginFrameSource>( compositor_message_filter_.get(), sync_message_filter(), routing_id); }
C
Chrome
0
CVE-2013-6371
https://www.cvedetails.com/cve/CVE-2013-6371/
CWE-310
https://github.com/json-c/json-c/commit/64e36901a0614bf64a19bc3396469c66dcd0b015
64e36901a0614bf64a19bc3396469c66dcd0b015
Patch to address the following issues: * CVE-2013-6371: hash collision denial of service * CVE-2013-6370: buffer overflow if size_t is larger than int
static void json_tokener_reset_level(struct json_tokener *tok, int depth) { tok->stack[depth].state = json_tokener_state_eatws; tok->stack[depth].saved_state = json_tokener_state_start; json_object_put(tok->stack[depth].current); tok->stack[depth].current = NULL; free(tok->stack[depth].obj_field_name); tok->stack[depth].obj_field_name = NULL; }
static void json_tokener_reset_level(struct json_tokener *tok, int depth) { tok->stack[depth].state = json_tokener_state_eatws; tok->stack[depth].saved_state = json_tokener_state_start; json_object_put(tok->stack[depth].current); tok->stack[depth].current = NULL; free(tok->stack[depth].obj_field_name); tok->stack[depth].obj_field_name = NULL; }
C
json-c
0
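The json-c commit message above covers two problems: predictable hashing (CVE-2013-6371) and a buffer overflow when size_t lengths pass through int arithmetic (CVE-2013-6370). The standalone C sketch below illustrates only the second idea, in hedged form and not as json-c's printbuf code: before growing a buffer whose bookkeeping uses int, make sure the requested extension cannot push the total past INT_MAX.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
    char *data;
    int   size;      /* int-sized bookkeeping, as in older printbuf-style code */
    int   used;
};

static int buf_extend(struct buf *b, size_t extra)
{
    /* Reject requests that cannot be represented once added to 'used'. */
    if (extra > (size_t)(INT_MAX - b->used))
        return -1;

    if (b->used + (int)extra > b->size) {
        int new_size = b->used + (int)extra;
        char *p = realloc(b->data, (size_t)new_size);
        if (!p)
            return -1;
        b->data = p;
        b->size = new_size;
    }
    b->used += (int)extra;
    return 0;
}

int main(void)
{
    struct buf b = { NULL, 0, 0 };

    printf("small extend: %s\n", buf_extend(&b, 64) == 0 ? "ok" : "rejected");
    printf("huge extend:  %s\n", buf_extend(&b, (size_t)INT_MAX + 10) == 0 ? "ok" : "rejected");
    free(b.data);
    return 0;
}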
CVE-2017-6001
https://www.cvedetails.com/cve/CVE-2017-6001/
CWE-362
https://github.com/torvalds/linux/commit/321027c1fe77f892f4ea07846aeae08cefbbb290
321027c1fe77f892f4ea07846aeae08cefbbb290
perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race Di Shen reported a race between two concurrent sys_perf_event_open() calls where both try and move the same pre-existing software group into a hardware context. The problem is exactly that described in commit: f63a8daa5812 ("perf: Fix event->ctx locking") ... where, while we wait for a ctx->mutex acquisition, the event->ctx relation can have changed under us. That very same commit failed to recognise sys_perf_event_context() as an external access vector to the events and thereby didn't apply the established locking rules correctly. So while one sys_perf_event_open() call is stuck waiting on mutex_lock_double(), the other (which owns said locks) moves the group about. So by the time the former sys_perf_event_open() acquires the locks, the context we've acquired is stale (and possibly dead). Apply the established locking rules as per perf_event_ctx_lock_nested() to the mutex_lock_double() for the 'move_group' case. This obviously means we need to validate state after we acquire the locks. Reported-by: Di Shen (Keen Lab) Tested-by: John Dias <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Kees Cook <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Min Chong <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Fixes: f63a8daa5812 ("perf: Fix event->ctx locking") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static void perf_event_context_sched_out(struct task_struct *task, int ctxn, struct task_struct *next) { struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; struct perf_event_context *next_ctx; struct perf_event_context *parent, *next_parent; struct perf_cpu_context *cpuctx; int do_switch = 1; if (likely(!ctx)) return; cpuctx = __get_cpu_context(ctx); if (!cpuctx->task_ctx) return; rcu_read_lock(); next_ctx = next->perf_event_ctxp[ctxn]; if (!next_ctx) goto unlock; parent = rcu_dereference(ctx->parent_ctx); next_parent = rcu_dereference(next_ctx->parent_ctx); /* If neither context have a parent context; they cannot be clones. */ if (!parent && !next_parent) goto unlock; if (next_parent == ctx || next_ctx == parent || next_parent == parent) { /* * Looks like the two contexts are clones, so we might be * able to optimize the context switch. We lock both * contexts and check that they are clones under the * lock (including re-checking that neither has been * uncloned in the meantime). It doesn't matter which * order we take the locks because no other cpu could * be trying to lock both of these tasks. */ raw_spin_lock(&ctx->lock); raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { WRITE_ONCE(ctx->task, next); WRITE_ONCE(next_ctx->task, task); swap(ctx->task_ctx_data, next_ctx->task_ctx_data); /* * RCU_INIT_POINTER here is safe because we've not * modified the ctx and the above modification of * ctx->task and ctx->task_ctx_data are immaterial * since those values are always verified under * ctx->lock which we're now holding. */ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); do_switch = 0; perf_event_sync_stat(ctx, next_ctx); } raw_spin_unlock(&next_ctx->lock); raw_spin_unlock(&ctx->lock); } unlock: rcu_read_unlock(); if (do_switch) { raw_spin_lock(&ctx->lock); task_ctx_sched_out(cpuctx, ctx); raw_spin_unlock(&ctx->lock); } }
static void perf_event_context_sched_out(struct task_struct *task, int ctxn, struct task_struct *next) { struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; struct perf_event_context *next_ctx; struct perf_event_context *parent, *next_parent; struct perf_cpu_context *cpuctx; int do_switch = 1; if (likely(!ctx)) return; cpuctx = __get_cpu_context(ctx); if (!cpuctx->task_ctx) return; rcu_read_lock(); next_ctx = next->perf_event_ctxp[ctxn]; if (!next_ctx) goto unlock; parent = rcu_dereference(ctx->parent_ctx); next_parent = rcu_dereference(next_ctx->parent_ctx); /* If neither context have a parent context; they cannot be clones. */ if (!parent && !next_parent) goto unlock; if (next_parent == ctx || next_ctx == parent || next_parent == parent) { /* * Looks like the two contexts are clones, so we might be * able to optimize the context switch. We lock both * contexts and check that they are clones under the * lock (including re-checking that neither has been * uncloned in the meantime). It doesn't matter which * order we take the locks because no other cpu could * be trying to lock both of these tasks. */ raw_spin_lock(&ctx->lock); raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); if (context_equiv(ctx, next_ctx)) { WRITE_ONCE(ctx->task, next); WRITE_ONCE(next_ctx->task, task); swap(ctx->task_ctx_data, next_ctx->task_ctx_data); /* * RCU_INIT_POINTER here is safe because we've not * modified the ctx and the above modification of * ctx->task and ctx->task_ctx_data are immaterial * since those values are always verified under * ctx->lock which we're now holding. */ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); do_switch = 0; perf_event_sync_stat(ctx, next_ctx); } raw_spin_unlock(&next_ctx->lock); raw_spin_unlock(&ctx->lock); } unlock: rcu_read_unlock(); if (do_switch) { raw_spin_lock(&ctx->lock); task_ctx_sched_out(cpuctx, ctx); raw_spin_unlock(&ctx->lock); } }
C
linux
0
CVE-2013-0917
https://www.cvedetails.com/cve/CVE-2013-0917/
CWE-119
https://github.com/chromium/chromium/commit/02c8303512ebed345011f7b545e2f418799be2f0
02c8303512ebed345011f7b545e2f418799be2f0
Oilpan: Ship Oilpan for SyncCallbackHelper, CreateFileResult and CallbackWrapper in filesystem/ These are leftovers when we shipped Oilpan for filesystem/ once. BUG=340522 Review URL: https://codereview.chromium.org/501263003 git-svn-id: svn://svn.chromium.org/blink/trunk@180909 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void provideLocalFileSystemToWorker(WorkerClients* clients, PassOwnPtr<FileSystemClient> client) { clients->provideSupplement(LocalFileSystem::supplementName(), LocalFileSystem::create(client)); }
void provideLocalFileSystemToWorker(WorkerClients* clients, PassOwnPtr<FileSystemClient> client) { clients->provideSupplement(LocalFileSystem::supplementName(), LocalFileSystem::create(client)); }
C
Chrome
0
CVE-2016-10066
https://www.cvedetails.com/cve/CVE-2016-10066/
CWE-119
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
null
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); } return (LayerInfo *) RelinquishMagickMemory(layer_info); }
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); } return (LayerInfo *) RelinquishMagickMemory(layer_info); }
C
ImageMagick
0
CVE-2017-18222
https://www.cvedetails.com/cve/CVE-2017-18222/
CWE-119
https://github.com/torvalds/linux/commit/412b65d15a7f8a93794653968308fc100f2aa87c
412b65d15a7f8a93794653968308fc100f2aa87c
net: hns: fix ethtool_get_strings overflow in hns driver hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated is not enough for ethtool_get_strings(), which will cause random memory corruption. When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the the following can be observed without this patch: [ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80 [ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070. [ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70) [ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk [ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k [ 43.115218] Next obj: start=ffff801fb0b69098, len=80 [ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b. [ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38) [ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_ [ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai Signed-off-by: Timmy Li <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, u16 *max_q_per_vf) { switch (dsaf_mode) { case DSAF_MODE_DISABLE_6PORT_0VM: *max_vfn = 1; *max_q_per_vf = 16; break; case DSAF_MODE_DISABLE_FIX: case DSAF_MODE_DISABLE_SP: *max_vfn = 1; *max_q_per_vf = 1; break; case DSAF_MODE_DISABLE_2PORT_64VM: *max_vfn = 64; *max_q_per_vf = 1; break; case DSAF_MODE_DISABLE_6PORT_16VM: *max_vfn = 16; *max_q_per_vf = 1; break; default: *max_vfn = 1; *max_q_per_vf = 16; break; } }
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, u16 *max_q_per_vf) { switch (dsaf_mode) { case DSAF_MODE_DISABLE_6PORT_0VM: *max_vfn = 1; *max_q_per_vf = 16; break; case DSAF_MODE_DISABLE_FIX: case DSAF_MODE_DISABLE_SP: *max_vfn = 1; *max_q_per_vf = 1; break; case DSAF_MODE_DISABLE_2PORT_64VM: *max_vfn = 64; *max_q_per_vf = 1; break; case DSAF_MODE_DISABLE_6PORT_16VM: *max_vfn = 16; *max_q_per_vf = 1; break; default: *max_vfn = 1; *max_q_per_vf = 16; break; } }
C
linux
0
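The commit message in the CVE-2017-18222 record above describes a count/fill mismatch: hns_get_sset_count() reported one statistics count, the ethtool core sized the string buffer from that count, and the driver's strings callback then wrote more entries than the reported count allowed, corrupting adjacent memory. Below is a minimal userspace sketch of that count/fill contract; example_get_sset_count, example_get_strings and stat_names are hypothetical illustrations, not the actual hns code.

    #include <stdio.h>
    #include <string.h>

    #define STAT_NAME_LEN 32   /* stands in for ETH_GSTRING_LEN */

    /* Hypothetical statistic names; the real driver builds these per
     * ring/queue, which is where the two counts diverged. */
    static const char * const stat_names[] = {
            "tx_pkt", "tx_pkt_ok", "tx_pkt_err_csum_fail",
    };

    /* The count reported to the core (used to size the string buffer)... */
    static int example_get_sset_count(void)
    {
            return (int)(sizeof(stat_names) / sizeof(stat_names[0]));
    }

    /* ...must bound the number of entries written here.  Writing more
     * entries than example_get_sset_count() reported overruns the buffer,
     * which is the corruption described in the commit message. */
    static void example_get_strings(char *data)
    {
            int i;

            for (i = 0; i < example_get_sset_count(); i++)
                    snprintf(data + i * STAT_NAME_LEN, STAT_NAME_LEN,
                             "%s", stat_names[i]);
    }

    int main(void)
    {
            char buf[3 * STAT_NAME_LEN];

            example_get_strings(buf);
            printf("%s\n", buf);        /* prints "tx_pkt" */
            return 0;
    }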
CVE-2014-2669
https://www.cvedetails.com/cve/CVE-2014-2669/
CWE-189
https://github.com/postgres/postgres/commit/31400a673325147e1205326008e32135a78b4d8a
31400a673325147e1205326008e32135a78b4d8a
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
circle_diameter(PG_FUNCTION_ARGS) { CIRCLE *circle = PG_GETARG_CIRCLE_P(0); PG_RETURN_FLOAT8(2 * circle->radius); }
circle_diameter(PG_FUNCTION_ARGS) { CIRCLE *circle = PG_GETARG_CIRCLE_P(0); PG_RETURN_FLOAT8(2 * circle->radius); }
C
postgres
0
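The commit message in the CVE-2014-2669 record above describes allocation-size arithmetic that wrapped to a small positive value, so the writes that followed ran past the undersized buffer (the stored circle_diameter() function is just a non-vulnerable sample from the same patch). Below is a minimal sketch of predicting the overflow before allocating, assuming a generic header-plus-array layout; alloc_points is a hypothetical helper, not PostgreSQL code.

    #include <stdint.h>
    #include <stdlib.h>

    /* Size a header-plus-array allocation without letting the arithmetic
     * wrap: if npts * elemsize would overflow, refuse instead of handing
     * back an inadvertently small block. */
    static void *alloc_points(size_t header, size_t npts, size_t elemsize)
    {
            if (elemsize != 0 && npts > (SIZE_MAX - header) / elemsize)
                    return NULL;        /* predicted overflow: reject up front */
            return malloc(header + npts * elemsize);
    }

    int main(void)
    {
            /* A request that would wrap is refused... */
            void *p = alloc_points(16, SIZE_MAX / 2, 4);
            /* ...while a sane one succeeds. */
            void *q = alloc_points(16, 128, 4);

            free(q);
            return (p == NULL && q != NULL) ? 0 : 1;
    }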
CVE-2015-8382
https://www.cvedetails.com/cve/CVE-2015-8382/
CWE-119
https://git.php.net/?p=php-src.git;a=commit;h=c351b47ce85a3a147cfa801fa9f0149ab4160834
c351b47ce85a3a147cfa801fa9f0149ab4160834
null
PHPAPI void php_pcre_match_impl(pcre_cache_entry *pce, char *subject, int subject_len, zval *return_value, zval *subpats, int global, int use_flags, long flags, long start_offset TSRMLS_DC) { zval *result_set, /* Holds a set of subpatterns after a global match */ **match_sets = NULL; /* An array of sets of matches for each subpattern after a global match */ pcre_extra *extra = pce->extra;/* Holds results of studying */ pcre_extra extra_data; /* Used locally for exec options */ int exoptions = 0; /* Execution options */ int count = 0; /* Count of matched subpatterns */ int *offsets; /* Array of subpattern offsets */ int num_subpats; /* Number of captured subpatterns */ int size_offsets; /* Size of the offsets array */ int matched; /* Has anything matched */ int g_notempty = 0; /* If the match should not be empty */ const char **stringlist; /* Holds list of subpatterns */ char **subpat_names; /* Array for named subpatterns */ int i, rc; int subpats_order; /* Order of subpattern matches */ int offset_capture; /* Capture match offsets: yes/no */ /* Overwrite the passed-in value for subpatterns with an empty array. */ if (subpats != NULL) { zval_dtor(subpats); array_init(subpats); } subpats_order = global ? PREG_PATTERN_ORDER : 0; if (use_flags) { offset_capture = flags & PREG_OFFSET_CAPTURE; /* * subpats_order is pre-set to pattern mode so we change it only if * necessary. */ if (flags & 0xff) { subpats_order = flags & 0xff; } if ((global && (subpats_order < PREG_PATTERN_ORDER || subpats_order > PREG_SET_ORDER)) || (!global && subpats_order != 0)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid flags specified"); return; } } else { offset_capture = 0; } /* Negative offset counts from the end of the string. */ if (start_offset < 0) { start_offset = subject_len + start_offset; if (start_offset < 0) { start_offset = 0; } } if (extra == NULL) { extra_data.flags = PCRE_EXTRA_MATCH_LIMIT | PCRE_EXTRA_MATCH_LIMIT_RECURSION; extra = &extra_data; } extra->match_limit = PCRE_G(backtrack_limit); extra->match_limit_recursion = PCRE_G(recursion_limit); /* Calculate the size of the offsets array, and allocate memory for it. */ rc = pcre_fullinfo(pce->re, extra, PCRE_INFO_CAPTURECOUNT, &num_subpats); if (rc < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Internal pcre_fullinfo() error %d", rc); RETURN_FALSE; } num_subpats++; size_offsets = num_subpats * 3; /* * Build a mapping from subpattern numbers to their names. We will always * allocate the table, even though there may be no named subpatterns. This * avoids somewhat more complicated logic in the inner loops. */ subpat_names = make_subpats_table(num_subpats, pce TSRMLS_CC); if (!subpat_names) { RETURN_FALSE; } offsets = (int *)safe_emalloc(size_offsets, sizeof(int), 0); memset(offsets, 0, size_offsets*sizeof(int)); /* Allocate match sets array and initialize the values. */ if (global && subpats && subpats_order == PREG_PATTERN_ORDER) { match_sets = (zval **)safe_emalloc(num_subpats, sizeof(zval *), 0); for (i=0; i<num_subpats; i++) { ALLOC_ZVAL(match_sets[i]); array_init(match_sets[i]); INIT_PZVAL(match_sets[i]); } } matched = 0; PCRE_G(error_code) = PHP_PCRE_NO_ERROR; do { /* Execute the regular expression. */ count = pcre_exec(pce->re, extra, subject, subject_len, start_offset, exoptions|g_notempty, offsets, size_offsets); /* the string was already proved to be valid UTF-8 */ exoptions |= PCRE_NO_UTF8_CHECK; /* Check for too many substrings condition. 
*/ if (count == 0) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Matched, but too many substrings"); count = size_offsets/3; } /* If something has matched */ if (count > 0) { matched++; /* If subpatterns array has been passed, fill it in with values. */ if (subpats != NULL) { /* Try to get the list of substrings and display a warning if failed. */ if (pcre_get_substring_list(subject, offsets, count, &stringlist) < 0) { efree(subpat_names); efree(offsets); if (match_sets) efree(match_sets); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Get subpatterns list failed"); RETURN_FALSE; } if (global) { /* global pattern matching */ if (subpats && subpats_order == PREG_PATTERN_ORDER) { /* For each subpattern, insert it into the appropriate array. */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(match_sets[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], NULL); } else { add_next_index_stringl(match_sets[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } /* * If the number of captured subpatterns on this run is * less than the total possible number, pad the result * arrays with empty strings. */ if (count < num_subpats) { for (; i < num_subpats; i++) { add_next_index_string(match_sets[i], "", 1); } } } else { /* Allocate the result set array */ ALLOC_ZVAL(result_set); array_init(result_set); INIT_PZVAL(result_set); /* Add all the subpatterns to it */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(result_set, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], subpat_names[i]); } else { if (subpat_names[i]) { add_assoc_stringl(result_set, subpat_names[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } add_next_index_stringl(result_set, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } /* And add it to the output array */ zend_hash_next_index_insert(Z_ARRVAL_P(subpats), &result_set, sizeof(zval *), NULL); } } else { /* single pattern matching */ /* For each subpattern, insert it into the subpatterns array. */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(subpats, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], subpat_names[i]); } else { if (subpat_names[i]) { add_assoc_stringl(subpats, subpat_names[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } add_next_index_stringl(subpats, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } } pcre_free((void *) stringlist); } } else if (count == PCRE_ERROR_NOMATCH) { /* If we previously set PCRE_NOTEMPTY after a null match, this is not necessarily the end. We need to advance the start offset, and continue. Fudge the offset values to achieve this, unless we're already at the end of the string. */ if (g_notempty != 0 && start_offset < subject_len) { offsets[0] = start_offset; offsets[1] = start_offset + 1; } else break; } else { pcre_handle_exec_error(count TSRMLS_CC); break; } /* If we have matched an empty string, mimic what Perl's /g options does. This turns out to be rather cunning. First we set PCRE_NOTEMPTY and try the match again at the same point. If this fails (picked up above) we advance to the next character. */ g_notempty = (offsets[1] == offsets[0])? 
PCRE_NOTEMPTY | PCRE_ANCHORED : 0; /* Advance to the position right after the last full match */ start_offset = offsets[1]; } while (global); /* Add the match sets to the output array and clean up */ if (global && subpats && subpats_order == PREG_PATTERN_ORDER) { for (i = 0; i < num_subpats; i++) { if (subpat_names[i]) { zend_hash_update(Z_ARRVAL_P(subpats), subpat_names[i], strlen(subpat_names[i])+1, &match_sets[i], sizeof(zval *), NULL); Z_ADDREF_P(match_sets[i]); } zend_hash_next_index_insert(Z_ARRVAL_P(subpats), &match_sets[i], sizeof(zval *), NULL); } efree(match_sets); } efree(offsets); efree(subpat_names); /* Did we encounter an error? */ if (PCRE_G(error_code) == PHP_PCRE_NO_ERROR) { RETVAL_LONG(matched); } else { RETVAL_FALSE; } }
PHPAPI void php_pcre_match_impl(pcre_cache_entry *pce, char *subject, int subject_len, zval *return_value, zval *subpats, int global, int use_flags, long flags, long start_offset TSRMLS_DC) { zval *result_set, /* Holds a set of subpatterns after a global match */ **match_sets = NULL; /* An array of sets of matches for each subpattern after a global match */ pcre_extra *extra = pce->extra;/* Holds results of studying */ pcre_extra extra_data; /* Used locally for exec options */ int exoptions = 0; /* Execution options */ int count = 0; /* Count of matched subpatterns */ int *offsets; /* Array of subpattern offsets */ int num_subpats; /* Number of captured subpatterns */ int size_offsets; /* Size of the offsets array */ int matched; /* Has anything matched */ int g_notempty = 0; /* If the match should not be empty */ const char **stringlist; /* Holds list of subpatterns */ char **subpat_names; /* Array for named subpatterns */ int i, rc; int subpats_order; /* Order of subpattern matches */ int offset_capture; /* Capture match offsets: yes/no */ /* Overwrite the passed-in value for subpatterns with an empty array. */ if (subpats != NULL) { zval_dtor(subpats); array_init(subpats); } subpats_order = global ? PREG_PATTERN_ORDER : 0; if (use_flags) { offset_capture = flags & PREG_OFFSET_CAPTURE; /* * subpats_order is pre-set to pattern mode so we change it only if * necessary. */ if (flags & 0xff) { subpats_order = flags & 0xff; } if ((global && (subpats_order < PREG_PATTERN_ORDER || subpats_order > PREG_SET_ORDER)) || (!global && subpats_order != 0)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid flags specified"); return; } } else { offset_capture = 0; } /* Negative offset counts from the end of the string. */ if (start_offset < 0) { start_offset = subject_len + start_offset; if (start_offset < 0) { start_offset = 0; } } if (extra == NULL) { extra_data.flags = PCRE_EXTRA_MATCH_LIMIT | PCRE_EXTRA_MATCH_LIMIT_RECURSION; extra = &extra_data; } extra->match_limit = PCRE_G(backtrack_limit); extra->match_limit_recursion = PCRE_G(recursion_limit); /* Calculate the size of the offsets array, and allocate memory for it. */ rc = pcre_fullinfo(pce->re, extra, PCRE_INFO_CAPTURECOUNT, &num_subpats); if (rc < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Internal pcre_fullinfo() error %d", rc); RETURN_FALSE; } num_subpats++; size_offsets = num_subpats * 3; /* * Build a mapping from subpattern numbers to their names. We will always * allocate the table, even though there may be no named subpatterns. This * avoids somewhat more complicated logic in the inner loops. */ subpat_names = make_subpats_table(num_subpats, pce TSRMLS_CC); if (!subpat_names) { RETURN_FALSE; } offsets = (int *)safe_emalloc(size_offsets, sizeof(int), 0); /* Allocate match sets array and initialize the values. */ if (global && subpats && subpats_order == PREG_PATTERN_ORDER) { match_sets = (zval **)safe_emalloc(num_subpats, sizeof(zval *), 0); for (i=0; i<num_subpats; i++) { ALLOC_ZVAL(match_sets[i]); array_init(match_sets[i]); INIT_PZVAL(match_sets[i]); } } matched = 0; PCRE_G(error_code) = PHP_PCRE_NO_ERROR; do { /* Execute the regular expression. */ count = pcre_exec(pce->re, extra, subject, subject_len, start_offset, exoptions|g_notempty, offsets, size_offsets); /* the string was already proved to be valid UTF-8 */ exoptions |= PCRE_NO_UTF8_CHECK; /* Check for too many substrings condition. 
*/ if (count == 0) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Matched, but too many substrings"); count = size_offsets/3; } /* If something has matched */ if (count > 0) { matched++; /* If subpatterns array has been passed, fill it in with values. */ if (subpats != NULL) { /* Try to get the list of substrings and display a warning if failed. */ if (pcre_get_substring_list(subject, offsets, count, &stringlist) < 0) { efree(subpat_names); efree(offsets); if (match_sets) efree(match_sets); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Get subpatterns list failed"); RETURN_FALSE; } if (global) { /* global pattern matching */ if (subpats && subpats_order == PREG_PATTERN_ORDER) { /* For each subpattern, insert it into the appropriate array. */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(match_sets[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], NULL); } else { add_next_index_stringl(match_sets[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } /* * If the number of captured subpatterns on this run is * less than the total possible number, pad the result * arrays with empty strings. */ if (count < num_subpats) { for (; i < num_subpats; i++) { add_next_index_string(match_sets[i], "", 1); } } } else { /* Allocate the result set array */ ALLOC_ZVAL(result_set); array_init(result_set); INIT_PZVAL(result_set); /* Add all the subpatterns to it */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(result_set, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], subpat_names[i]); } else { if (subpat_names[i]) { add_assoc_stringl(result_set, subpat_names[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } add_next_index_stringl(result_set, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } /* And add it to the output array */ zend_hash_next_index_insert(Z_ARRVAL_P(subpats), &result_set, sizeof(zval *), NULL); } } else { /* single pattern matching */ /* For each subpattern, insert it into the subpatterns array. */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(subpats, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], subpat_names[i]); } else { if (subpat_names[i]) { add_assoc_stringl(subpats, subpat_names[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } add_next_index_stringl(subpats, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } } pcre_free((void *) stringlist); } } else if (count == PCRE_ERROR_NOMATCH) { /* If we previously set PCRE_NOTEMPTY after a null match, this is not necessarily the end. We need to advance the start offset, and continue. Fudge the offset values to achieve this, unless we're already at the end of the string. */ if (g_notempty != 0 && start_offset < subject_len) { offsets[0] = start_offset; offsets[1] = start_offset + 1; } else break; } else { pcre_handle_exec_error(count TSRMLS_CC); break; } /* If we have matched an empty string, mimic what Perl's /g options does. This turns out to be rather cunning. First we set PCRE_NOTEMPTY and try the match again at the same point. If this fails (picked up above) we advance to the next character. */ g_notempty = (offsets[1] == offsets[0])? 
PCRE_NOTEMPTY | PCRE_ANCHORED : 0; /* Advance to the position right after the last full match */ start_offset = offsets[1]; } while (global); /* Add the match sets to the output array and clean up */ if (global && subpats && subpats_order == PREG_PATTERN_ORDER) { for (i = 0; i < num_subpats; i++) { if (subpat_names[i]) { zend_hash_update(Z_ARRVAL_P(subpats), subpat_names[i], strlen(subpat_names[i])+1, &match_sets[i], sizeof(zval *), NULL); Z_ADDREF_P(match_sets[i]); } zend_hash_next_index_insert(Z_ARRVAL_P(subpats), &match_sets[i], sizeof(zval *), NULL); } efree(match_sets); } efree(offsets); efree(subpat_names); /* Did we encounter an error? */ if (PCRE_G(error_code) == PHP_PCRE_NO_ERROR) { RETVAL_LONG(matched); } else { RETVAL_FALSE; } }
C
php
1
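The only difference between the patched and unpatched bodies in the CVE-2015-8382 record above is a memset(offsets, 0, size_offsets*sizeof(int)) added right after the safe_emalloc(): on the count == 0 ("too many substrings") path the code clamps count to size_offsets/3 and then reads offset slots that pcre_exec() never filled, leaking uninitialized heap memory. Below is a minimal sketch of the same allocate-and-zero idea in standard C; alloc_offsets is an illustrative helper, not PHP's allocator, and the overflow checks that safe_emalloc performs are omitted.

    #include <stdlib.h>
    #include <string.h>

    /* Zero the offsets vector at allocation time so that any slot the
     * matcher does not write reads back as 0 rather than stale heap data. */
    static int *alloc_offsets(size_t num_subpats)
    {
            size_t size_offsets = num_subpats * 3;
            int *offsets = malloc(size_offsets * sizeof(int));

            if (offsets != NULL)
                    memset(offsets, 0, size_offsets * sizeof(int));
            return offsets;
    }

    int main(void)
    {
            int *offsets = alloc_offsets(4);    /* 4 subpatterns -> 12 ints */
            int ok = offsets != NULL && offsets[11] == 0;  /* last slot is 0, not garbage */

            free(offsets);
            return ok ? 0 : 1;
    }

calloc(size_offsets, sizeof(int)) would achieve the same thing in one call; the sketch keeps the explicit memset to mirror the patch shown in the record.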
CVE-2018-6096
https://www.cvedetails.com/cve/CVE-2018-6096/
null
https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51
36f801fdbec07d116a6f4f07bb363f10897d6a51
If a page calls |window.focus()|, kick it out of fullscreen. BUG=776418, 800056 Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017 Reviewed-on: https://chromium-review.googlesource.com/852378 Reviewed-by: Nasko Oskov <[email protected]> Reviewed-by: Philip Jägenstedt <[email protected]> Commit-Queue: Avi Drissman <[email protected]> Cr-Commit-Position: refs/heads/master@{#533790}
void ChromeClientImpl::LayoutUpdated() const { web_view_->LayoutUpdated(); }
void ChromeClientImpl::LayoutUpdated() const { web_view_->LayoutUpdated(); }
C
Chrome
0
CVE-2010-1166
https://www.cvedetails.com/cve/CVE-2010-1166/
CWE-189
https://cgit.freedesktop.org/xorg/xserver/commit/?id=d2f813f7db
d2f813f7db157fc83abc4b3726821c36ee7e40b1
null
fbFetch_a8 (const FbBits *bits, int x, int width, CARD32 *buffer, miIndexedPtr indexed) { const CARD8 *pixel = (const CARD8 *)bits + x; const CARD8 *end = pixel + width; while (pixel < end) { WRITE(buffer++, READ(pixel++) << 24); } }
fbFetch_a8 (const FbBits *bits, int x, int width, CARD32 *buffer, miIndexedPtr indexed) { const CARD8 *pixel = (const CARD8 *)bits + x; const CARD8 *end = pixel + width; while (pixel < end) { WRITE(buffer++, READ(pixel++) << 24); } }
C
xserver
0
CVE-2011-2802
https://www.cvedetails.com/cve/CVE-2011-2802/
CWE-399
https://github.com/chromium/chromium/commit/4ab22cfc619ee8ff17a8c50e289ec3b30731ceba
4ab22cfc619ee8ff17a8c50e289ec3b30731ceba
In chromedriver, add /log url to get the contents of the chromedriver log remotely. Also add a 'chrome.verbose' boolean startup option. Remove usage of VLOG(1) in chromedriver. We do not need as complicated logging as in Chrome. BUG=85241 TEST=none Review URL: http://codereview.chromium.org/7104085 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88591 0039d316-1c4b-4281-b951-d872f2087c98
bool GetDefaultChromeExe(FilePath* browser_exe) { std::vector<FilePath> locations; FilePath module_dir; if (PathService::Get(base::DIR_MODULE, &module_dir)) locations.push_back(module_dir); #if defined(OS_WIN) const wchar_t kSubKey[] = L"Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe"; base::win::RegKey key(HKEY_CURRENT_USER, kSubKey, KEY_READ); std::wstring path; if (key.ReadValue(L"path", &path) == ERROR_SUCCESS) locations.push_back(FilePath(path)); base::win::RegKey sys_key(HKEY_LOCAL_MACHINE, kSubKey, KEY_READ); if (sys_key.ReadValue(L"path", &path) == ERROR_SUCCESS) locations.push_back(FilePath(path)); FilePath app_from_google(L"Google\\Chrome\\Application"); scoped_ptr<base::Environment> env(base::Environment::Create()); std::string home_dir; if (env->GetVar("userprofile", &home_dir)) { FilePath default_location(UTF8ToWide(home_dir)); if (base::win::GetVersion() < base::win::VERSION_VISTA) { default_location = default_location.Append( L"Local Settings\\Application Data"); } else { default_location = default_location.Append(L"AppData\\Local"); } locations.push_back(default_location.Append(app_from_google)); } std::string program_dir; if (env->GetVar("ProgramFiles", &program_dir)) { locations.push_back(FilePath(UTF8ToWide(program_dir)) .Append(app_from_google)); } if (env->GetVar("ProgramFiles(x86)", &program_dir)) { locations.push_back(FilePath(UTF8ToWide(program_dir)) .Append(app_from_google)); } #elif defined(OS_MACOSX) locations.push_back(FilePath("/Applications")); #elif defined(OS_LINUX) FilePath chrome_sym_link("/usr/bin/google-chrome"); if (file_util::PathExists(chrome_sym_link)) { FilePath chrome; if (file_util::ReadSymbolicLink(chrome_sym_link, &chrome)) { locations.push_back(chrome.DirName()); } } #endif FilePath current_dir; if (file_util::GetCurrentDirectory(&current_dir)) locations.push_back(current_dir); for (size_t i = 0; i < locations.size(); ++i) { FilePath path = locations[i].Append(chrome::kBrowserProcessExecutablePath); if (file_util::PathExists(path)) { *browser_exe = path; return true; } } return false; }
bool GetDefaultChromeExe(FilePath* browser_exe) { std::vector<FilePath> locations; FilePath module_dir; if (PathService::Get(base::DIR_MODULE, &module_dir)) locations.push_back(module_dir); #if defined(OS_WIN) const wchar_t kSubKey[] = L"Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe"; base::win::RegKey key(HKEY_CURRENT_USER, kSubKey, KEY_READ); std::wstring path; if (key.ReadValue(L"path", &path) == ERROR_SUCCESS) locations.push_back(FilePath(path)); base::win::RegKey sys_key(HKEY_LOCAL_MACHINE, kSubKey, KEY_READ); if (sys_key.ReadValue(L"path", &path) == ERROR_SUCCESS) locations.push_back(FilePath(path)); FilePath app_from_google(L"Google\\Chrome\\Application"); scoped_ptr<base::Environment> env(base::Environment::Create()); std::string home_dir; if (env->GetVar("userprofile", &home_dir)) { FilePath default_location(UTF8ToWide(home_dir)); if (base::win::GetVersion() < base::win::VERSION_VISTA) { default_location = default_location.Append( L"Local Settings\\Application Data"); } else { default_location = default_location.Append(L"AppData\\Local"); } locations.push_back(default_location.Append(app_from_google)); } std::string program_dir; if (env->GetVar("ProgramFiles", &program_dir)) { locations.push_back(FilePath(UTF8ToWide(program_dir)) .Append(app_from_google)); } if (env->GetVar("ProgramFiles(x86)", &program_dir)) { locations.push_back(FilePath(UTF8ToWide(program_dir)) .Append(app_from_google)); } #elif defined(OS_MACOSX) locations.push_back(FilePath("/Applications")); #elif defined(OS_LINUX) FilePath chrome_sym_link("/usr/bin/google-chrome"); if (file_util::PathExists(chrome_sym_link)) { FilePath chrome; if (file_util::ReadSymbolicLink(chrome_sym_link, &chrome)) { locations.push_back(chrome.DirName()); } } #endif FilePath current_dir; if (file_util::GetCurrentDirectory(&current_dir)) locations.push_back(current_dir); for (size_t i = 0; i < locations.size(); ++i) { FilePath path = locations[i].Append(chrome::kBrowserProcessExecutablePath); if (file_util::PathExists(path)) { *browser_exe = path; return true; } } return false; }
C
Chrome
0
CVE-2018-17206
https://www.cvedetails.com/cve/CVE-2018-17206/
null
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
9237a63c47bd314b807cda0bd2216264e82edbe8
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
parse_POP_QUEUE(const char *arg OVS_UNUSED, struct ofpbuf *ofpacts, enum ofputil_protocol *usable_protocols OVS_UNUSED) { ofpact_put_POP_QUEUE(ofpacts); return NULL; }
parse_POP_QUEUE(const char *arg OVS_UNUSED, struct ofpbuf *ofpacts, enum ofputil_protocol *usable_protocols OVS_UNUSED) { ofpact_put_POP_QUEUE(ofpacts); return NULL; }
C
ovs
0
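The commit message in the CVE-2018-17206 record above points at a buffer overread while decoding the BUNDLE action's variable-length slave list (the stored parse_POP_QUEUE() function is just a non-vulnerable sample from the same file). Below is a minimal sketch of validating a declared length against the bytes actually received before walking such a list; struct bundle_hdr and bundle_len_ok are illustrative stand-ins, not the real OVS structures, and byte-order conversion is omitted for brevity.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative wire layout: a fixed header followed by n_slaves
     * 16-bit port numbers. */
    struct bundle_hdr {
            uint16_t len;       /* total action length, header included */
            uint16_t n_slaves;  /* count of 16-bit slave entries that follow */
    };

    /* Decoding must not trust len/n_slaves beyond what the buffer holds;
     * walking the slave list without this check reads past the end of the
     * received message, which is the overread class the commit fixes. */
    static bool bundle_len_ok(const struct bundle_hdr *h, size_t buf_len)
    {
            size_t need;

            if (buf_len < sizeof *h || h->len < sizeof *h || h->len > buf_len)
                    return false;
            need = sizeof *h + (size_t)h->n_slaves * sizeof(uint16_t);
            return need <= h->len;
    }

    int main(void)
    {
            struct bundle_hdr h = { .len = 8, .n_slaves = 2 };  /* 4-byte header + 2*2 bytes */

            return (bundle_len_ok(&h, 8) && !bundle_len_ok(&h, 6)) ? 0 : 1;
    }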