CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2017-9949
|
https://www.cvedetails.com/cve/CVE-2017-9949/
|
CWE-787
|
https://github.com/radare/radare2/commit/796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
|
796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
|
Fix ext2 buffer overflow in r2_sbu_grub_memmove
|
grub_ext2_read_file (grub_fshelp_node_t node,
void (*read_hook) (grub_disk_addr_t sector,
unsigned offset, unsigned length, void *closure),
void *closure, int flags, int pos, grub_size_t len, char *buf)
{
return grub_fshelp_read_file (node->data->disk, node, read_hook, closure,
flags, pos, len, buf, grub_ext2_read_block,
node->inode.size, LOG2_EXT2_BLOCK_SIZE (node->data));
}
|
grub_ext2_read_file (grub_fshelp_node_t node,
void (*read_hook) (grub_disk_addr_t sector,
unsigned offset, unsigned length, void *closure),
void *closure, int flags, int pos, grub_size_t len, char *buf)
{
return grub_fshelp_read_file (node->data->disk, node, read_hook, closure,
flags, pos, len, buf, grub_ext2_read_block,
node->inode.size, LOG2_EXT2_BLOCK_SIZE (node->data));
}
|
C
|
radare2
| 0 |
CVE-2013-2902
|
https://www.cvedetails.com/cve/CVE-2013-2902/
|
CWE-399
|
https://github.com/chromium/chromium/commit/87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
|
87a082c5137a63dedb3fe5b1f48f75dcd1fd780c
|
Removed pinch viewport scroll offset distribution
The associated change in Blink makes the pinch viewport a proper
ScrollableArea meaning the normal path for synchronizing layer scroll
offsets is used.
This is a 2 sided patch, the other CL:
https://codereview.chromium.org/199253002/
BUG=349941
Review URL: https://codereview.chromium.org/210543002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260105 0039d316-1c4b-4281-b951-d872f2087c98
|
void LayerTreeHost::UpdateTopControlsState(TopControlsState constraints,
TopControlsState current,
bool animate) {
if (!settings_.calculate_top_controls_position)
return;
proxy_->ImplThreadTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&TopControlsManager::UpdateTopControlsState,
top_controls_manager_weak_ptr_,
constraints,
current,
animate));
}
|
void LayerTreeHost::UpdateTopControlsState(TopControlsState constraints,
TopControlsState current,
bool animate) {
if (!settings_.calculate_top_controls_position)
return;
proxy_->ImplThreadTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&TopControlsManager::UpdateTopControlsState,
top_controls_manager_weak_ptr_,
constraints,
current,
animate));
}
|
C
|
Chrome
| 0 |
CVE-2019-1010293
|
https://www.cvedetails.com/cve/CVE-2019-1010293/
|
CWE-20
|
https://github.com/OP-TEE/optee_os/commit/95f36d661f2b75887772ea28baaad904bde96970
|
95f36d661f2b75887772ea28baaad904bde96970
|
core: tee_mmu_check_access_rights() check all pages
Prior to this patch tee_mmu_check_access_rights() checks an address in
each page of a supplied range. If both the start and length of that
range are unaligned, the last page in the range is sometimes not checked.
With this patch the first address of each page in the range is checked
to simplify the logic of checking each page and the range and also to
cover the last page under all circumstances.
Fixes: OP-TEE-2018-0005: "tee_mmu_check_access_rights does not check
final page of TA buffer"
Signed-off-by: Jens Wiklander <[email protected]>
Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8)
Reviewed-by: Joakim Bech <[email protected]>
Reported-by: Riscure <[email protected]>
Reported-by: Alyssa Milburn <[email protected]>
Acked-by: Etienne Carriere <[email protected]>
|
TEE_Result tee_mmu_check_access_rights(const struct user_ta_ctx *utc,
uint32_t flags, uaddr_t uaddr,
size_t len)
{
uaddr_t a;
uaddr_t end_addr = 0;
size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
CORE_MMU_USER_PARAM_SIZE);
if (ADD_OVERFLOW(uaddr, len, &end_addr))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
(flags & TEE_MEMORY_ACCESS_SECURE))
return TEE_ERROR_ACCESS_DENIED;
/*
* Rely on TA private memory test to check if address range is private
* to TA or not.
*/
if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
!tee_mmu_is_vbuf_inside_ta_private(utc, (void *)uaddr, len))
return TEE_ERROR_ACCESS_DENIED;
for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) {
uint32_t attr;
TEE_Result res;
res = tee_mmu_user_va2pa_attr(utc, (void *)a, NULL, &attr);
if (res != TEE_SUCCESS)
return res;
if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
(attr & TEE_MATTR_SECURE))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
!(attr & TEE_MATTR_SECURE))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
return TEE_ERROR_ACCESS_DENIED;
}
return TEE_SUCCESS;
}
|
TEE_Result tee_mmu_check_access_rights(const struct user_ta_ctx *utc,
uint32_t flags, uaddr_t uaddr,
size_t len)
{
uaddr_t a;
size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE,
CORE_MMU_USER_PARAM_SIZE);
if (ADD_OVERFLOW(uaddr, len, &a))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
(flags & TEE_MEMORY_ACCESS_SECURE))
return TEE_ERROR_ACCESS_DENIED;
/*
* Rely on TA private memory test to check if address range is private
* to TA or not.
*/
if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) &&
!tee_mmu_is_vbuf_inside_ta_private(utc, (void *)uaddr, len))
return TEE_ERROR_ACCESS_DENIED;
for (a = uaddr; a < (uaddr + len); a += addr_incr) {
uint32_t attr;
TEE_Result res;
res = tee_mmu_user_va2pa_attr(utc, (void *)a, NULL, &attr);
if (res != TEE_SUCCESS)
return res;
if ((flags & TEE_MEMORY_ACCESS_NONSECURE) &&
(attr & TEE_MATTR_SECURE))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_SECURE) &&
!(attr & TEE_MATTR_SECURE))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW))
return TEE_ERROR_ACCESS_DENIED;
if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR))
return TEE_ERROR_ACCESS_DENIED;
}
return TEE_SUCCESS;
}
|
C
|
optee_os
| 1 |
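The CVE-2019-1010293 fix above replaces the loop `for (a = uaddr; a < (uaddr + len); a += addr_incr)` with one that starts from `ROUNDDOWN(uaddr, addr_incr)` and ends at an overflow-checked `end_addr`. Below is a minimal standalone sketch (plain C, not OP-TEE code; the 4 KiB page size and the example addresses are illustrative assumptions) of why the old loop can skip the page that holds the last byte of an unaligned range:

```c
/*
 * Standalone illustration (not OP-TEE code): with an unaligned start and
 * length, stepping from uaddr in page-sized increments can stop before the
 * page that holds the last byte.  Rounding the start down to a page
 * boundary, as the fixed tee_mmu_check_access_rights() does, guarantees
 * every touched page is visited.  A 4096-byte page size is assumed here.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define ROUNDDOWN(v, size) ((v) & ~((uintptr_t)(size) - 1))

static void walk(const char *label, uintptr_t start, uintptr_t end)
{
    printf("%s checks pages:", label);
    for (uintptr_t a = start; a < end; a += PAGE_SIZE)
        printf(" 0x%lx", (unsigned long)ROUNDDOWN(a, PAGE_SIZE));
    printf("\n");
}

int main(void)
{
    uintptr_t uaddr = 0x1000800;   /* unaligned start */
    size_t len = 0x1000;           /* range ends at 0x1001800, inside page 0x1001000 */
    uintptr_t end = uaddr + len;

    walk("vulnerable loop", uaddr, end);                       /* misses page 0x1001000 */
    walk("fixed loop     ", ROUNDDOWN(uaddr, PAGE_SIZE), end); /* visits both pages */
    return 0;
}
```

With these example values the vulnerable loop only visits the page at 0x1000000, while the fixed loop also visits 0x1001000, which holds the final bytes of the range.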
CVE-2015-8961
|
https://www.cvedetails.com/cve/CVE-2015-8961/
|
CWE-416
|
https://github.com/torvalds/linux/commit/6934da9238da947628be83635e365df41064b09b
|
6934da9238da947628be83635e365df41064b09b
|
ext4: fix potential use after free in __ext4_journal_stop
There is a use-after-free possibility in __ext4_journal_stop() in the
case that we free the handle in the first jbd2_journal_stop() because
we're referencing handle->h_err afterwards. This was introduced in
9705acd63b125dee8b15c705216d7186daea4625 and it is wrong. Fix it by
storing the handle->h_err value beforehand and avoid referencing
potentially freed handle.
Fixes: 9705acd63b125dee8b15c705216d7186daea4625
Signed-off-by: Lukas Czerner <[email protected]>
Reviewed-by: Andreas Dilger <[email protected]>
Cc: [email protected]
|
int __ext4_journal_get_create_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
}
return err;
}
|
int __ext4_journal_get_create_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_create_access(handle, bh);
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
}
return err;
}
|
C
|
linux
| 0 |
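Note that for CVE-2015-8961 the func_after/func_before cells show `__ext4_journal_get_create_access()`, which the commit does not change; the actual fix is in `__ext4_journal_stop()`. The following is a minimal standalone sketch of the pattern the commit message describes, using hypothetical types rather than the ext4/jbd2 sources: read the field you still need before the call that may free the object.

```c
/*
 * Minimal sketch of the use-after-free pattern described in the commit
 * message, with hypothetical types: once stop_handle() may free the handle,
 * any later read of handle->h_err is a use after free.  Saving the value
 * first, as the __ext4_journal_stop() fix does, avoids the stale access.
 */
#include <stdio.h>
#include <stdlib.h>

struct handle {
    int h_err;
};

/* May free the handle (as jbd2_journal_stop() can); returns its own error. */
static int stop_handle(struct handle *h)
{
    int rc = 0;
    free(h);
    return rc;
}

static int journal_stop_fixed(struct handle *h)
{
    int err = h->h_err;        /* read while the handle is still valid */
    int rc = stop_handle(h);   /* the handle may be gone after this call */

    if (!err)
        err = rc;
    return err;                /* no access to h after stop_handle() */
}

int main(void)
{
    struct handle *h = calloc(1, sizeof(*h));
    if (!h)
        return 1;
    h->h_err = 0;
    printf("journal_stop_fixed() -> %d\n", journal_stop_fixed(h));
    return 0;
}
```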
CVE-2015-1265
|
https://www.cvedetails.com/cve/CVE-2015-1265/
| null |
https://github.com/chromium/chromium/commit/04ff52bb66284467ccb43d90800013b89ee8db75
|
04ff52bb66284467ccb43d90800013b89ee8db75
|
Switching AudioOutputAuthorizationHandler from using AudioManager interface to AudioSystem one.
BUG=672468
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Review-Url: https://codereview.chromium.org/2692203003
Cr-Commit-Position: refs/heads/master@{#450939}
|
MockAudioRendererHost(base::RunLoop* auth_run_loop,
int render_process_id,
media::AudioManager* audio_manager,
media::AudioSystem* audio_system,
AudioMirroringManager* mirroring_manager,
MediaStreamManager* media_stream_manager,
const std::string& salt)
: AudioRendererHost(render_process_id,
audio_manager,
audio_system,
mirroring_manager,
media_stream_manager,
salt),
shared_memory_length_(0),
auth_run_loop_(auth_run_loop) {
set_render_frame_id_validate_function_for_testing(&ValidateRenderFrameId);
}
|
MockAudioRendererHost(base::RunLoop* auth_run_loop,
int render_process_id,
media::AudioManager* audio_manager,
AudioMirroringManager* mirroring_manager,
MediaStreamManager* media_stream_manager,
const std::string& salt)
: AudioRendererHost(render_process_id,
audio_manager,
mirroring_manager,
media_stream_manager,
salt),
shared_memory_length_(0),
auth_run_loop_(auth_run_loop) {
set_render_frame_id_validate_function_for_testing(&ValidateRenderFrameId);
}
|
C
|
Chrome
| 1 |
CVE-2016-1624
|
https://www.cvedetails.com/cve/CVE-2016-1624/
|
CWE-119
|
https://github.com/chromium/chromium/commit/7716418a27d561ee295a99f11fd3865580748de2
|
7716418a27d561ee295a99f11fd3865580748de2
|
Cherry pick underflow fix.
BUG=583607
Review URL: https://codereview.chromium.org/1662313002
Cr-Commit-Position: refs/heads/master@{#373736}
|
static BROTLI_INLINE int SafeReadDistance(BrotliState* s, BrotliBitReader* br) {
return ReadDistanceInternal(1, s, br);
}
|
static BROTLI_INLINE int SafeReadDistance(BrotliState* s, BrotliBitReader* br) {
return ReadDistanceInternal(1, s, br);
}
|
C
|
Chrome
| 0 |
CVE-2012-2390
|
https://www.cvedetails.com/cve/CVE-2012-2390/
|
CWE-399
|
https://github.com/torvalds/linux/commit/c50ac050811d6485616a193eb0f37bfbd191cc89
|
c50ac050811d6485616a193eb0f37bfbd191cc89
|
hugetlb: fix resv_map leak in error path
When called for anonymous (non-shared) mappings, hugetlb_reserve_pages()
does a resv_map_alloc(). It depends on code in hugetlbfs's
vm_ops->close() to release that allocation.
However, in the mmap() failure path, we do a plain unmap_region() without
the remove_vma() which actually calls vm_ops->close().
This is a decent fix. This leak could get reintroduced if new code (say,
after hugetlb_reserve_pages() in hugetlbfs_file_mmap()) decides to return
an error. But, I think it would have to unroll the reservation anyway.
Christoph's test case:
http://marc.info/?l=linux-mm&m=133728900729735
This patch applies to 3.4 and later. A version for earlier kernels is at
https://lkml.org/lkml/2012/5/22/418.
Signed-off-by: Dave Hansen <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: KOSAKI Motohiro <[email protected]>
Reported-by: Christoph Lameter <[email protected]>
Tested-by: Christoph Lameter <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: <[email protected]> [2.6.32+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void __init hugetlb_sysfs_init(void)
{
struct hstate *h;
int err;
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
if (!hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
|
static void __init hugetlb_sysfs_init(void)
{
struct hstate *h;
int err;
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
if (!hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
|
C
|
linux
| 0 |
CVE-2013-2141
|
https://www.cvedetails.com/cve/CVE-2013-2141/
|
CWE-399
|
https://github.com/torvalds/linux/commit/b9e146d8eb3b9ecae5086d373b50fa0c1f3e7f0f
|
b9e146d8eb3b9ecae5086d373b50fa0c1f3e7f0f
|
kernel/signal.c: stop info leak via the tkill and the tgkill syscalls
This fixes a kernel memory contents leak via the tkill and tgkill syscalls
for compat processes.
This is visible in the siginfo_t->_sifields._rt.si_sigval.sival_ptr field
when handling signals delivered from tkill.
The place of the infoleak:
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
...
put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
...
}
Signed-off-by: Emese Revfy <[email protected]>
Reviewed-by: PaX Team <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Serge Hallyn <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
atomic_dec(&q->user->sigpending);
free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
}
|
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
atomic_dec(&q->user->sigpending);
free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
}
|
C
|
linux
| 0 |
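As with several rows in this dataset, the function shown for CVE-2013-2141 (`__sigqueue_free()`) is not the code the commit changes; the leak is in the siginfo passed from the tkill/tgkill path through `copy_siginfo_to_user32()`. Below is a standalone sketch of the underlying pattern, with hypothetical types rather than the kernel structures: a stack struct that is copied out wholesale must be zero-initialized so that fields the code never sets cannot carry stale kernel data.

```c
/*
 * Hypothetical, standalone illustration of the infoleak pattern: the struct
 * is copied to user space byte-for-byte, so any member the code never writes
 * (si_ptr here) would otherwise expose old stack contents.  Zero-initializing
 * the whole struct up front removes the leak.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_siginfo {
    int      si_signo;
    int      si_code;
    intptr_t si_ptr;   /* never written on the tkill-style path */
};

/* Stand-in for copy_siginfo_to_user(): copies the raw struct bytes out. */
static void copy_out(unsigned char *dst, const struct fake_siginfo *info)
{
    memcpy(dst, info, sizeof(*info));
}

int main(void)
{
    unsigned char out[sizeof(struct fake_siginfo)];

    /* The fix pattern: zero-initialize, then fill in only the known fields. */
    struct fake_siginfo info = {0};
    info.si_signo = 10;
    info.si_code  = -6;

    copy_out(out, &info);
    printf("first byte of si_ptr seen by user space: %d (0 after zero-init)\n",
           (int)out[offsetof(struct fake_siginfo, si_ptr)]);
    return 0;
}
```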
CVE-2011-3106
|
https://www.cvedetails.com/cve/CVE-2011-3106/
|
CWE-119
|
https://github.com/chromium/chromium/commit/5385c44d9634d00b1cec2abf0fe7290d4205c7b0
|
5385c44d9634d00b1cec2abf0fe7290d4205c7b0
|
Inherits SupportsWeakPtr<T> instead of having WeakPtrFactory<T>
This change refines r137676.
BUG=122654
TEST=browser_test
Review URL: https://chromiumcodereview.appspot.com/10332233
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139771 0039d316-1c4b-4281-b951-d872f2087c98
|
void ResourceDispatcherHostImpl::StartReading(net::URLRequest* request) {
int bytes_read = 0;
if (Read(request, &bytes_read)) {
OnReadCompleted(request, bytes_read);
} else if (!request->status().is_io_pending()) {
DCHECK(!ResourceRequestInfoImpl::ForRequest(request)->is_paused());
ResponseCompleted(request);
}
}
|
void ResourceDispatcherHostImpl::StartReading(net::URLRequest* request) {
int bytes_read = 0;
if (Read(request, &bytes_read)) {
OnReadCompleted(request, bytes_read);
} else if (!request->status().is_io_pending()) {
DCHECK(!ResourceRequestInfoImpl::ForRequest(request)->is_paused());
ResponseCompleted(request);
}
}
|
C
|
Chrome
| 0 |
CVE-2013-4119
|
https://www.cvedetails.com/cve/CVE-2013-4119/
|
CWE-476
|
https://github.com/FreeRDP/FreeRDP/commit/0773bb9303d24473fe1185d85a424dfe159aff53
|
0773bb9303d24473fe1185d85a424dfe159aff53
|
nla: invalidate sec handle after creation
If sec pointer isn't invalidated after creation it is not possible
to check if the upper and lower pointers are valid.
This fixes a segfault in the server part if the client disconnects before
the authentication was finished.
|
SECURITY_STATUS SEC_ENTRY AcquireCredentialsHandleA(SEC_CHAR* pszPrincipal, SEC_CHAR* pszPackage,
ULONG fCredentialUse, void* pvLogonID, void* pAuthData, SEC_GET_KEY_FN pGetKeyFn,
void* pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry)
{
SECURITY_STATUS status;
SecurityFunctionTableA* table = sspi_GetSecurityFunctionTableAByNameA(pszPackage);
if (!table)
return SEC_E_SECPKG_NOT_FOUND;
if (table->AcquireCredentialsHandleA == NULL)
return SEC_E_UNSUPPORTED_FUNCTION;
status = table->AcquireCredentialsHandleA(pszPrincipal, pszPackage, fCredentialUse,
pvLogonID, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
return status;
}
|
SECURITY_STATUS SEC_ENTRY AcquireCredentialsHandleA(SEC_CHAR* pszPrincipal, SEC_CHAR* pszPackage,
ULONG fCredentialUse, void* pvLogonID, void* pAuthData, SEC_GET_KEY_FN pGetKeyFn,
void* pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry)
{
SECURITY_STATUS status;
SecurityFunctionTableA* table = sspi_GetSecurityFunctionTableAByNameA(pszPackage);
if (!table)
return SEC_E_SECPKG_NOT_FOUND;
if (table->AcquireCredentialsHandleA == NULL)
return SEC_E_UNSUPPORTED_FUNCTION;
status = table->AcquireCredentialsHandleA(pszPrincipal, pszPackage, fCredentialUse,
pvLogonID, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
return status;
}
|
C
|
FreeRDP
| 0 |
CVE-2016-9919
|
https://www.cvedetails.com/cve/CVE-2016-9919/
|
CWE-20
|
https://github.com/torvalds/linux/commit/79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2
|
79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2
|
net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fallback to the skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David Ahern <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void __net_exit icmpv6_sk_exit(struct net *net)
{
int i;
for_each_possible_cpu(i) {
inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
}
kfree(net->ipv6.icmp_sk);
}
|
static void __net_exit icmpv6_sk_exit(struct net *net)
{
int i;
for_each_possible_cpu(i) {
inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
}
kfree(net->ipv6.icmp_sk);
}
|
C
|
linux
| 0 |
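For CVE-2016-9919 the displayed function (`icmpv6_sk_exit()`) is again not the patched code. The commit message explains that `icmp6_send()` must prefer the device of the skb's dst entry when one is attached and fall back to `skb->dev` on the Rx path, where no route lookup has happened yet. The following is a standalone sketch of that selection logic with stand-in struct definitions (hypothetical, not the kernel headers):

```c
/*
 * Standalone sketch (hypothetical types, not the kernel sources) of the
 * device-selection logic the commit message describes: prefer the device
 * attached to the skb's dst entry when one exists, otherwise fall back to
 * skb->dev, so a NULL dst on the Rx path no longer dereferences NULL.
 */
#include <stdio.h>
#include <stddef.h>

struct net_device { const char *name; };
struct dst_entry  { struct net_device *dev; };
struct sk_buff    { struct dst_entry *dst; struct net_device *dev; };

static struct dst_entry *skb_dst(const struct sk_buff *skb) { return skb->dst; }

/* Fixed selection: never touch dst->dev unless a dst is actually set. */
static struct net_device *l3_device(const struct sk_buff *skb)
{
    if (skb_dst(skb))
        return skb_dst(skb)->dev;
    return skb->dev;
}

int main(void)
{
    struct net_device rx_dev = { "eth0" };
    struct sk_buff rx_skb = { .dst = NULL, .dev = &rx_dev };  /* Rx before route lookup */

    printf("L3 domain device: %s\n", l3_device(&rx_skb)->name);
    return 0;
}
```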
CVE-2018-20784
|
https://www.cvedetails.com/cve/CVE-2018-20784/
|
CWE-400
|
https://github.com/torvalds/linux/commit/c40f7d74c741a907cfaeb73a7697081881c497d0
|
c40f7d74c741a907cfaeb73a7697081881c497d0
|
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
if (unlikely(!curr))
return;
delta_exec = now - curr->exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
curr->exec_start = now;
schedstat_set(curr->statistics.exec_max,
max(delta_exec, curr->statistics.exec_max));
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
cgroup_account_cputime(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
|
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
if (unlikely(!curr))
return;
delta_exec = now - curr->exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
curr->exec_start = now;
schedstat_set(curr->statistics.exec_max,
max(delta_exec, curr->statistics.exec_max));
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
cgroup_account_cputime(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
account_cfs_rq_runtime(cfs_rq, delta_exec);
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
DevTools: 'Overrides' UI overlay obstructs page and element inspector
BUG=302862
[email protected]
Review URL: https://codereview.chromium.org/40233006
git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void InspectorResourceAgent::didReceiveXHRResponse(unsigned long identifier)
{
m_resourcesData->setResourceType(IdentifiersFactory::requestId(identifier), InspectorPageAgent::XHRResource);
}
|
void InspectorResourceAgent::didReceiveXHRResponse(unsigned long identifier)
{
m_resourcesData->setResourceType(IdentifiersFactory::requestId(identifier), InspectorPageAgent::XHRResource);
}
|
C
|
Chrome
| 0 |
CVE-2011-3209
|
https://www.cvedetails.com/cve/CVE-2011-3209/
|
CWE-189
|
https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d
|
f8bd2258e2d520dff28c855658bd24bdafb5102d
|
remove div_long_long_rem
x86 is the only arch right now which provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void print_slabinfo_header(struct seq_file *m)
{
seq_puts(m, "slabinfo - version: 2.1\n");
seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
"<objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
seq_putc(m, '\n');
}
|
static void print_slabinfo_header(struct seq_file *m)
{
seq_puts(m, "slabinfo - version: 2.1\n");
seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
"<objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
seq_putc(m, '\n');
}
|
C
|
linux
| 0 |
CVE-2012-2894
|
https://www.cvedetails.com/cve/CVE-2012-2894/
|
CWE-399
|
https://github.com/chromium/chromium/commit/9dc6161824d61e899c282cfe9aa40a4d3031974d
|
9dc6161824d61e899c282cfe9aa40a4d3031974d
|
[cros] Allow media streaming for OOBE WebUI.
BUG=122764
TEST=Manual with --enable-html5-camera
Review URL: https://chromiumcodereview.appspot.com/10693027
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@144899 0039d316-1c4b-4281-b951-d872f2087c98
|
void WebUILoginView::LoadURL(const GURL & url) {
webui_login_->LoadInitialURL(url);
webui_login_->RequestFocus();
CommandLine* command_line = CommandLine::ForCurrentProcess();
if (BaseLoginDisplayHost::default_host() &&
command_line->HasSwitch(switches::kEnableNewOobe)) {
SkBitmap background;
background.setConfig(SkBitmap::kARGB_8888_Config, 1, 1);
background.allocPixels();
background.eraseARGB(0x00, 0x00, 0x00, 0x00);
content::RenderViewHost* host =
tab_contents_->web_contents()->GetRenderViewHost();
host->GetView()->SetBackground(background);
}
}
|
void WebUILoginView::LoadURL(const GURL & url) {
webui_login_->LoadInitialURL(url);
webui_login_->RequestFocus();
CommandLine* command_line = CommandLine::ForCurrentProcess();
if (BaseLoginDisplayHost::default_host() &&
command_line->HasSwitch(switches::kEnableNewOobe)) {
SkBitmap background;
background.setConfig(SkBitmap::kARGB_8888_Config, 1, 1);
background.allocPixels();
background.eraseARGB(0x00, 0x00, 0x00, 0x00);
content::RenderViewHost* host =
tab_contents_->web_contents()->GetRenderViewHost();
host->GetView()->SetBackground(background);
}
}
|
C
|
Chrome
| 0 |
CVE-2016-7906
|
https://www.cvedetails.com/cve/CVE-2016-7906/
|
CWE-416
|
https://github.com/ImageMagick/ImageMagick/commit/d63a3c5729df59f183e9e110d5d8385d17caaad0
|
d63a3c5729df59f183e9e110d5d8385d17caaad0
|
https://github.com/ImageMagick/ImageMagick/issues/281
|
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
|
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
|
C
|
ImageMagick
| 0 |
CVE-2017-8890
|
https://www.cvedetails.com/cve/CVE-2017-8890/
|
CWE-415
|
https://github.com/torvalds/linux/commit/657831ffc38e30092a2d5f03d385d710eb88b09a
|
657831ffc38e30092a2d5f03d385d710eb88b09a
|
dccp/tcp: do not inherit mc_list from parent
syzkaller found a way to trigger double frees from ip_mc_drop_socket()
It turns out that we leave a copy of the parent mc_list at accept() time,
which is very bad.
Very similar to commit 8b485ce69876 ("tcp: do not inherit
fastopen_req from parent")
Initial report from Pray3r, completed by Andrey one.
Thanks a lot to them !
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Pray3r <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_af_ops->compat_setsockopt)
return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
optval, optlen);
}
|
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_af_ops->compat_setsockopt)
return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
optval, optlen);
}
|
C
|
linux
| 0 |
CVE-2011-3896
|
https://www.cvedetails.com/cve/CVE-2011-3896/
|
CWE-119
|
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
|
5925dff83699508b5e2735afb0297dfb310e159d
|
Implement a bubble that appears at the top of the screen when a tab enters
fullscreen mode via webkitRequestFullScreen(), telling the user how to exit
fullscreen.
This is implemented as an NSView rather than an NSWindow because the floating
chrome that appears in presentation mode should overlap the bubble.
Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac:
the mode in which the UI is hidden, accessible by moving the cursor to the top
of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode.
On Lion, however, fullscreen mode does not imply presentation mode: in
non-presentation fullscreen mode, the chrome is permanently shown. It is
possible to switch between presentation mode and fullscreen mode using the
presentation mode UI control.
When a tab initiates fullscreen mode on Lion, we enter presentation mode if not
in presentation mode already. When the user exits fullscreen mode using Chrome
UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we
return the user to the mode they were in before the tab entered fullscreen.
BUG=14471
TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen.
Need to test the Lion logic somehow, with no Lion trybots.
BUG=96883
Original review http://codereview.chromium.org/7890056/
TBR=thakis
Review URL: http://codereview.chromium.org/7920024
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
|
void Browser::ShowContentSettingsPage(ContentSettingsType content_type) {
ShowOptionsTab(
chrome::kContentSettingsExceptionsSubPage + std::string(kHashMark) +
ContentSettingsHandler::ContentSettingsTypeToGroupName(content_type));
}
|
void Browser::ShowContentSettingsPage(ContentSettingsType content_type) {
ShowOptionsTab(
chrome::kContentSettingsExceptionsSubPage + std::string(kHashMark) +
ContentSettingsHandler::ContentSettingsTypeToGroupName(content_type));
}
|
C
|
Chrome
| 0 |
CVE-2011-2858
|
https://www.cvedetails.com/cve/CVE-2011-2858/
|
CWE-119
|
https://github.com/chromium/chromium/commit/c13e1da62b5f5f0e6fe8c1f769a5a28415415244
|
c13e1da62b5f5f0e6fe8c1f769a5a28415415244
|
Revert "Revert 100494 - Fix bug in SimulateAttrib0."""
TEST=none
BUG=95625
[email protected]
Review URL: http://codereview.chromium.org/7796016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@100507 0039d316-1c4b-4281-b951-d872f2087c98
|
bool GLES2DecoderImpl::SetBlackTextureForNonRenderableTextures() {
DCHECK(current_program_);
if (!texture_manager()->HaveUnrenderableTextures()) {
return false;
}
bool textures_set = false;
const ProgramManager::ProgramInfo::SamplerIndices& sampler_indices =
current_program_->sampler_indices();
for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
const ProgramManager::ProgramInfo::UniformInfo* uniform_info =
current_program_->GetUniformInfo(sampler_indices[ii]);
DCHECK(uniform_info);
for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
GLuint texture_unit_index = uniform_info->texture_units[jj];
if (texture_unit_index < group_->max_texture_units()) {
TextureUnit& texture_unit = texture_units_[texture_unit_index];
TextureManager::TextureInfo* texture_info =
texture_unit.GetInfoForSamplerType(uniform_info->type);
if (!texture_info || !texture_info->CanRender(feature_info_)) {
textures_set = true;
glActiveTexture(GL_TEXTURE0 + texture_unit_index);
glBindTexture(
GetBindTargetForSamplerType(uniform_info->type),
texture_manager()->black_texture_id(uniform_info->type));
}
}
}
}
return textures_set;
}
|
bool GLES2DecoderImpl::SetBlackTextureForNonRenderableTextures() {
DCHECK(current_program_);
if (!texture_manager()->HaveUnrenderableTextures()) {
return false;
}
bool textures_set = false;
const ProgramManager::ProgramInfo::SamplerIndices& sampler_indices =
current_program_->sampler_indices();
for (size_t ii = 0; ii < sampler_indices.size(); ++ii) {
const ProgramManager::ProgramInfo::UniformInfo* uniform_info =
current_program_->GetUniformInfo(sampler_indices[ii]);
DCHECK(uniform_info);
for (size_t jj = 0; jj < uniform_info->texture_units.size(); ++jj) {
GLuint texture_unit_index = uniform_info->texture_units[jj];
if (texture_unit_index < group_->max_texture_units()) {
TextureUnit& texture_unit = texture_units_[texture_unit_index];
TextureManager::TextureInfo* texture_info =
texture_unit.GetInfoForSamplerType(uniform_info->type);
if (!texture_info || !texture_info->CanRender(feature_info_)) {
textures_set = true;
glActiveTexture(GL_TEXTURE0 + texture_unit_index);
glBindTexture(
GetBindTargetForSamplerType(uniform_info->type),
texture_manager()->black_texture_id(uniform_info->type));
}
}
}
}
return textures_set;
}
|
C
|
Chrome
| 0 |
CVE-2017-5009
|
https://www.cvedetails.com/cve/CVE-2017-5009/
|
CWE-119
|
https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
|
1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
|
DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resource type into the DispatchWillSendRequest
instrumentation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Commit-Queue: Andrey Lushnikov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507936}
|
void InspectorNetworkAgent::DidFinishLoading(unsigned long identifier,
DocumentLoader*,
double monotonic_finish_time,
int64_t encoded_data_length,
int64_t decoded_body_length) {
String request_id = IdentifiersFactory::RequestId(identifier);
NetworkResourcesData::ResourceData const* resource_data =
resources_data_->Data(request_id);
int pending_encoded_data_length =
resources_data_->GetAndClearPendingEncodedDataLength(request_id);
if (pending_encoded_data_length > 0) {
GetFrontend()->dataReceived(request_id, MonotonicallyIncreasingTime(), 0,
pending_encoded_data_length);
}
if (resource_data &&
(!resource_data->CachedResource() ||
resource_data->CachedResource()->GetDataBufferingPolicy() ==
kDoNotBufferData ||
IsErrorStatusCode(resource_data->HttpStatusCode()))) {
resources_data_->MaybeAddResourceData(request_id, "", 0);
}
resources_data_->MaybeDecodeDataToContent(request_id);
if (!monotonic_finish_time)
monotonic_finish_time = MonotonicallyIncreasingTime();
GetFrontend()->loadingFinished(request_id, monotonic_finish_time,
encoded_data_length);
}
|
void InspectorNetworkAgent::DidFinishLoading(unsigned long identifier,
DocumentLoader*,
double monotonic_finish_time,
int64_t encoded_data_length,
int64_t decoded_body_length) {
String request_id = IdentifiersFactory::RequestId(identifier);
NetworkResourcesData::ResourceData const* resource_data =
resources_data_->Data(request_id);
int pending_encoded_data_length =
resources_data_->GetAndClearPendingEncodedDataLength(request_id);
if (pending_encoded_data_length > 0) {
GetFrontend()->dataReceived(request_id, MonotonicallyIncreasingTime(), 0,
pending_encoded_data_length);
}
if (resource_data &&
(!resource_data->CachedResource() ||
resource_data->CachedResource()->GetDataBufferingPolicy() ==
kDoNotBufferData ||
IsErrorStatusCode(resource_data->HttpStatusCode()))) {
resources_data_->MaybeAddResourceData(request_id, "", 0);
}
resources_data_->MaybeDecodeDataToContent(request_id);
if (!monotonic_finish_time)
monotonic_finish_time = MonotonicallyIncreasingTime();
GetFrontend()->loadingFinished(request_id, monotonic_finish_time,
encoded_data_length);
}
|
C
|
Chrome
| 0 |
CVE-2016-2117
|
https://www.cvedetails.com/cve/CVE-2016-2117/
|
CWE-200
|
https://github.com/torvalds/linux/commit/f43bfaeddc79effbf3d0fcb53ca477cca66f3db8
|
f43bfaeddc79effbf3d0fcb53ca477cca66f3db8
|
atl2: Disable unimplemented scatter/gather feature
atl2 includes NETIF_F_SG in hw_features even though it has no support
for non-linear skbs. This bug was originally harmless since the
driver does not claim to implement checksum offload and that used to
be a requirement for SG.
Now that SG and checksum offload are independent features, if you
explicitly enable SG *and* use one of the rare protocols that can use
SG without checksum offload, this potentially leaks sensitive
information (before you notice that it just isn't working). Therefore
this obscure bug has been designated CVE-2016-2117.
Reported-by: Justin Yackoski <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Fixes: ec5f06156423 ("net: Kill link between CSUM and SG features.")
Signed-off-by: David S. Miller <[email protected]>
|
static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
{
s32 ret_val;
s16 mii_autoneg_adv_reg;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
/* Need to parse autoneg_advertised and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9). */
mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
/* Need to parse MediaType and setup the
* appropriate PHY registers. */
switch (hw->MediaType) {
case MEDIA_TYPE_AUTO_SENSOR:
mii_autoneg_adv_reg |=
(MII_AR_10T_HD_CAPS |
MII_AR_10T_FD_CAPS |
MII_AR_100TX_HD_CAPS|
MII_AR_100TX_FD_CAPS);
hw->autoneg_advertised =
ADVERTISE_10_HALF |
ADVERTISE_10_FULL |
ADVERTISE_100_HALF|
ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_FULL:
mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
return 0;
}
|
static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
{
s32 ret_val;
s16 mii_autoneg_adv_reg;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
/* Need to parse autoneg_advertised and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9). */
mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
/* Need to parse MediaType and setup the
* appropriate PHY registers. */
switch (hw->MediaType) {
case MEDIA_TYPE_AUTO_SENSOR:
mii_autoneg_adv_reg |=
(MII_AR_10T_HD_CAPS |
MII_AR_10T_FD_CAPS |
MII_AR_100TX_HD_CAPS|
MII_AR_100TX_FD_CAPS);
hw->autoneg_advertised =
ADVERTISE_10_HALF |
ADVERTISE_10_FULL |
ADVERTISE_100_HALF|
ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_FULL:
mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
return 0;
}
|
C
|
linux
| 0 |
CVE-2017-6903
|
https://www.cvedetails.com/cve/CVE-2017-6903/
|
CWE-269
|
https://github.com/iortcw/iortcw/commit/b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
|
All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
|
void CL_Init( void ) {
Com_Printf( "----- Client Initialization -----\n" );
Con_Init();
if(!com_fullyInitialized)
{
CL_ClearState();
clc.state = CA_DISCONNECTED; // no longer CA_UNINITIALIZED
cl_oldGameSet = qfalse;
}
cls.realtime = 0;
CL_InitInput();
cl_noprint = Cvar_Get( "cl_noprint", "0", 0 );
#ifdef UPDATE_SERVER_NAME
cl_motd = Cvar_Get( "cl_motd", "1", 0 );
#endif
cl_timeout = Cvar_Get( "cl_timeout", "200", 0 );
cl_timeNudge = Cvar_Get( "cl_timeNudge", "0", CVAR_TEMP );
cl_shownet = Cvar_Get( "cl_shownet", "0", CVAR_TEMP );
cl_showSend = Cvar_Get( "cl_showSend", "0", CVAR_TEMP );
cl_showTimeDelta = Cvar_Get( "cl_showTimeDelta", "0", CVAR_TEMP );
cl_freezeDemo = Cvar_Get( "cl_freezeDemo", "0", CVAR_TEMP );
rcon_client_password = Cvar_Get( "rconPassword", "", CVAR_TEMP );
cl_activeAction = Cvar_Get( "activeAction", "", CVAR_TEMP );
cl_timedemo = Cvar_Get( "timedemo", "0", 0 );
cl_timedemoLog = Cvar_Get ("cl_timedemoLog", "", CVAR_ARCHIVE);
cl_autoRecordDemo = Cvar_Get ("cl_autoRecordDemo", "0", CVAR_ARCHIVE);
cl_aviFrameRate = Cvar_Get ("cl_aviFrameRate", "25", CVAR_ARCHIVE);
cl_aviMotionJpeg = Cvar_Get ("cl_aviMotionJpeg", "1", CVAR_ARCHIVE);
cl_avidemo = Cvar_Get( "cl_avidemo", "0", 0 );
cl_forceavidemo = Cvar_Get( "cl_forceavidemo", "0", 0 );
rconAddress = Cvar_Get( "rconAddress", "", 0 );
cl_yawspeed = Cvar_Get( "cl_yawspeed", "140", CVAR_ARCHIVE );
cl_pitchspeed = Cvar_Get( "cl_pitchspeed", "140", CVAR_ARCHIVE );
cl_anglespeedkey = Cvar_Get( "cl_anglespeedkey", "1.5", 0 );
cl_maxpackets = Cvar_Get( "cl_maxpackets", "38", CVAR_ARCHIVE );
cl_packetdup = Cvar_Get( "cl_packetdup", "1", CVAR_ARCHIVE );
cl_run = Cvar_Get( "cl_run", "1", CVAR_ARCHIVE );
cl_sensitivity = Cvar_Get( "sensitivity", "5", CVAR_ARCHIVE );
cl_mouseAccel = Cvar_Get( "cl_mouseAccel", "0", CVAR_ARCHIVE );
cl_freelook = Cvar_Get( "cl_freelook", "1", CVAR_ARCHIVE );
cl_mouseAccelStyle = Cvar_Get( "cl_mouseAccelStyle", "0", CVAR_ARCHIVE );
cl_mouseAccelOffset = Cvar_Get( "cl_mouseAccelOffset", "5", CVAR_ARCHIVE );
Cvar_CheckRange(cl_mouseAccelOffset, 0.001f, 50000.0f, qfalse);
cl_showMouseRate = Cvar_Get( "cl_showmouserate", "0", 0 );
cl_allowDownload = Cvar_Get( "cl_allowDownload", "0", CVAR_ARCHIVE );
#ifdef USE_CURL_DLOPEN
cl_cURLLib = Cvar_Get("cl_cURLLib", DEFAULT_CURL_LIB, CVAR_ARCHIVE | CVAR_PROTECTED);
#endif
Cvar_Get( "cg_autoswitch", "2", CVAR_ARCHIVE );
Cvar_Get( "cg_wolfparticles", "1", CVAR_ARCHIVE );
cl_conXOffset = Cvar_Get( "cl_conXOffset", "0", 0 );
cl_inGameVideo = Cvar_Get( "r_inGameVideo", "1", CVAR_ARCHIVE );
cl_serverStatusResendTime = Cvar_Get( "cl_serverStatusResendTime", "750", 0 );
cl_recoilPitch = Cvar_Get( "cg_recoilPitch", "0", CVAR_ROM );
m_pitch = Cvar_Get( "m_pitch", "0.022", CVAR_ARCHIVE );
m_yaw = Cvar_Get( "m_yaw", "0.022", CVAR_ARCHIVE );
m_forward = Cvar_Get( "m_forward", "0.25", CVAR_ARCHIVE );
m_side = Cvar_Get( "m_side", "0.25", CVAR_ARCHIVE );
m_filter = Cvar_Get( "m_filter", "0", CVAR_ARCHIVE );
j_pitch = Cvar_Get ("j_pitch", "0.022", CVAR_ARCHIVE);
j_yaw = Cvar_Get ("j_yaw", "-0.022", CVAR_ARCHIVE);
j_forward = Cvar_Get ("j_forward", "-0.25", CVAR_ARCHIVE);
j_side = Cvar_Get ("j_side", "0.25", CVAR_ARCHIVE);
j_up = Cvar_Get ("j_up", "0", CVAR_ARCHIVE);
j_pitch_axis = Cvar_Get ("j_pitch_axis", "3", CVAR_ARCHIVE);
j_yaw_axis = Cvar_Get ("j_yaw_axis", "2", CVAR_ARCHIVE);
j_forward_axis = Cvar_Get ("j_forward_axis", "1", CVAR_ARCHIVE);
j_side_axis = Cvar_Get ("j_side_axis", "0", CVAR_ARCHIVE);
j_up_axis = Cvar_Get ("j_up_axis", "4", CVAR_ARCHIVE);
Cvar_CheckRange(j_pitch_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_yaw_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_forward_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_side_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_up_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
cl_motdString = Cvar_Get( "cl_motdString", "", CVAR_ROM );
Cvar_Get( "cl_maxPing", "800", CVAR_ARCHIVE );
cl_lanForcePackets = Cvar_Get ("cl_lanForcePackets", "1", CVAR_ARCHIVE);
cl_guidServerUniq = Cvar_Get ("cl_guidServerUniq", "1", CVAR_ARCHIVE);
cl_consoleKeys = Cvar_Get( "cl_consoleKeys", "~ ` 0x7e 0x60", CVAR_ARCHIVE);
Cvar_Get( "name", "WolfPlayer", CVAR_USERINFO | CVAR_ARCHIVE );
cl_rate = Cvar_Get( "rate", "25000", CVAR_USERINFO | CVAR_ARCHIVE ); // NERVE - SMF - changed from 3000
Cvar_Get( "snaps", "20", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "model", "bj2", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "head", "default", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "color", "4", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "handicap", "100", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "sex", "male", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "cl_anonymous", "0", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "password", "", CVAR_USERINFO );
Cvar_Get( "cg_predictItems", "1", CVAR_USERINFO | CVAR_ARCHIVE );
#ifdef USE_MUMBLE
cl_useMumble = Cvar_Get ("cl_useMumble", "0", CVAR_ARCHIVE | CVAR_LATCH);
cl_mumbleScale = Cvar_Get ("cl_mumbleScale", "0.0254", CVAR_ARCHIVE);
#endif
#ifdef USE_VOIP
cl_voipSend = Cvar_Get ("cl_voipSend", "0", 0);
cl_voipSendTarget = Cvar_Get ("cl_voipSendTarget", "spatial", 0);
cl_voipGainDuringCapture = Cvar_Get ("cl_voipGainDuringCapture", "0.2", CVAR_ARCHIVE);
cl_voipCaptureMult = Cvar_Get ("cl_voipCaptureMult", "2.0", CVAR_ARCHIVE);
cl_voipUseVAD = Cvar_Get ("cl_voipUseVAD", "0", CVAR_ARCHIVE);
cl_voipVADThreshold = Cvar_Get ("cl_voipVADThreshold", "0.25", CVAR_ARCHIVE);
cl_voipShowMeter = Cvar_Get ("cl_voipShowMeter", "1", CVAR_ARCHIVE);
cl_voip = Cvar_Get ("cl_voip", "1", CVAR_ARCHIVE);
Cvar_CheckRange( cl_voip, 0, 1, qtrue );
cl_voipProtocol = Cvar_Get ("cl_voipProtocol", cl_voip->integer ? "opus" : "", CVAR_USERINFO | CVAR_ROM);
#endif
Cvar_Get( "cg_autoactivate", "1", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "cg_emptyswitch", "0", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "cg_viewsize", "100", CVAR_ARCHIVE );
Cvar_Get ("cg_stereoSeparation", "0", CVAR_ROM);
cl_missionStats = Cvar_Get( "g_missionStats", "0", CVAR_ROM );
cl_waitForFire = Cvar_Get( "cl_waitForFire", "0", CVAR_ROM );
cl_language = Cvar_Get( "cl_language", "0", CVAR_ARCHIVE );
cl_debugTranslation = Cvar_Get( "cl_debugTranslation", "0", 0 );
Cmd_AddCommand( "cmd", CL_ForwardToServer_f );
Cmd_AddCommand( "configstrings", CL_Configstrings_f );
Cmd_AddCommand( "clientinfo", CL_Clientinfo_f );
Cmd_AddCommand( "snd_restart", CL_Snd_Restart_f );
Cmd_AddCommand( "vid_restart", CL_Vid_Restart_f );
Cmd_AddCommand( "disconnect", CL_Disconnect_f );
Cmd_AddCommand( "record", CL_Record_f );
Cmd_AddCommand( "demo", CL_PlayDemo_f );
Cmd_SetCommandCompletionFunc( "demo", CL_CompleteDemoName );
Cmd_AddCommand( "cinematic", CL_PlayCinematic_f );
Cmd_AddCommand( "stoprecord", CL_StopRecord_f );
Cmd_AddCommand( "connect", CL_Connect_f );
Cmd_AddCommand( "reconnect", CL_Reconnect_f );
Cmd_AddCommand( "localservers", CL_LocalServers_f );
Cmd_AddCommand( "globalservers", CL_GlobalServers_f );
Cmd_AddCommand( "rcon", CL_Rcon_f );
Cmd_SetCommandCompletionFunc( "rcon", CL_CompleteRcon );
Cmd_AddCommand( "ping", CL_Ping_f );
Cmd_AddCommand( "serverstatus", CL_ServerStatus_f );
Cmd_AddCommand( "showip", CL_ShowIP_f );
Cmd_AddCommand( "fs_openedList", CL_OpenedPK3List_f );
Cmd_AddCommand( "fs_referencedList", CL_ReferencedPK3List_f );
Cmd_AddCommand ("video", CL_Video_f );
Cmd_AddCommand ("stopvideo", CL_StopVideo_f );
Cmd_AddCommand( "cache_startgather", CL_Cache_StartGather_f );
Cmd_AddCommand( "cache_usedfile", CL_Cache_UsedFile_f );
Cmd_AddCommand( "cache_setindex", CL_Cache_SetIndex_f );
Cmd_AddCommand( "cache_mapchange", CL_Cache_MapChange_f );
Cmd_AddCommand( "cache_endgather", CL_Cache_EndGather_f );
Cmd_AddCommand( "updatehunkusage", CL_UpdateLevelHunkUsage );
Cmd_AddCommand( "updatescreen", SCR_UpdateScreen );
Cmd_AddCommand( "cld", CL_ClientDamageCommand );
Cmd_AddCommand( "startMultiplayer", CL_startMultiplayer_f ); // NERVE - SMF
Cmd_AddCommand( "shellExecute", CL_ShellExecute_URL_f );
Cmd_AddCommand( "map_restart", CL_MapRestart_f );
Cmd_AddCommand( "setRecommended", CL_SetRecommended_f );
CL_InitRef();
SCR_Init();
Cvar_Set( "cl_running", "1" );
CL_GenerateQKey();
Cvar_Get( "cl_guid", "", CVAR_USERINFO | CVAR_ROM );
CL_UpdateGUID( NULL, 0 );
Com_Printf( "----- Client Initialization Complete -----\n" );
}
|
void CL_Init( void ) {
Com_Printf( "----- Client Initialization -----\n" );
Con_Init();
if(!com_fullyInitialized)
{
CL_ClearState();
clc.state = CA_DISCONNECTED; // no longer CA_UNINITIALIZED
cl_oldGameSet = qfalse;
}
cls.realtime = 0;
CL_InitInput();
cl_noprint = Cvar_Get( "cl_noprint", "0", 0 );
#ifdef UPDATE_SERVER_NAME
cl_motd = Cvar_Get( "cl_motd", "1", 0 );
#endif
cl_timeout = Cvar_Get( "cl_timeout", "200", 0 );
cl_timeNudge = Cvar_Get( "cl_timeNudge", "0", CVAR_TEMP );
cl_shownet = Cvar_Get( "cl_shownet", "0", CVAR_TEMP );
cl_showSend = Cvar_Get( "cl_showSend", "0", CVAR_TEMP );
cl_showTimeDelta = Cvar_Get( "cl_showTimeDelta", "0", CVAR_TEMP );
cl_freezeDemo = Cvar_Get( "cl_freezeDemo", "0", CVAR_TEMP );
rcon_client_password = Cvar_Get( "rconPassword", "", CVAR_TEMP );
cl_activeAction = Cvar_Get( "activeAction", "", CVAR_TEMP );
cl_timedemo = Cvar_Get( "timedemo", "0", 0 );
cl_timedemoLog = Cvar_Get ("cl_timedemoLog", "", CVAR_ARCHIVE);
cl_autoRecordDemo = Cvar_Get ("cl_autoRecordDemo", "0", CVAR_ARCHIVE);
cl_aviFrameRate = Cvar_Get ("cl_aviFrameRate", "25", CVAR_ARCHIVE);
cl_aviMotionJpeg = Cvar_Get ("cl_aviMotionJpeg", "1", CVAR_ARCHIVE);
cl_avidemo = Cvar_Get( "cl_avidemo", "0", 0 );
cl_forceavidemo = Cvar_Get( "cl_forceavidemo", "0", 0 );
rconAddress = Cvar_Get( "rconAddress", "", 0 );
cl_yawspeed = Cvar_Get( "cl_yawspeed", "140", CVAR_ARCHIVE );
cl_pitchspeed = Cvar_Get( "cl_pitchspeed", "140", CVAR_ARCHIVE );
cl_anglespeedkey = Cvar_Get( "cl_anglespeedkey", "1.5", 0 );
cl_maxpackets = Cvar_Get( "cl_maxpackets", "38", CVAR_ARCHIVE );
cl_packetdup = Cvar_Get( "cl_packetdup", "1", CVAR_ARCHIVE );
cl_run = Cvar_Get( "cl_run", "1", CVAR_ARCHIVE );
cl_sensitivity = Cvar_Get( "sensitivity", "5", CVAR_ARCHIVE );
cl_mouseAccel = Cvar_Get( "cl_mouseAccel", "0", CVAR_ARCHIVE );
cl_freelook = Cvar_Get( "cl_freelook", "1", CVAR_ARCHIVE );
cl_mouseAccelStyle = Cvar_Get( "cl_mouseAccelStyle", "0", CVAR_ARCHIVE );
cl_mouseAccelOffset = Cvar_Get( "cl_mouseAccelOffset", "5", CVAR_ARCHIVE );
Cvar_CheckRange(cl_mouseAccelOffset, 0.001f, 50000.0f, qfalse);
cl_showMouseRate = Cvar_Get( "cl_showmouserate", "0", 0 );
cl_allowDownload = Cvar_Get( "cl_allowDownload", "0", CVAR_ARCHIVE );
#ifdef USE_CURL_DLOPEN
cl_cURLLib = Cvar_Get("cl_cURLLib", DEFAULT_CURL_LIB, CVAR_ARCHIVE);
#endif
Cvar_Get( "cg_autoswitch", "2", CVAR_ARCHIVE );
Cvar_Get( "cg_wolfparticles", "1", CVAR_ARCHIVE );
cl_conXOffset = Cvar_Get( "cl_conXOffset", "0", 0 );
cl_inGameVideo = Cvar_Get( "r_inGameVideo", "1", CVAR_ARCHIVE );
cl_serverStatusResendTime = Cvar_Get( "cl_serverStatusResendTime", "750", 0 );
cl_recoilPitch = Cvar_Get( "cg_recoilPitch", "0", CVAR_ROM );
m_pitch = Cvar_Get( "m_pitch", "0.022", CVAR_ARCHIVE );
m_yaw = Cvar_Get( "m_yaw", "0.022", CVAR_ARCHIVE );
m_forward = Cvar_Get( "m_forward", "0.25", CVAR_ARCHIVE );
m_side = Cvar_Get( "m_side", "0.25", CVAR_ARCHIVE );
m_filter = Cvar_Get( "m_filter", "0", CVAR_ARCHIVE );
j_pitch = Cvar_Get ("j_pitch", "0.022", CVAR_ARCHIVE);
j_yaw = Cvar_Get ("j_yaw", "-0.022", CVAR_ARCHIVE);
j_forward = Cvar_Get ("j_forward", "-0.25", CVAR_ARCHIVE);
j_side = Cvar_Get ("j_side", "0.25", CVAR_ARCHIVE);
j_up = Cvar_Get ("j_up", "0", CVAR_ARCHIVE);
j_pitch_axis = Cvar_Get ("j_pitch_axis", "3", CVAR_ARCHIVE);
j_yaw_axis = Cvar_Get ("j_yaw_axis", "2", CVAR_ARCHIVE);
j_forward_axis = Cvar_Get ("j_forward_axis", "1", CVAR_ARCHIVE);
j_side_axis = Cvar_Get ("j_side_axis", "0", CVAR_ARCHIVE);
j_up_axis = Cvar_Get ("j_up_axis", "4", CVAR_ARCHIVE);
Cvar_CheckRange(j_pitch_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_yaw_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_forward_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_side_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
Cvar_CheckRange(j_up_axis, 0, MAX_JOYSTICK_AXIS-1, qtrue);
cl_motdString = Cvar_Get( "cl_motdString", "", CVAR_ROM );
Cvar_Get( "cl_maxPing", "800", CVAR_ARCHIVE );
cl_lanForcePackets = Cvar_Get ("cl_lanForcePackets", "1", CVAR_ARCHIVE);
cl_guidServerUniq = Cvar_Get ("cl_guidServerUniq", "1", CVAR_ARCHIVE);
cl_consoleKeys = Cvar_Get( "cl_consoleKeys", "~ ` 0x7e 0x60", CVAR_ARCHIVE);
Cvar_Get( "name", "WolfPlayer", CVAR_USERINFO | CVAR_ARCHIVE );
cl_rate = Cvar_Get( "rate", "25000", CVAR_USERINFO | CVAR_ARCHIVE ); // NERVE - SMF - changed from 3000
Cvar_Get( "snaps", "20", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "model", "bj2", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "head", "default", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "color", "4", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "handicap", "100", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "sex", "male", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "cl_anonymous", "0", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "password", "", CVAR_USERINFO );
Cvar_Get( "cg_predictItems", "1", CVAR_USERINFO | CVAR_ARCHIVE );
#ifdef USE_MUMBLE
cl_useMumble = Cvar_Get ("cl_useMumble", "0", CVAR_ARCHIVE | CVAR_LATCH);
cl_mumbleScale = Cvar_Get ("cl_mumbleScale", "0.0254", CVAR_ARCHIVE);
#endif
#ifdef USE_VOIP
cl_voipSend = Cvar_Get ("cl_voipSend", "0", 0);
cl_voipSendTarget = Cvar_Get ("cl_voipSendTarget", "spatial", 0);
cl_voipGainDuringCapture = Cvar_Get ("cl_voipGainDuringCapture", "0.2", CVAR_ARCHIVE);
cl_voipCaptureMult = Cvar_Get ("cl_voipCaptureMult", "2.0", CVAR_ARCHIVE);
cl_voipUseVAD = Cvar_Get ("cl_voipUseVAD", "0", CVAR_ARCHIVE);
cl_voipVADThreshold = Cvar_Get ("cl_voipVADThreshold", "0.25", CVAR_ARCHIVE);
cl_voipShowMeter = Cvar_Get ("cl_voipShowMeter", "1", CVAR_ARCHIVE);
cl_voip = Cvar_Get ("cl_voip", "1", CVAR_ARCHIVE);
Cvar_CheckRange( cl_voip, 0, 1, qtrue );
cl_voipProtocol = Cvar_Get ("cl_voipProtocol", cl_voip->integer ? "opus" : "", CVAR_USERINFO | CVAR_ROM);
#endif
Cvar_Get( "cg_autoactivate", "1", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "cg_emptyswitch", "0", CVAR_USERINFO | CVAR_ARCHIVE );
Cvar_Get( "cg_viewsize", "100", CVAR_ARCHIVE );
Cvar_Get ("cg_stereoSeparation", "0", CVAR_ROM);
cl_missionStats = Cvar_Get( "g_missionStats", "0", CVAR_ROM );
cl_waitForFire = Cvar_Get( "cl_waitForFire", "0", CVAR_ROM );
cl_language = Cvar_Get( "cl_language", "0", CVAR_ARCHIVE );
cl_debugTranslation = Cvar_Get( "cl_debugTranslation", "0", 0 );
Cmd_AddCommand( "cmd", CL_ForwardToServer_f );
Cmd_AddCommand( "configstrings", CL_Configstrings_f );
Cmd_AddCommand( "clientinfo", CL_Clientinfo_f );
Cmd_AddCommand( "snd_restart", CL_Snd_Restart_f );
Cmd_AddCommand( "vid_restart", CL_Vid_Restart_f );
Cmd_AddCommand( "disconnect", CL_Disconnect_f );
Cmd_AddCommand( "record", CL_Record_f );
Cmd_AddCommand( "demo", CL_PlayDemo_f );
Cmd_SetCommandCompletionFunc( "demo", CL_CompleteDemoName );
Cmd_AddCommand( "cinematic", CL_PlayCinematic_f );
Cmd_AddCommand( "stoprecord", CL_StopRecord_f );
Cmd_AddCommand( "connect", CL_Connect_f );
Cmd_AddCommand( "reconnect", CL_Reconnect_f );
Cmd_AddCommand( "localservers", CL_LocalServers_f );
Cmd_AddCommand( "globalservers", CL_GlobalServers_f );
Cmd_AddCommand( "rcon", CL_Rcon_f );
Cmd_SetCommandCompletionFunc( "rcon", CL_CompleteRcon );
Cmd_AddCommand( "ping", CL_Ping_f );
Cmd_AddCommand( "serverstatus", CL_ServerStatus_f );
Cmd_AddCommand( "showip", CL_ShowIP_f );
Cmd_AddCommand( "fs_openedList", CL_OpenedPK3List_f );
Cmd_AddCommand( "fs_referencedList", CL_ReferencedPK3List_f );
Cmd_AddCommand ("video", CL_Video_f );
Cmd_AddCommand ("stopvideo", CL_StopVideo_f );
Cmd_AddCommand( "cache_startgather", CL_Cache_StartGather_f );
Cmd_AddCommand( "cache_usedfile", CL_Cache_UsedFile_f );
Cmd_AddCommand( "cache_setindex", CL_Cache_SetIndex_f );
Cmd_AddCommand( "cache_mapchange", CL_Cache_MapChange_f );
Cmd_AddCommand( "cache_endgather", CL_Cache_EndGather_f );
Cmd_AddCommand( "updatehunkusage", CL_UpdateLevelHunkUsage );
Cmd_AddCommand( "updatescreen", SCR_UpdateScreen );
Cmd_AddCommand( "cld", CL_ClientDamageCommand );
Cmd_AddCommand( "startMultiplayer", CL_startMultiplayer_f ); // NERVE - SMF
Cmd_AddCommand( "shellExecute", CL_ShellExecute_URL_f );
Cmd_AddCommand( "map_restart", CL_MapRestart_f );
Cmd_AddCommand( "setRecommended", CL_SetRecommended_f );
CL_InitRef();
SCR_Init();
Cvar_Set( "cl_running", "1" );
CL_GenerateQKey();
Cvar_Get( "cl_guid", "", CVAR_USERINFO | CVAR_ROM );
CL_UpdateGUID( NULL, 0 );
Com_Printf( "----- Client Initialization Complete -----\n" );
}
|
C
|
OpenJK
| 1 |
CVE-2016-5773
|
https://www.cvedetails.com/cve/CVE-2016-5773/
|
CWE-416
|
https://github.com/php/php-src/commit/f6aef68089221c5ea047d4a74224ee3deead99a6?w=1
|
f6aef68089221c5ea047d4a74224ee3deead99a6?w=1
|
Fix bug #72434: ZipArchive class Use After Free Vulnerability in PHP's GC algorithm and unserialize
|
static ZIPARCHIVE_METHOD(addEmptyDir)
{
struct zip *intern;
zval *this = getThis();
char *dirname;
int dirname_len;
int idx;
struct zip_stat sb;
char *s;
if (!this) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, this);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s",
&dirname, &dirname_len) == FAILURE) {
return;
}
if (dirname_len<1) {
RETURN_FALSE;
}
if (dirname[dirname_len-1] != '/') {
s=(char *)emalloc(dirname_len+2);
strcpy(s, dirname);
s[dirname_len] = '/';
s[dirname_len+1] = '\0';
} else {
s = dirname;
}
idx = zip_stat(intern, s, 0, &sb);
if (idx >= 0) {
RETVAL_FALSE;
} else {
if (zip_add_dir(intern, (const char *)s) == -1) {
RETVAL_FALSE;
}
RETVAL_TRUE;
}
if (s != dirname) {
efree(s);
}
}
|
static ZIPARCHIVE_METHOD(addEmptyDir)
{
struct zip *intern;
zval *this = getThis();
char *dirname;
int dirname_len;
int idx;
struct zip_stat sb;
char *s;
if (!this) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, this);
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s",
&dirname, &dirname_len) == FAILURE) {
return;
}
if (dirname_len<1) {
RETURN_FALSE;
}
if (dirname[dirname_len-1] != '/') {
s=(char *)emalloc(dirname_len+2);
strcpy(s, dirname);
s[dirname_len] = '/';
s[dirname_len+1] = '\0';
} else {
s = dirname;
}
idx = zip_stat(intern, s, 0, &sb);
if (idx >= 0) {
RETVAL_FALSE;
} else {
if (zip_add_dir(intern, (const char *)s) == -1) {
RETVAL_FALSE;
}
RETVAL_TRUE;
}
if (s != dirname) {
efree(s);
}
}
|
C
|
php-src
| 0 |
CVE-2019-13454
|
https://www.cvedetails.com/cve/CVE-2019-13454/
|
CWE-369
|
https://github.com/ImageMagick/ImageMagick/commit/1ddcf2e4f28029a888cadef2e757509ef5047ad8
|
1ddcf2e4f28029a888cadef2e757509ef5047ad8
|
https://github.com/ImageMagick/ImageMagick/issues/1629
|
static inline void CompositeCanvas(Image *destination,
const CompositeOperator compose,Image *source,ssize_t x_offset,
ssize_t y_offset,ExceptionInfo *exception)
{
const char
*value;
x_offset+=source->page.x-destination->page.x;
y_offset+=source->page.y-destination->page.y;
value=GetImageArtifact(source,"compose:outside-overlay");
(void) CompositeImage(destination,source,compose,
(value != (const char *) NULL) && (IsStringTrue(value) != MagickFalse) ?
MagickFalse : MagickTrue,x_offset,y_offset,exception);
}
|
static inline void CompositeCanvas(Image *destination,
const CompositeOperator compose,Image *source,ssize_t x_offset,
ssize_t y_offset,ExceptionInfo *exception)
{
const char
*value;
x_offset+=source->page.x-destination->page.x;
y_offset+=source->page.y-destination->page.y;
value=GetImageArtifact(source,"compose:outside-overlay");
(void) CompositeImage(destination,source,compose,
(value != (const char *) NULL) && (IsStringTrue(value) != MagickFalse) ?
MagickFalse : MagickTrue,x_offset,y_offset,exception);
}
|
C
|
ImageMagick6
| 0 |
CVE-2012-2875
|
https://www.cvedetails.com/cve/CVE-2012-2875/
| null |
https://github.com/chromium/chromium/commit/d345af9ed62ee5f431be327967f41c3cc3fe936a
|
d345af9ed62ee5f431be327967f41c3cc3fe936a
|
[BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API
https://bugs.webkit.org/show_bug.cgi?id=105143
RIM PR 171941
Reviewed by Rob Buis.
Internally reviewed by George Staikos.
Source/WebCore:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit.
Also adapt to new method names and encapsulation of TouchPoint data
members.
No change in behavior, no new tests.
* platform/blackberry/PlatformTouchPointBlackBerry.cpp:
(WebCore::PlatformTouchPoint::PlatformTouchPoint):
Source/WebKit/blackberry:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit. One exception
is when passing events to a full screen plugin.
Also adapt to new method names and encapsulation of TouchPoint data
members.
* Api/WebPage.cpp:
(BlackBerry::WebKit::WebPage::touchEvent):
(BlackBerry::WebKit::WebPage::touchPointAsMouseEvent):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin):
* WebKitSupport/InputHandler.cpp:
(BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint):
* WebKitSupport/InputHandler.h:
(InputHandler):
* WebKitSupport/TouchEventHandler.cpp:
(BlackBerry::WebKit::TouchEventHandler::doFatFingers):
(BlackBerry::WebKit::TouchEventHandler::handleTouchPoint):
* WebKitSupport/TouchEventHandler.h:
(TouchEventHandler):
Tools:
Adapt to new method names and encapsulation of TouchPoint data members.
* DumpRenderTree/blackberry/EventSender.cpp:
(addTouchPointCallback):
(updateTouchPointCallback):
(touchEndCallback):
(releaseTouchPointCallback):
(sendTouchEvent):
git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void WebPagePrivate::layoutFinished()
{
if (!m_contentsSizeChanged && !m_overflowExceedsContentsSize)
return;
m_contentsSizeChanged = false; // Toggle to turn off notification again.
m_overflowExceedsContentsSize = false;
if (contentsSize().isEmpty())
return;
if (didLayoutExceedMaximumIterations()) {
notifyTransformedContentsSizeChanged();
return;
}
IntSize previousContentsSize = m_previousContentsSize;
m_nestedLayoutFinishedCount++;
if (shouldZoomToInitialScaleOnLoad()) {
zoomToInitialScaleOnLoad();
m_shouldZoomToInitialScaleAfterLoadFinished = false;
} else if (loadState() != None)
notifyTransformedContentsSizeChanged();
m_nestedLayoutFinishedCount--;
if (!m_nestedLayoutFinishedCount) {
if (contentsSize() != previousContentsSize) {
IntPoint newScrollPosition = scrollPosition();
if (contentsSize().height() < previousContentsSize.height()) {
IntPoint scrollPositionWithHeightShrunk = IntPoint(newScrollPosition.x(), maximumScrollPosition().y());
newScrollPosition = newScrollPosition.shrunkTo(scrollPositionWithHeightShrunk);
}
if (contentsSize().width() < previousContentsSize.width()) {
IntPoint scrollPositionWithWidthShrunk = IntPoint(maximumScrollPosition().x(), newScrollPosition.y());
newScrollPosition = newScrollPosition.shrunkTo(scrollPositionWithWidthShrunk);
}
if (newScrollPosition != scrollPosition()) {
setScrollPosition(newScrollPosition);
notifyTransformedScrollChanged();
}
}
}
}
|
void WebPagePrivate::layoutFinished()
{
if (!m_contentsSizeChanged && !m_overflowExceedsContentsSize)
return;
m_contentsSizeChanged = false; // Toggle to turn off notification again.
m_overflowExceedsContentsSize = false;
if (contentsSize().isEmpty())
return;
if (didLayoutExceedMaximumIterations()) {
notifyTransformedContentsSizeChanged();
return;
}
IntSize previousContentsSize = m_previousContentsSize;
m_nestedLayoutFinishedCount++;
if (shouldZoomToInitialScaleOnLoad()) {
zoomToInitialScaleOnLoad();
m_shouldZoomToInitialScaleAfterLoadFinished = false;
} else if (loadState() != None)
notifyTransformedContentsSizeChanged();
m_nestedLayoutFinishedCount--;
if (!m_nestedLayoutFinishedCount) {
if (contentsSize() != previousContentsSize) {
IntPoint newScrollPosition = scrollPosition();
if (contentsSize().height() < previousContentsSize.height()) {
IntPoint scrollPositionWithHeightShrunk = IntPoint(newScrollPosition.x(), maximumScrollPosition().y());
newScrollPosition = newScrollPosition.shrunkTo(scrollPositionWithHeightShrunk);
}
if (contentsSize().width() < previousContentsSize.width()) {
IntPoint scrollPositionWithWidthShrunk = IntPoint(maximumScrollPosition().x(), newScrollPosition.y());
newScrollPosition = newScrollPosition.shrunkTo(scrollPositionWithWidthShrunk);
}
if (newScrollPosition != scrollPosition()) {
setScrollPosition(newScrollPosition);
notifyTransformedScrollChanged();
}
}
}
}
|
C
|
Chrome
| 0 |
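The layoutFinished() code in the record above clamps the saved scroll offset whenever the contents shrink, so the viewport never points past the new maximum. Below is a minimal standalone sketch of that clamping idea in plain C; the point type, sizes, and function names are illustrative stand-ins, not the WebKit API.

#include <stdio.h>

typedef struct { int x, y; } point_t;

static int min_int(int a, int b) { return a < b ? a : b; }

/* Clamp a scroll offset so it never exceeds the maximum offset implied by
 * the new content size and the viewport size. */
static point_t clamp_scroll(point_t scroll, int content_w, int content_h,
                            int view_w, int view_h)
{
    point_t max = {
        content_w > view_w ? content_w - view_w : 0,
        content_h > view_h ? content_h - view_h : 0
    };
    point_t out = { min_int(scroll.x, max.x), min_int(scroll.y, max.y) };
    return out;
}

int main(void)
{
    /* Content shrank from 1000x2000 to 800x1200; viewport is 600x800. */
    point_t scroll = { 400, 900 };
    point_t clamped = clamp_scroll(scroll, 800, 1200, 600, 800);
    printf("clamped to (%d, %d)\n", clamped.x, clamped.y); /* (200, 400) */
    return 0;
}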
CVE-2017-5122
|
https://www.cvedetails.com/cve/CVE-2017-5122/
|
CWE-119
|
https://github.com/chromium/chromium/commit/f8675cbb337440a11bf9afb10ea11bae42bb92cb
|
f8675cbb337440a11bf9afb10ea11bae42bb92cb
|
cros: Enable some tests in //ash/wm in ash_unittests --mash
For the ones that fail, disable them via filter file instead of in the
code, per our disablement policy.
Bug: 698085, 695556, 698878, 698888, 698093, 698894
Test: ash_unittests --mash
Change-Id: Ic145ab6a95508968d6884d14fac2a3ca08888d26
Reviewed-on: https://chromium-review.googlesource.com/752423
Commit-Queue: James Cook <[email protected]>
Reviewed-by: Steven Bennetts <[email protected]>
Cr-Commit-Position: refs/heads/master@{#513836}
|
int GetPostCountAndReset() {
int r = post_count_;
post_count_ = 0;
return r;
}
|
int GetPostCountAndReset() {
int r = post_count_;
post_count_ = 0;
return r;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b
|
3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b
|
Support pausing media when a context is frozen.
Media is resumed when the context is unpaused. This feature will be used
for bfcache and pausing iframes feature policy.
BUG=907125
Change-Id: Ic3925ea1a4544242b7bf0b9ad8c9cb9f63976bbd
Reviewed-on: https://chromium-review.googlesource.com/c/1410126
Commit-Queue: Dave Tapuska <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Reviewed-by: Mounir Lamouri <[email protected]>
Cr-Commit-Position: refs/heads/master@{#623319}
|
void HTMLMediaElement::RemotePlaybackCompatibilityChanged(const WebURL& url,
bool is_compatible) {
if (RuntimeEnabledFeatures::NewRemotePlaybackPipelineEnabled() &&
RemotePlaybackClient()) {
RemotePlaybackClient()->SourceChanged(url, is_compatible);
}
}
|
void HTMLMediaElement::RemotePlaybackCompatibilityChanged(const WebURL& url,
bool is_compatible) {
if (RuntimeEnabledFeatures::NewRemotePlaybackPipelineEnabled() &&
RemotePlaybackClient()) {
RemotePlaybackClient()->SourceChanged(url, is_compatible);
}
}
|
C
|
Chrome
| 0 |
CVE-2018-15861
|
https://www.cvedetails.com/cve/CVE-2018-15861/
|
CWE-476
|
https://github.com/xkbcommon/libxkbcommon/commit/38e1766bc6e20108948aec8a0b222a4bad0254e9
|
38e1766bc6e20108948aec8a0b222a4bad0254e9
|
xkbcomp: Don't falsely promise from ExprResolveLhs
Every user of ExprReturnLhs goes on to unconditionally dereference the
field return, which can be NULL if xkb_intern_atom fails. Return false
if this is the case, so we fail safely.
testcase: splice geometry data into interp
Signed-off-by: Daniel Stone <[email protected]>
|
LookupModMask(struct xkb_context *ctx, const void *priv, xkb_atom_t field,
enum expr_value_type type, xkb_mod_mask_t *val_rtrn)
{
const char *str;
xkb_mod_index_t ndx;
const LookupModMaskPriv *arg = priv;
const struct xkb_mod_set *mods = arg->mods;
enum mod_type mod_type = arg->mod_type;
if (type != EXPR_TYPE_INT)
return false;
str = xkb_atom_text(ctx, field);
if (!str)
return false;
if (istreq(str, "all")) {
*val_rtrn = MOD_REAL_MASK_ALL;
return true;
}
if (istreq(str, "none")) {
*val_rtrn = 0;
return true;
}
ndx = XkbModNameToIndex(mods, field, mod_type);
if (ndx == XKB_MOD_INVALID)
return false;
*val_rtrn = (1u << ndx);
return true;
}
|
LookupModMask(struct xkb_context *ctx, const void *priv, xkb_atom_t field,
enum expr_value_type type, xkb_mod_mask_t *val_rtrn)
{
const char *str;
xkb_mod_index_t ndx;
const LookupModMaskPriv *arg = priv;
const struct xkb_mod_set *mods = arg->mods;
enum mod_type mod_type = arg->mod_type;
if (type != EXPR_TYPE_INT)
return false;
str = xkb_atom_text(ctx, field);
if (!str)
return false;
if (istreq(str, "all")) {
*val_rtrn = MOD_REAL_MASK_ALL;
return true;
}
if (istreq(str, "none")) {
*val_rtrn = 0;
return true;
}
ndx = XkbModNameToIndex(mods, field, mod_type);
if (ndx == XKB_MOD_INVALID)
return false;
*val_rtrn = (1u << ndx);
return true;
}
|
C
|
libxkbcommon
| 0 |
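The libxkbcommon fix above changes a resolver so it reports failure instead of handing callers a value they will unconditionally dereference. The sketch below shows the same out-parameter-plus-boolean pattern in isolation; intern_name() and the expr struct are hypothetical stand-ins for xkb_intern_atom() and the real lookup types.

#include <stdbool.h>
#include <stdio.h>

struct expr { const char *name; };

/* Stand-in for xkb_intern_atom(): may fail and return NULL. */
static const char *intern_name(const char *s)
{
    return (s && *s) ? s : NULL;
}

/* Fills *out and returns true only on success; callers must test the
 * return value instead of dereferencing a possibly-NULL field. */
static bool resolve_lhs(const char *field, struct expr *out)
{
    const char *name = intern_name(field);
    if (!name)
        return false;           /* fail safely, as the fix above does */
    out->name = name;
    return true;
}

int main(void)
{
    struct expr e;
    if (!resolve_lhs("", &e))           /* interning fails here */
        printf("lookup failed, nothing dereferenced\n");
    if (resolve_lhs("Shift", &e))
        printf("resolved %s\n", e.name);
    return 0;
}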
CVE-2017-15923
|
https://www.cvedetails.com/cve/CVE-2017-15923/
| null |
https://cgit.kde.org/konversation.git/commit/?h=1.7&id=6a7f59ee1b9dbc6e5cf9e5f3b306504d02b73ef0
|
6a7f59ee1b9dbc6e5cf9e5f3b306504d02b73ef0
| null |
QMimeData *IRCView::createMimeDataFromSelection() const
{
const QTextDocumentFragment fragment(textCursor());
return new IrcViewMimeData(fragment);
}
|
QMimeData *IRCView::createMimeDataFromSelection() const
{
const QTextDocumentFragment fragment(textCursor());
return new IrcViewMimeData(fragment);
}
|
CPP
|
kde
| 0 |
CVE-2013-6368
|
https://www.cvedetails.com/cve/CVE-2013-6368/
|
CWE-20
|
https://github.com/torvalds/linux/commit/fda4e2e85589191b123d31cdc21fd33ee70f50fd
|
fda4e2e85589191b123d31cdc21fd33ee70f50fd
|
KVM: x86: Convert vapic synchronization to _cached functions (CVE-2013-6368)
In kvm_lapic_sync_from_vapic and kvm_lapic_sync_to_vapic there is the
potential to corrupt kernel memory if userspace provides an address that
is at the end of a page. This patch converts those functions to use
kvm_write_guest_cached and kvm_read_guest_cached. It also checks the
vapic_address specified by userspace during ioctl processing and returns
an error to userspace if the address is not a valid GPA.
This is generally not guest triggerable, because the required write is
done by firmware that runs before the guest. Also, it only affects AMD
processors and oldish Intel that do not have the FlexPriority feature
(unless you disable FlexPriority, of course; then newer processors are
also affected).
Fixes: b93463aa59d6 ('KVM: Accelerated apic support')
Reported-by: Andrew Honig <[email protected]>
Cc: [email protected]
Signed-off-by: Andrew Honig <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
kvm_kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
continue;
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
kvm_kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
}
|
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
kvm_kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
continue;
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
kvm_kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
}
|
C
|
linux
| 0 |
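The KVM commit above describes why a guest-supplied address at the end of a page is dangerous: a fixed-size write starting there spills into the following page. The snippet below is a plain userspace illustration of that span check, not the kvm_*_cached API the patch actually switches to.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* True if [offset, offset + len) stays within a single page. */
static bool fits_in_page(uint32_t offset, uint32_t len)
{
    uint32_t in_page = offset & (PAGE_SIZE - 1);
    return len <= PAGE_SIZE - in_page;
}

int main(void)
{
    printf("%d\n", fits_in_page(4090, 4));   /* 1: stays in the page  */
    printf("%d\n", fits_in_page(4090, 16));  /* 0: would cross a page */
    return 0;
}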
CVE-2016-6720
|
https://www.cvedetails.com/cve/CVE-2016-6720/
|
CWE-200
|
https://android.googlesource.com/platform/frameworks/av/+/7c88b498fda1c2b608a9dd73960a2fd4d7b7e3f7
|
7c88b498fda1c2b608a9dd73960a2fd4d7b7e3f7
|
IOMX: allow configuration after going to loaded state
This was disallowed recently but we still use it as MediaCodcec.stop
only goes to loaded state, and does not free component.
Bug: 31450460
Change-Id: I72e092e4e55c9f23b1baee3e950d76e84a5ef28d
(cherry picked from commit e03b22839d78c841ce0a1a0a1ee1960932188b0b)
|
OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(
OMX::buffer_id buffer, OMX_U32 portIndex) {
if (buffer == 0) {
return NULL;
}
Mutex::Autolock autoLock(mBufferIDLock);
ssize_t index = mBufferIDToBufferHeader.indexOfKey(buffer);
if (index < 0) {
CLOGW("findBufferHeader: buffer %u not found", buffer);
return NULL;
}
OMX_BUFFERHEADERTYPE *header = mBufferIDToBufferHeader.valueAt(index);
BufferMeta *buffer_meta =
static_cast<BufferMeta *>(header->pAppPrivate);
if (buffer_meta->getPortIndex() != portIndex) {
CLOGW("findBufferHeader: buffer %u found but with incorrect port index.", buffer);
android_errorWriteLog(0x534e4554, "28816827");
return NULL;
}
return header;
}
|
OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(
OMX::buffer_id buffer, OMX_U32 portIndex) {
if (buffer == 0) {
return NULL;
}
Mutex::Autolock autoLock(mBufferIDLock);
ssize_t index = mBufferIDToBufferHeader.indexOfKey(buffer);
if (index < 0) {
CLOGW("findBufferHeader: buffer %u not found", buffer);
return NULL;
}
OMX_BUFFERHEADERTYPE *header = mBufferIDToBufferHeader.valueAt(index);
BufferMeta *buffer_meta =
static_cast<BufferMeta *>(header->pAppPrivate);
if (buffer_meta->getPortIndex() != portIndex) {
CLOGW("findBufferHeader: buffer %u found but with incorrect port index.", buffer);
android_errorWriteLog(0x534e4554, "28816827");
return NULL;
}
return header;
}
|
C
|
Android
| 0 |
CVE-2012-2895
|
https://www.cvedetails.com/cve/CVE-2012-2895/
|
CWE-119
|
https://github.com/chromium/chromium/commit/16dcd30c215801941d9890859fd79a234128fc3e
|
16dcd30c215801941d9890859fd79a234128fc3e
|
Refactors to simplify rename pathway in DownloadFileManager.
This is https://chromiumcodereview.appspot.com/10668004 / r144817 (reverted
due to CrOS failure) with the completion logic moved to after the
auto-opening. The tests that test the auto-opening (for web store install)
were waiting for download completion to check install, and hence were
failing when completion was moved earlier.
Doing this right would probably require another state (OPENED).
BUG=123998
BUG-134930
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10701040
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145157 0039d316-1c4b-4281-b951-d872f2087c98
|
void DownloadItemImpl::Remove() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
delegate_->AssertStateConsistent(this);
Cancel(true);
delegate_->AssertStateConsistent(this);
TransitionTo(REMOVING);
delegate_->DownloadRemoved(this);
}
|
void DownloadItemImpl::Remove() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
delegate_->AssertStateConsistent(this);
Cancel(true);
delegate_->AssertStateConsistent(this);
TransitionTo(REMOVING);
delegate_->DownloadRemoved(this);
}
|
C
|
Chrome
| 0 |
CVE-2013-0893
|
https://www.cvedetails.com/cve/CVE-2013-0893/
|
CWE-362
|
https://github.com/chromium/chromium/commit/ed6f4545a2a345697e07908c887333f5bdcc97a3
|
ed6f4545a2a345697e07908c887333f5bdcc97a3
|
Apply 'x-content-type-options' check to dynamically inserted script.
BUG=348581
Review URL: https://codereview.chromium.org/185593011
git-svn-id: svn://svn.chromium.org/blink/trunk@168570 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
ScriptLoader* toScriptLoaderIfPossible(Element* element)
{
if (isHTMLScriptLoader(element))
return toHTMLScriptElement(element)->loader();
if (isSVGScriptLoader(element))
return toSVGScriptElement(element)->loader();
return 0;
}
|
ScriptLoader* toScriptLoaderIfPossible(Element* element)
{
if (isHTMLScriptLoader(element))
return toHTMLScriptElement(element)->loader();
if (isSVGScriptLoader(element))
return toSVGScriptElement(element)->loader();
return 0;
}
|
C
|
Chrome
| 0 |
CVE-2011-1799
|
https://www.cvedetails.com/cve/CVE-2011-1799/
|
CWE-20
|
https://github.com/chromium/chromium/commit/5fd35e5359c6345b8709695cd71fba307318e6aa
|
5fd35e5359c6345b8709695cd71fba307318e6aa
|
Source/WebCore: Fix for bug 64046 - Wrong image height in absolutely positioned div in
relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <[email protected]> on 2011-07-21
Reviewed by David Hyatt.
Test: fast/css/absolute-child-with-percent-height-inside-relative-parent.html
* rendering/RenderBox.cpp:
(WebCore::RenderBox::availableLogicalHeightUsing):
LayoutTests: Test to cover absolutely positioned child with percentage height
in relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <[email protected]> on 2011-07-21
Reviewed by David Hyatt.
* fast/css/absolute-child-with-percent-height-inside-relative-parent-expected.txt: Added.
* fast/css/absolute-child-with-percent-height-inside-relative-parent.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@91533 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
InlineBox* RenderBox::createInlineBox()
{
return new (renderArena()) InlineBox(this);
}
|
InlineBox* RenderBox::createInlineBox()
{
return new (renderArena()) InlineBox(this);
}
|
C
|
Chrome
| 0 |
CVE-2018-11469
|
https://www.cvedetails.com/cve/CVE-2018-11469/
|
CWE-200
|
https://git.haproxy.org/?p=haproxy-1.8.git;a=commit;h=17514045e5d934dede62116216c1b016fe23dd06
|
17514045e5d934dede62116216c1b016fe23dd06
| null |
int apply_filters_to_response(struct stream *s, struct channel *rtr, struct proxy *px)
{
struct session *sess = s->sess;
struct http_txn *txn = s->txn;
struct hdr_exp *exp;
for (exp = px->rsp_exp; exp; exp = exp->next) {
int ret;
/*
* The interleaving of transformations and verdicts
* makes it difficult to decide to continue or stop
* the evaluation.
*/
if (txn->flags & TX_SVDENY)
break;
if ((txn->flags & TX_SVALLOW) &&
(exp->action == ACT_ALLOW || exp->action == ACT_DENY ||
exp->action == ACT_PASS)) {
exp = exp->next;
continue;
}
/* if this filter had a condition, evaluate it now and skip to
* next filter if the condition does not match.
*/
if (exp->cond) {
ret = acl_exec_cond(exp->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
ret = acl_pass(ret);
if (((struct acl_cond *)exp->cond)->pol == ACL_COND_UNLESS)
ret = !ret;
if (!ret)
continue;
}
/* Apply the filter to the status line. */
ret = apply_filter_to_sts_line(s, rtr, exp);
if (unlikely(ret < 0))
return -1;
if (likely(ret == 0)) {
/* The filter did not match the response, it can be
* iterated through all headers.
*/
if (unlikely(apply_filter_to_resp_headers(s, rtr, exp) < 0))
return -1;
}
}
return 0;
}
|
int apply_filters_to_response(struct stream *s, struct channel *rtr, struct proxy *px)
{
struct session *sess = s->sess;
struct http_txn *txn = s->txn;
struct hdr_exp *exp;
for (exp = px->rsp_exp; exp; exp = exp->next) {
int ret;
/*
* The interleaving of transformations and verdicts
* makes it difficult to decide to continue or stop
* the evaluation.
*/
if (txn->flags & TX_SVDENY)
break;
if ((txn->flags & TX_SVALLOW) &&
(exp->action == ACT_ALLOW || exp->action == ACT_DENY ||
exp->action == ACT_PASS)) {
exp = exp->next;
continue;
}
/* if this filter had a condition, evaluate it now and skip to
* next filter if the condition does not match.
*/
if (exp->cond) {
ret = acl_exec_cond(exp->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
ret = acl_pass(ret);
if (((struct acl_cond *)exp->cond)->pol == ACL_COND_UNLESS)
ret = !ret;
if (!ret)
continue;
}
/* Apply the filter to the status line. */
ret = apply_filter_to_sts_line(s, rtr, exp);
if (unlikely(ret < 0))
return -1;
if (likely(ret == 0)) {
/* The filter did not match the response, it can be
* iterated through all headers.
*/
if (unlikely(apply_filter_to_resp_headers(s, rtr, exp) < 0))
return -1;
}
}
return 0;
}
|
C
|
haproxy
| 0 |
CVE-2011-3896
|
https://www.cvedetails.com/cve/CVE-2011-3896/
|
CWE-119
|
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
|
5925dff83699508b5e2735afb0297dfb310e159d
|
Implement a bubble that appears at the top of the screen when a tab enters
fullscreen mode via webkitRequestFullScreen(), telling the user how to exit
fullscreen.
This is implemented as an NSView rather than an NSWindow because the floating
chrome that appears in presentation mode should overlap the bubble.
Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac:
the mode in which the UI is hidden, accessible by moving the cursor to the top
of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode.
On Lion, however, fullscreen mode does not imply presentation mode: in
non-presentation fullscreen mode, the chrome is permanently shown. It is
possible to switch between presentation mode and fullscreen mode using the
presentation mode UI control.
When a tab initiates fullscreen mode on Lion, we enter presentation mode if not
in presentation mode already. When the user exits fullscreen mode using Chrome
UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we
return the user to the mode they were in before the tab entered fullscreen.
BUG=14471
TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen.
Need to test the Lion logic somehow, with no Lion trybots.
BUG=96883
Original review http://codereview.chromium.org/7890056/
TBR=thakis
Review URL: http://codereview.chromium.org/7920024
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
|
TabContentsWrapper* Browser::AddBlankTab(bool foreground) {
return AddBlankTabAt(-1, foreground);
}
|
TabContentsWrapper* Browser::AddBlankTab(bool foreground) {
return AddBlankTabAt(-1, foreground);
}
|
C
|
Chrome
| 0 |
CVE-2018-8099
|
https://www.cvedetails.com/cve/CVE-2018-8099/
|
CWE-415
|
https://github.com/libgit2/libgit2/commit/58a6fe94cb851f71214dbefac3f9bffee437d6fe
|
58a6fe94cb851f71214dbefac3f9bffee437d6fe
|
index: convert `read_entry` to return entry size via an out-param
The function `read_entry` does not conform to our usual coding style of
returning stuff via the out parameter and to use the return value for
reporting errors. Due to most of our code conforming to that pattern, it
has become quite natural for us to actually return `-1` in case there is
any error, which has also slipped in with commit 5625d86b9 (index:
support index v4, 2016-05-17). As the function returns an `size_t` only,
though, the return value is wrapped around, causing the caller of
`read_tree` to continue with an invalid index entry. Ultimately, this
can lead to a double-free.
Improve code and fix the bug by converting the function to return the
index entry size via an out parameter and only using the return value to
indicate errors.
Reported-by: Krishna Ram Prakash R <[email protected]>
Reported-by: Vivek Parikh <[email protected]>
|
git_repository *git_index_owner(const git_index *index)
{
return INDEX_OWNER(index);
}
|
git_repository *git_index_owner(const git_index *index)
{
return INDEX_OWNER(index);
}
|
C
|
libgit2
| 0 |
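The libgit2 commit above replaces a size_t return, where -1 silently wraps to SIZE_MAX, with an out parameter for the size and an int return for errors. Below is a minimal sketch of that calling convention; parse_entry() and its fixed-size entry format are hypothetical.

#include <stddef.h>
#include <stdio.h>

/* Before the fix: "size_t parse_entry(...)" returning (size_t)-1 on error,
 * which callers saw as a huge-but-valid size. After: 0 on success with
 * *out_size filled in, -1 on error. */
static int parse_entry(const unsigned char *buf, size_t buflen, size_t *out_size)
{
    (void)buf;                  /* a real parser would decode the entry here */
    if (buflen < 8)             /* not enough bytes for a complete entry */
        return -1;
    *out_size = 8;              /* pretend a fixed-size entry was consumed */
    return 0;
}

int main(void)
{
    unsigned char buf[16] = {0};
    size_t entry_size;
    if (parse_entry(buf, sizeof(buf), &entry_size) < 0) {
        fprintf(stderr, "bad entry\n");
        return 1;
    }
    printf("consumed %zu bytes\n", entry_size);
    return 0;
}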
CVE-2014-3645
|
https://www.cvedetails.com/cve/CVE-2014-3645/
|
CWE-20
|
https://github.com/torvalds/linux/commit/bfd0a56b90005f8c8a004baf407ad90045c2b11e
|
bfd0a56b90005f8c8a004baf407ad90045c2b11e
|
nEPT: Nested INVEPT
If we let L1 use EPT, we should probably also support the INVEPT instruction.
In our current nested EPT implementation, when L1 changes its EPT table
for L2 (i.e., EPT12), L0 modifies the shadow EPT table (EPT02), and in
the course of this modification already calls INVEPT. But if last level
of shadow page is unsync not all L1's changes to EPT12 are intercepted,
which means roots need to be synced when L1 calls INVEPT. Global INVEPT
should not be different since roots are synced by kvm_mmu_load() each
time EPTP02 changes.
Reviewed-by: Xiao Guangrong <[email protected]>
Signed-off-by: Nadav Har'El <[email protected]>
Signed-off-by: Jun Nakajima <[email protected]>
Signed-off-by: Xinhao Xu <[email protected]>
Signed-off-by: Yang Zhang <[email protected]>
Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp, *nsp;
if (list_empty(invalid_list))
return;
/*
* wmb: make sure everyone sees our modifications to the page tables
* rmb: make sure we see changes to vcpu->mode
*/
smp_mb();
/*
* Wait for all vcpus to exit guest mode and/or lockless shadow
* page table walks.
*/
kvm_flush_remote_tlbs(kvm);
list_for_each_entry_safe(sp, nsp, invalid_list, link) {
WARN_ON(!sp->role.invalid || sp->root_count);
kvm_mmu_free_page(sp);
}
}
|
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp, *nsp;
if (list_empty(invalid_list))
return;
/*
* wmb: make sure everyone sees our modifications to the page tables
* rmb: make sure we see changes to vcpu->mode
*/
smp_mb();
/*
* Wait for all vcpus to exit guest mode and/or lockless shadow
* page table walks.
*/
kvm_flush_remote_tlbs(kvm);
list_for_each_entry_safe(sp, nsp, invalid_list, link) {
WARN_ON(!sp->role.invalid || sp->root_count);
kvm_mmu_free_page(sp);
}
}
|
C
|
linux
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/a5333583f14284a411abac2fef7caed889a8bba3
|
a5333583f14284a411abac2fef7caed889a8bba3
|
Wire InstallFinished and add some InstallEvent.waitUntil tests
BUG=285976
TEST=content_browsertests:ServiceWorkerVersionBrowserTest.Install*
Committed: https://src.chromium.org/viewvc/chrome?view=rev&revision=250804
Review URL: https://codereview.chromium.org/153553008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@250936 0039d316-1c4b-4281-b951-d872f2087c98
|
void EmbeddedWorkerContextClient::workerContextStarted(
blink::WebServiceWorkerContextProxy* proxy) {
DCHECK(!worker_task_runner_);
worker_task_runner_ = new WorkerThreadTaskRunner(
WorkerTaskRunner::Instance()->CurrentWorkerId());
DCHECK_NE(0, WorkerTaskRunner::Instance()->CurrentWorkerId());
DCHECK(g_worker_client_tls.Pointer()->Get() == NULL);
DCHECK(!script_context_);
g_worker_client_tls.Pointer()->Set(this);
script_context_.reset(new ServiceWorkerScriptContext(this, proxy));
worker_task_runner_->PostTask(
FROM_HERE,
base::Bind(&EmbeddedWorkerContextClient::SendWorkerStarted,
weak_factory_.GetWeakPtr()));
}
|
void EmbeddedWorkerContextClient::workerContextStarted(
blink::WebServiceWorkerContextProxy* proxy) {
DCHECK(!worker_task_runner_);
worker_task_runner_ = new WorkerThreadTaskRunner(
WorkerTaskRunner::Instance()->CurrentWorkerId());
DCHECK_NE(0, WorkerTaskRunner::Instance()->CurrentWorkerId());
DCHECK(g_worker_client_tls.Pointer()->Get() == NULL);
DCHECK(!script_context_);
g_worker_client_tls.Pointer()->Set(this);
script_context_.reset(new ServiceWorkerScriptContext(this, proxy));
worker_task_runner_->PostTask(
FROM_HERE,
base::Bind(&EmbeddedWorkerContextClient::SendWorkerStarted,
weak_factory_.GetWeakPtr()));
}
|
C
|
Chrome
| 0 |
CVE-2015-6252
|
https://www.cvedetails.com/cve/CVE-2015-6252/
|
CWE-399
|
https://github.com/torvalds/linux/commit/7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
|
7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
|
vhost: actually track log eventfd file
While reviewing vhost log code, I found out that log_file is never
set. Note: I haven't tested the change (QEMU doesn't use LOG_FD yet).
Cc: [email protected]
Signed-off-by: Marc-André Lureau <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
int vhost_log_access_ok(struct vhost_dev *dev)
{
return memory_access_ok(dev, dev->memory, 1);
}
|
int vhost_log_access_ok(struct vhost_dev *dev)
{
return memory_access_ok(dev, dev->memory, 1);
}
|
C
|
linux
| 0 |
CVE-2015-6763
|
https://www.cvedetails.com/cve/CVE-2015-6763/
| null |
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
|
MacViews: Enable secure text input for password Textfields.
In Cocoa the NSTextInputContext automatically enables secure text input
when activated and it's in the secure text entry mode.
RenderWidgetHostViewMac did the similar thing for ages following the
WebKit example.
views::Textfield needs to do the same thing in a fashion that's
sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions
are possible when the Textfield gets focus, activates the secure text
input mode and the RWHVM loses focus immediately afterwards and disables
the secure text input instead of leaving it in the enabled state.
BUG=818133,677220
Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b
Reviewed-on: https://chromium-review.googlesource.com/943064
Commit-Queue: Michail Pishchagin <[email protected]>
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Avi Drissman <[email protected]>
Reviewed-by: Peter Kasting <[email protected]>
Cr-Commit-Position: refs/heads/master@{#542517}
|
String InputType::SanitizeValue(const String& proposed_value) const {
return proposed_value;
}
|
String InputType::SanitizeValue(const String& proposed_value) const {
return proposed_value;
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/fb83de09f2c986ee91741f3a2776feea0e18e3f6
|
fb83de09f2c986ee91741f3a2776feea0e18e3f6
|
Revert "[Picture in Picture] Call parent function in OnGestureEvent."
This reverts commit e60d9aef9d1eeeff4e5954ba137ed5009261f626.
Reason for revert: Causes the close button to receive gesture events even when it's not the target of the tap. This causes the PiP window to unexpectedly close.
Bug: 895773
Original change's description:
> [Picture in Picture] Call parent function in OnGestureEvent.
>
> Change-Id: I854654be22abd217c3f8ed557bc3fb9118c557c6
> Reviewed-on: https://chromium-review.googlesource.com/1192326
> Reviewed-by: CJ DiMeglio <[email protected]>
> Commit-Queue: apacible <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#586820}
# Not skipping CQ checks because original CL landed > 1 day ago.
Change-Id: I2f36d78713f0b811a0a2681e09284c394e146a5c
Reviewed-on: https://chromium-review.googlesource.com/c/1318397
Commit-Queue: Tommy Steimel <[email protected]>
Reviewed-by: CJ DiMeglio <[email protected]>
Reviewed-by: Mounir Lamouri <[email protected]>
Cr-Commit-Position: refs/heads/master@{#607039}
|
void OverlayWindowViews::UpdateLayerBoundsWithLetterboxing(
gfx::Size window_size) {
if (window_bounds_.size().IsEmpty() || natural_size_.IsEmpty())
return;
gfx::Rect letterbox_region = media::ComputeLetterboxRegion(
gfx::Rect(gfx::Point(0, 0), window_size), natural_size_);
if (letterbox_region.IsEmpty())
return;
gfx::Size letterbox_size = letterbox_region.size();
gfx::Point origin =
gfx::Point((window_size.width() - letterbox_size.width()) / 2,
(window_size.height() - letterbox_size.height()) / 2);
video_bounds_.set_origin(origin);
video_bounds_.set_size(letterbox_region.size());
UpdateControlsBounds();
controller_->UpdateLayerBounds();
}
|
void OverlayWindowViews::UpdateLayerBoundsWithLetterboxing(
gfx::Size window_size) {
if (window_bounds_.size().IsEmpty() || natural_size_.IsEmpty())
return;
gfx::Rect letterbox_region = media::ComputeLetterboxRegion(
gfx::Rect(gfx::Point(0, 0), window_size), natural_size_);
if (letterbox_region.IsEmpty())
return;
gfx::Size letterbox_size = letterbox_region.size();
gfx::Point origin =
gfx::Point((window_size.width() - letterbox_size.width()) / 2,
(window_size.height() - letterbox_size.height()) / 2);
video_bounds_.set_origin(origin);
video_bounds_.set_size(letterbox_region.size());
UpdateControlsBounds();
controller_->UpdateLayerBounds();
}
|
C
|
Chrome
| 0 |
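The UpdateLayerBoundsWithLetterboxing() code in the record above centers a letterboxed video region inside the window. A small self-contained version of the underlying arithmetic is sketched below; the structs and names are illustrative, not the media::ComputeLetterboxRegion helper itself.

#include <stdio.h>

typedef struct { int w, h; } dims_t;
typedef struct { int x, y, w, h; } rect_t;

/* Scale 'natural' to fit 'window' while keeping its aspect ratio, then
 * center the result inside the window. */
static rect_t compute_letterbox(dims_t window, dims_t natural)
{
    rect_t r = { 0, 0, 0, 0 };
    if (window.w <= 0 || window.h <= 0 || natural.w <= 0 || natural.h <= 0)
        return r;
    /* Compare aspect ratios without floating point:
     * window.w/window.h versus natural.w/natural.h. */
    if ((long long)window.w * natural.h > (long long)natural.w * window.h) {
        r.h = window.h;                               /* pillarbox sides */
        r.w = (int)((long long)natural.w * window.h / natural.h);
    } else {
        r.w = window.w;                               /* letterbox top/bottom */
        r.h = (int)((long long)natural.h * window.w / natural.w);
    }
    r.x = (window.w - r.w) / 2;
    r.y = (window.h - r.h) / 2;
    return r;
}

int main(void)
{
    dims_t win = { 800, 600 }, vid = { 1920, 1080 };
    rect_t r = compute_letterbox(win, vid);
    printf("%d,%d %dx%d\n", r.x, r.y, r.w, r.h); /* 0,75 800x450 */
    return 0;
}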
CVE-2019-11922
|
https://www.cvedetails.com/cve/CVE-2019-11922/
|
CWE-362
|
https://github.com/facebook/zstd/pull/1404/commits/3e5cdf1b6a85843e991d7d10f6a2567c15580da0
|
3e5cdf1b6a85843e991d7d10f6a2567c15580da0
|
fixed T36302429
|
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
if (!cctxParams) { return ERROR(GENERIC); }
CHECK_F( ZSTD_checkCParams(params.cParams) );
memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->cParams = params.cParams;
cctxParams->fParams = params.fParams;
cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
assert(!ZSTD_checkCParams(params.cParams));
return 0;
}
|
size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
{
if (!cctxParams) { return ERROR(GENERIC); }
CHECK_F( ZSTD_checkCParams(params.cParams) );
memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->cParams = params.cParams;
cctxParams->fParams = params.fParams;
cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
assert(!ZSTD_checkCParams(params.cParams));
return 0;
}
|
C
|
zstd
| 0 |
CVE-2019-5803
|
https://www.cvedetails.com/cve/CVE-2019-5803/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0e3b0c22a5c596bdc24a391b3f02952c1c3e4f1b
|
0e3b0c22a5c596bdc24a391b3f02952c1c3e4f1b
|
Check the source browsing context's CSP in Location::SetLocation prior to dispatching a navigation to a `javascript:` URL.
Makes `javascript:` navigations via window.location.href compliant with
https://html.spec.whatwg.org/#navigate, which states that the source
browsing context must be checked (rather than the current browsing
context).
Bug: 909865
Change-Id: Id6aef6eef56865e164816c67eb9fe07ea1cb1b4e
Reviewed-on: https://chromium-review.googlesource.com/c/1359823
Reviewed-by: Andy Paicu <[email protected]>
Reviewed-by: Mike West <[email protected]>
Commit-Queue: Andrew Comminos <[email protected]>
Cr-Commit-Position: refs/heads/master@{#614451}
|
Document* Location::GetDocument() const {
return ToLocalDOMWindow(dom_window_)->document();
}
|
Document* Location::GetDocument() const {
return ToLocalDOMWindow(dom_window_)->document();
}
|
C
|
Chrome
| 0 |
CVE-2015-6773
|
https://www.cvedetails.com/cve/CVE-2015-6773/
|
CWE-119
|
https://github.com/chromium/chromium/commit/33827275411b33371e7bb750cce20f11de85002d
|
33827275411b33371e7bb750cce20f11de85002d
|
Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <[email protected]>
Reviewed-by: Xiaocheng Hu <[email protected]>
Reviewed-by: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#491660}
|
void SelectionEditor::DidMergeTextNodes(
const Text& merged_node,
const NodeWithIndex& node_to_be_removed_with_index,
unsigned old_length) {
if (selection_.IsNone()) {
DidFinishDOMMutation();
return;
}
const Position& new_base = UpdatePostionAfterAdoptingTextNodesMerged(
selection_.base_, merged_node, node_to_be_removed_with_index, old_length);
const Position& new_extent = UpdatePostionAfterAdoptingTextNodesMerged(
selection_.extent_, merged_node, node_to_be_removed_with_index,
old_length);
DidFinishTextChange(new_base, new_extent);
}
|
void SelectionEditor::DidMergeTextNodes(
const Text& merged_node,
const NodeWithIndex& node_to_be_removed_with_index,
unsigned old_length) {
if (selection_.IsNone()) {
DidFinishDOMMutation();
return;
}
const Position& new_base = UpdatePostionAfterAdoptingTextNodesMerged(
selection_.base_, merged_node, node_to_be_removed_with_index, old_length);
const Position& new_extent = UpdatePostionAfterAdoptingTextNodesMerged(
selection_.extent_, merged_node, node_to_be_removed_with_index,
old_length);
DidFinishTextChange(new_base, new_extent);
}
|
C
|
Chrome
| 0 |
CVE-2017-9732
|
https://www.cvedetails.com/cve/CVE-2017-9732/
|
CWE-400
|
https://github.com/elric1/knc/commit/f237f3e09ecbaf59c897f5046538a7b1a3fa40c1
|
f237f3e09ecbaf59c897f5046538a7b1a3fa40c1
|
knc: fix a couple of memory leaks.
One of these can be remotely triggered during the authentication
phase which leads to a remote DoS possibility.
Pointed out by: Imre Rad <[email protected]>
|
gstd_release_context(void *ctx) {
OM_uint32 min;
gss_delete_sec_context(&min, (gss_ctx_id_t *)ctx, GSS_C_NO_BUFFER);
}
|
gstd_release_context(void *ctx) {
OM_uint32 min;
gss_delete_sec_context(&min, (gss_ctx_id_t *)ctx, GSS_C_NO_BUFFER);
}
|
C
|
knc
| 0 |
CVE-2017-13083
|
https://www.cvedetails.com/cve/CVE-2017-13083/
|
CWE-494
|
https://github.com/pbatard/rufus/commit/c3c39f7f8a11f612c4ebf7affce25ec6928eb1cb
|
c3c39f7f8a11f612c4ebf7affce25ec6928eb1cb
|
[pki] fix https://www.kb.cert.org/vuls/id/403768
* This commit effectively fixes https://www.kb.cert.org/vuls/id/403768 (CVE-2017-13083) as
it is described per its revision 11, which is the latest revision at the time of this commit,
by disabling Windows prompts, enacted during signature validation, that allow the user to
bypass the intended signature verification checks.
* It needs to be pointed out that the vulnerability ("allow(ing) the use of a self-signed
certificate"), which relies on the end-user actively ignoring a Windows prompt that tells
them that the update failed the signature validation whilst also advising against running it,
is being fully addressed, even as the update protocol remains HTTP.
* It also needs to be pointed out that the extended delay (48 hours) between the time the
vulnerability was reported and the moment it is fixed in our codebase has to do with
the fact that the reporter chose to deviate from standard security practices by not
disclosing the details of the vulnerability with us, be it publicly or privately,
before creating the cert.org report. The only advance notification we received was a
generic note about the use of HTTP vs HTTPS, which, as have established, is not
immediately relevant to addressing the reported vulnerability.
* Closes #1009
* Note: The other vulnerability scenario described towards the end of #1009, which
doesn't have to do with the "lack of CA checking", will be addressed separately.
|
INT_PTR CALLBACK SelectionDynCallback(HWND hwndDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
int r = -1;
switch (message) {
case WM_INITDIALOG:
return (INT_PTR)TRUE;
case WM_COMMAND:
switch (LOWORD(wParam)) {
case IDOK:
r = 0;
case IDCANCEL:
EndDialog(hwndDlg, r);
return (INT_PTR)TRUE;
}
}
return FALSE;
}
|
INT_PTR CALLBACK SelectionDynCallback(HWND hwndDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
int r = -1;
switch (message) {
case WM_INITDIALOG:
return (INT_PTR)TRUE;
case WM_COMMAND:
switch (LOWORD(wParam)) {
case IDOK:
r = 0;
case IDCANCEL:
EndDialog(hwndDlg, r);
return (INT_PTR)TRUE;
}
}
return FALSE;
}
|
C
|
rufus
| 0 |
CVE-2014-3610
|
https://www.cvedetails.com/cve/CVE-2014-3610/
|
CWE-264
|
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
if (irr == -1)
return;
if (tpr >= irr)
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
|
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
if (irr == -1)
return;
if (tpr >= irr)
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
|
C
|
linux
| 0 |
CVE-2016-7425
|
https://www.cvedetails.com/cve/CVE-2016-7425/
|
CWE-119
|
https://github.com/torvalds/linux/commit/7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
|
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
|
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
We need to put an upper bound on "user_len" so the memcpy() doesn't
overflow.
Cc: <[email protected]>
Reported-by: Marco Grassi <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: Tomas Henzl <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
|
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
uint32_t host_interrupt_status;
struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
/*
*********************************************
** check outbound intstatus
*********************************************
*/
host_interrupt_status = readl(&phbcmu->host_int_status) &
(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
if (!host_interrupt_status)
return IRQ_NONE;
do {
if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
arcmsr_hbaC_doorbell_isr(pACB);
/* MU post queue interrupts*/
if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
arcmsr_hbaC_postqueue_isr(pACB);
host_interrupt_status = readl(&phbcmu->host_int_status);
} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
return IRQ_HANDLED;
}
|
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
uint32_t host_interrupt_status;
struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
/*
*********************************************
** check outbound intstatus
*********************************************
*/
host_interrupt_status = readl(&phbcmu->host_int_status) &
(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
if (!host_interrupt_status)
return IRQ_NONE;
do {
if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
arcmsr_hbaC_doorbell_isr(pACB);
/* MU post queue interrupts*/
if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
arcmsr_hbaC_postqueue_isr(pACB);
host_interrupt_status = readl(&phbcmu->host_int_status);
} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
return IRQ_HANDLED;
}
|
C
|
linux
| 0 |
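The arcmsr fix is summarized in its commit message: put an upper bound on user_len before the memcpy(). The following sketch shows that clamp in isolation, with an illustrative buffer size and function name rather than the driver's real message structures.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MSG_BUF_LEN 128u

static size_t copy_user_message(uint8_t *dst, const uint8_t *src, size_t user_len)
{
    if (user_len > MSG_BUF_LEN)   /* the missing upper bound */
        user_len = MSG_BUF_LEN;
    memcpy(dst, src, user_len);
    return user_len;              /* tell the caller how much was taken */
}

int main(void)
{
    uint8_t dst[MSG_BUF_LEN];
    uint8_t src[512] = {0};
    size_t copied = copy_user_message(dst, src, sizeof(src)); /* clamped to 128 */
    printf("copied %zu bytes\n", copied);
    return 0;
}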
CVE-2014-3173
|
https://www.cvedetails.com/cve/CVE-2014-3173/
|
CWE-119
|
https://github.com/chromium/chromium/commit/ee7579229ff7e9e5ae28bf53aea069251499d7da
|
ee7579229ff7e9e5ae28bf53aea069251499d7da
|
Framebuffer clear() needs to consider the situation some draw buffers are disabled.
This is when we expose DrawBuffers extension.
BUG=376951
TEST=the attached test case, webgl conformance
[email protected],[email protected]
Review URL: https://codereview.chromium.org/315283002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@275338 0039d316-1c4b-4281-b951-d872f2087c98
|
VertexAttribManager* GetVertexAttribManager(GLuint client_id) {
VertexAttribManager* info =
vertex_array_manager()->GetVertexAttribManager(client_id);
return info;
}
|
VertexAttribManager* GetVertexAttribManager(GLuint client_id) {
VertexAttribManager* info =
vertex_array_manager()->GetVertexAttribManager(client_id);
return info;
}
|
C
|
Chrome
| 0 |
CVE-2016-7910
|
https://www.cvedetails.com/cve/CVE-2016-7910/
|
CWE-416
|
https://github.com/torvalds/linux/commit/77da160530dd1dc94f6ae15a981f24e5f0021e84
|
77da160530dd1dc94f6ae15a981f24e5f0021e84
|
block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: [email protected]
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
static int __init genhd_device_init(void)
{
int error;
block_class.dev_kobj = sysfs_dev_block_kobj;
error = class_register(&block_class);
if (unlikely(error))
return error;
bdev_map = kobj_map_init(base_probe, &block_class_lock);
blk_dev_init();
register_blkdev(BLOCK_EXT_MAJOR, "blkext");
/* create top-level block dir */
if (!sysfs_deprecated)
block_depr = kobject_create_and_add("block", NULL);
return 0;
}
|
static int __init genhd_device_init(void)
{
int error;
block_class.dev_kobj = sysfs_dev_block_kobj;
error = class_register(&block_class);
if (unlikely(error))
return error;
bdev_map = kobj_map_init(base_probe, &block_class_lock);
blk_dev_init();
register_blkdev(BLOCK_EXT_MAJOR, "blkext");
/* create top-level block dir */
if (!sysfs_deprecated)
block_depr = kobject_create_and_add("block", NULL);
return 0;
}
|
C
|
linux
| 0 |
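The block-layer commit above explains the sequence: stop() runs even when the matching start() failed, so a private pointer freed in stop() must be reset to NULL or the next cycle touches freed memory. The sketch below mirrors that discipline with hypothetical callbacks; it is not the kernel seq_file API.

#include <stdlib.h>

struct seq_ctx { void *private_data; /* per-iteration state */ };
struct iter { int pos; };

static void *demo_start(struct seq_ctx *ctx)
{
    struct iter *it = malloc(sizeof(*it));
    if (!it)
        return NULL;            /* start failed; stop() is still called */
    it->pos = 0;
    ctx->private_data = it;
    return it;
}

static void demo_stop(struct seq_ctx *ctx)
{
    free(ctx->private_data);
    ctx->private_data = NULL;   /* the fix: never leave a stale pointer
                                   behind for the next start()/stop() cycle */
}

int main(void)
{
    struct seq_ctx ctx = { NULL };
    demo_start(&ctx);
    demo_stop(&ctx);            /* frees and resets */
    demo_stop(&ctx);            /* free(NULL) is a no-op: no double free,
                                   no use-after-free */
    return 0;
}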
CVE-2015-6755
|
https://www.cvedetails.com/cve/CVE-2015-6755/
|
CWE-264
|
https://github.com/chromium/chromium/commit/c71a21e6dda9025c2bf823c5aab791c2ae8cdfc2
|
c71a21e6dda9025c2bf823c5aab791c2ae8cdfc2
|
parserInsertBefore: Bail out if the parent no longer contains the child.
nextChild may be removed from the DOM tree during the
|parserRemoveChild(*newChild)| call which triggers unload events of newChild's
descendant iframes. In order to maintain the integrity of the DOM tree, the
insertion of newChild must be aborted in this case.
This patch adds a return statement that rectifies the behavior in this
edge case.
BUG=519558
Review URL: https://codereview.chromium.org/1283263002
git-svn-id: svn://svn.chromium.org/blink/trunk@200690 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
bool ContainerNode::getLowerRightCorner(FloatPoint& point) const
{
if (!layoutObject())
return false;
LayoutObject* o = layoutObject();
if (!o->isInline() || o->isReplaced()) {
LayoutBox* box = toLayoutBox(o);
point = o->localToAbsolute(FloatPoint(box->size()), UseTransforms);
return true;
}
LayoutObject* startContinuation = nullptr;
while (o) {
if (LayoutObject* oLastChild = o->slowLastChild()) {
o = oLastChild;
} else if (o != layoutObject() && o->previousSibling()) {
o = o->previousSibling();
} else {
LayoutObject* prev = nullptr;
while (!prev) {
if (startContinuation == o) {
startContinuation = nullptr;
} else if (!startContinuation) {
if (LayoutObject* continuation = endOfContinuations(o)) {
startContinuation = o;
prev = continuation;
break;
}
}
if (o == layoutObject()) {
return false;
}
o = o->parent();
if (!o)
return false;
prev = o->previousSibling();
}
o = prev;
}
ASSERT(o);
if (o->isText() || o->isReplaced()) {
point = FloatPoint();
if (o->isText()) {
LayoutText* text = toLayoutText(o);
IntRect linesBox = text->linesBoundingBox();
if (!linesBox.maxX() && !linesBox.maxY())
continue;
point.moveBy(linesBox.maxXMaxYCorner());
} else {
LayoutBox* box = toLayoutBox(o);
point.moveBy(box->frameRect().maxXMaxYCorner());
}
point = o->container()->localToAbsolute(point, UseTransforms);
return true;
}
}
return true;
}
|
bool ContainerNode::getLowerRightCorner(FloatPoint& point) const
{
if (!layoutObject())
return false;
LayoutObject* o = layoutObject();
if (!o->isInline() || o->isReplaced()) {
LayoutBox* box = toLayoutBox(o);
point = o->localToAbsolute(FloatPoint(box->size()), UseTransforms);
return true;
}
LayoutObject* startContinuation = nullptr;
while (o) {
if (LayoutObject* oLastChild = o->slowLastChild()) {
o = oLastChild;
} else if (o != layoutObject() && o->previousSibling()) {
o = o->previousSibling();
} else {
LayoutObject* prev = nullptr;
while (!prev) {
if (startContinuation == o) {
startContinuation = nullptr;
} else if (!startContinuation) {
if (LayoutObject* continuation = endOfContinuations(o)) {
startContinuation = o;
prev = continuation;
break;
}
}
if (o == layoutObject()) {
return false;
}
o = o->parent();
if (!o)
return false;
prev = o->previousSibling();
}
o = prev;
}
ASSERT(o);
if (o->isText() || o->isReplaced()) {
point = FloatPoint();
if (o->isText()) {
LayoutText* text = toLayoutText(o);
IntRect linesBox = text->linesBoundingBox();
if (!linesBox.maxX() && !linesBox.maxY())
continue;
point.moveBy(linesBox.maxXMaxYCorner());
} else {
LayoutBox* box = toLayoutBox(o);
point.moveBy(box->frameRect().maxXMaxYCorner());
}
point = o->container()->localToAbsolute(point, UseTransforms);
return true;
}
}
return true;
}
|
C
|
Chrome
| 0 |
CVE-2014-0791
|
https://www.cvedetails.com/cve/CVE-2014-0791/
|
CWE-189
|
https://github.com/sidhpurwala-huzaifa/FreeRDP/commit/e2745807c4c3e0a590c0f69a9b655dc74ebaa03e
|
e2745807c4c3e0a590c0f69a9b655dc74ebaa03e
|
Fix possible integer overflow in license_read_scope_list()
|
int license_recv(rdpLicense* license, wStream* s)
{
BYTE flags;
BYTE bMsgType;
UINT16 wMsgSize;
UINT16 length;
UINT16 channelId;
UINT16 securityFlags;
if (!rdp_read_header(license->rdp, s, &length, &channelId))
{
fprintf(stderr, "Incorrect RDP header.\n");
return -1;
}
if (!rdp_read_security_header(s, &securityFlags))
return -1;
if (securityFlags & SEC_ENCRYPT)
{
if (!rdp_decrypt(license->rdp, s, length - 4, securityFlags))
{
fprintf(stderr, "rdp_decrypt failed\n");
return -1;
}
}
if (!(securityFlags & SEC_LICENSE_PKT))
{
int status;
if (!(securityFlags & SEC_ENCRYPT))
Stream_Rewind(s, RDP_SECURITY_HEADER_LENGTH);
status = rdp_recv_out_of_sequence_pdu(license->rdp, s);
if (status < 0)
{
fprintf(stderr, "Unexpected license packet.\n");
return status;
}
return 0;
}
if (!license_read_preamble(s, &bMsgType, &flags, &wMsgSize)) /* preamble (4 bytes) */
return -1;
DEBUG_LICENSE("Receiving %s Packet", LICENSE_MESSAGE_STRINGS[bMsgType & 0x1F]);
switch (bMsgType)
{
case LICENSE_REQUEST:
if (!license_read_license_request_packet(license, s))
return -1;
license_send_new_license_request_packet(license);
break;
case PLATFORM_CHALLENGE:
if (!license_read_platform_challenge_packet(license, s))
return -1;
license_send_platform_challenge_response_packet(license);
break;
case NEW_LICENSE:
license_read_new_license_packet(license, s);
break;
case UPGRADE_LICENSE:
license_read_upgrade_license_packet(license, s);
break;
case ERROR_ALERT:
if (!license_read_error_alert_packet(license, s))
return -1;
break;
default:
fprintf(stderr, "invalid bMsgType:%d\n", bMsgType);
return FALSE;
}
return 0;
}
|
int license_recv(rdpLicense* license, wStream* s)
{
BYTE flags;
BYTE bMsgType;
UINT16 wMsgSize;
UINT16 length;
UINT16 channelId;
UINT16 securityFlags;
if (!rdp_read_header(license->rdp, s, &length, &channelId))
{
fprintf(stderr, "Incorrect RDP header.\n");
return -1;
}
if (!rdp_read_security_header(s, &securityFlags))
return -1;
if (securityFlags & SEC_ENCRYPT)
{
if (!rdp_decrypt(license->rdp, s, length - 4, securityFlags))
{
fprintf(stderr, "rdp_decrypt failed\n");
return -1;
}
}
if (!(securityFlags & SEC_LICENSE_PKT))
{
int status;
if (!(securityFlags & SEC_ENCRYPT))
Stream_Rewind(s, RDP_SECURITY_HEADER_LENGTH);
status = rdp_recv_out_of_sequence_pdu(license->rdp, s);
if (status < 0)
{
fprintf(stderr, "Unexpected license packet.\n");
return status;
}
return 0;
}
if (!license_read_preamble(s, &bMsgType, &flags, &wMsgSize)) /* preamble (4 bytes) */
return -1;
DEBUG_LICENSE("Receiving %s Packet", LICENSE_MESSAGE_STRINGS[bMsgType & 0x1F]);
switch (bMsgType)
{
case LICENSE_REQUEST:
if (!license_read_license_request_packet(license, s))
return -1;
license_send_new_license_request_packet(license);
break;
case PLATFORM_CHALLENGE:
if (!license_read_platform_challenge_packet(license, s))
return -1;
license_send_platform_challenge_response_packet(license);
break;
case NEW_LICENSE:
license_read_new_license_packet(license, s);
break;
case UPGRADE_LICENSE:
license_read_upgrade_license_packet(license, s);
break;
case ERROR_ALERT:
if (!license_read_error_alert_packet(license, s))
return -1;
break;
default:
fprintf(stderr, "invalid bMsgType:%d\n", bMsgType);
return FALSE;
}
return 0;
}
|
C
|
FreeRDP
| 0 |
CVE-2012-3552
|
https://www.cvedetails.com/cve/CVE-2012-3552/
|
CWE-362
|
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
|
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
The problem is that ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
__be32 daddr;
__be32 saddr;
u8 tos;
int err;
struct ip_options_data opt_copy;
err = -EMSGSIZE;
if (len > 0xFFFF)
goto out;
/*
* Check the flags.
*/
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
goto out; /* compatibility */
/*
* Get and verify the address.
*/
if (msg->msg_namelen) {
struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
static int complained;
if (!complained++)
printk(KERN_INFO "%s forgot to set AF_INET in "
"raw sendmsg. Fix it!\n",
current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
}
daddr = usin->sin_addr.s_addr;
/* ANK: I did not forget to get protocol from port field.
* I just do not know, who uses this weirdness.
* IP_HDRINCL is much more convenient.
*/
} else {
err = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
}
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
goto out;
if (ipc.opt)
free = 1;
}
saddr = ipc.addr;
ipc.addr = daddr;
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
if (ipc.opt) {
err = -EINVAL;
/* Linux does not mangle headers on raw sockets,
* so that IP options + IP_HDRINCL is non-sense.
*/
if (inet->hdrincl)
goto done;
if (ipc.opt->opt.srr) {
if (!daddr)
goto done;
daddr = ipc.opt->opt.faddr;
}
}
tos = RT_CONN_FLAGS(sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
if (err)
goto done;
}
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto done;
}
}
err = -EACCES;
if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
goto done;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, msg->msg_iov, len,
&rt, msg->msg_flags);
else {
if (!ipc.addr)
ipc.addr = rt->rt_dst;
lock_sock(sk);
err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
}
release_sock(sk);
}
done:
if (free)
kfree(ipc.opt);
ip_rt_put(rt);
out:
if (err < 0)
return err;
return len;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
|
static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
__be32 daddr;
__be32 saddr;
u8 tos;
int err;
err = -EMSGSIZE;
if (len > 0xFFFF)
goto out;
/*
* Check the flags.
*/
err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */
goto out; /* compatibility */
/*
* Get and verify the address.
*/
if (msg->msg_namelen) {
struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
err = -EINVAL;
if (msg->msg_namelen < sizeof(*usin))
goto out;
if (usin->sin_family != AF_INET) {
static int complained;
if (!complained++)
printk(KERN_INFO "%s forgot to set AF_INET in "
"raw sendmsg. Fix it!\n",
current->comm);
err = -EAFNOSUPPORT;
if (usin->sin_family)
goto out;
}
daddr = usin->sin_addr.s_addr;
/* ANK: I did not forget to get protocol from port field.
* I just do not know, who uses this weirdness.
* IP_HDRINCL is much more convenient.
*/
} else {
err = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
daddr = inet->inet_daddr;
}
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc);
if (err)
goto out;
if (ipc.opt)
free = 1;
}
saddr = ipc.addr;
ipc.addr = daddr;
if (!ipc.opt)
ipc.opt = inet->opt;
if (ipc.opt) {
err = -EINVAL;
/* Linux does not mangle headers on raw sockets,
* so that IP options + IP_HDRINCL is non-sense.
*/
if (inet->hdrincl)
goto done;
if (ipc.opt->srr) {
if (!daddr)
goto done;
daddr = ipc.opt->faddr;
}
}
tos = RT_CONN_FLAGS(sk);
if (msg->msg_flags & MSG_DONTROUTE)
tos |= RTO_ONLINK;
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
}
{
struct flowi4 fl4;
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
if (err)
goto done;
}
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto done;
}
}
err = -EACCES;
if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
goto done;
if (msg->msg_flags & MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = raw_send_hdrinc(sk, msg->msg_iov, len,
&rt, msg->msg_flags);
else {
if (!ipc.addr)
ipc.addr = rt->rt_dst;
lock_sock(sk);
err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk);
if (err == -ENOBUFS && !inet->recverr)
err = 0;
}
release_sock(sk);
}
done:
if (free)
kfree(ipc.opt);
ip_rt_put(rt);
out:
if (err < 0)
return err;
return len;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
|
C
|
linux
| 1 |
CVE-2013-1819
|
https://www.cvedetails.com/cve/CVE-2013-1819/
|
CWE-20
|
https://github.com/torvalds/linux/commit/eb178619f930fa2ba2348de332a1ff1c66a31424
|
eb178619f930fa2ba2348de332a1ff1c66a31424
|
xfs: fix _xfs_buf_find oops on blocks beyond the filesystem end
When _xfs_buf_find is passed an out of range address, it will fail
to find a relevant struct xfs_perag and oops with a null
dereference. This can happen when trying to walk a filesystem with a
metadata inode that has a partially corrupted extent map (i.e. the
block number returned is corrupt, but is otherwise intact) and we
try to read from the corrupted block address.
In this case, just fail the lookup. If it is readahead being issued,
it will simply not be done, but if it is a real read that fails, an
error will be reported. Ideally this case should result
in an EFSCORRUPTED error being reported, but we cannot return an
error through xfs_buf_read() or xfs_buf_get() so this lookup failure
may result in ENOMEM or EIO errors being reported instead.
Signed-off-by: Dave Chinner <[email protected]>
Reviewed-by: Brian Foster <[email protected]>
Reviewed-by: Ben Myers <[email protected]>
Signed-off-by: Ben Myers <[email protected]>
|
xfs_buf_terminate(void)
{
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
}
|
xfs_buf_terminate(void)
{
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
}
|
C
|
linux
| 0 |
CVE-2016-1670
|
https://www.cvedetails.com/cve/CVE-2016-1670/
|
CWE-362
|
https://github.com/chromium/chromium/commit/1af4fada49c4f3890f16daac31d38379a9d782b2
|
1af4fada49c4f3890f16daac31d38379a9d782b2
|
Block a compromised renderer from reusing request ids.
BUG=578882
Review URL: https://codereview.chromium.org/1608573002
Cr-Commit-Position: refs/heads/master@{#372547}
|
void ResourceDispatcherHostImpl::BeginRequest(
int request_id,
const ResourceHostMsg_Request& request_data,
IPC::Message* sync_result, // only valid for sync
int route_id) {
int process_type = filter_->process_type();
int child_id = filter_->child_id();
// Reject request id that's currently in use.
if (IsRequestIDInUse(GlobalRequestID(child_id, request_id))) {
bad_message::ReceivedBadMessage(filter_,
bad_message::RDH_INVALID_REQUEST_ID);
return;
}
if (IsBrowserSideNavigationEnabled() &&
IsResourceTypeFrame(request_data.resource_type) &&
!request_data.url.SchemeIs(url::kBlobScheme)) {
bad_message::ReceivedBadMessage(filter_, bad_message::RDH_INVALID_URL);
return;
}
if (request_data.priority < net::MINIMUM_PRIORITY ||
request_data.priority > net::MAXIMUM_PRIORITY) {
bad_message::ReceivedBadMessage(filter_, bad_message::RDH_INVALID_PRIORITY);
return;
}
char url_buf[128];
base::strlcpy(url_buf, request_data.url.spec().c_str(), arraysize(url_buf));
base::debug::Alias(url_buf);
LoaderMap::iterator it = pending_loaders_.find(
GlobalRequestID(request_data.transferred_request_child_id,
request_data.transferred_request_request_id));
if (it != pending_loaders_.end()) {
if (it->second->is_transferring()) {
ResourceLoader* deferred_loader = it->second.get();
UpdateRequestForTransfer(child_id, route_id, request_id,
request_data, it);
deferred_loader->CompleteTransfer();
} else {
bad_message::ReceivedBadMessage(
filter_, bad_message::RDH_REQUEST_NOT_TRANSFERRING);
}
return;
}
ResourceContext* resource_context = NULL;
net::URLRequestContext* request_context = NULL;
filter_->GetContexts(request_data.resource_type, request_data.origin_pid,
&resource_context, &request_context);
CHECK(ContainsKey(active_resource_contexts_, resource_context));
net::HttpRequestHeaders headers;
headers.AddHeadersFromString(request_data.headers);
if (is_shutdown_ ||
!ShouldServiceRequest(process_type, child_id, request_data, headers,
filter_, resource_context)) {
AbortRequestBeforeItStarts(filter_, sync_result, request_id);
return;
}
if (delegate_ && !delegate_->ShouldBeginRequest(request_data.method,
request_data.url,
request_data.resource_type,
resource_context)) {
AbortRequestBeforeItStarts(filter_, sync_result, request_id);
return;
}
scoped_ptr<net::URLRequest> new_request = request_context->CreateRequest(
request_data.url, request_data.priority, NULL);
new_request->set_method(request_data.method);
new_request->set_first_party_for_cookies(
request_data.first_party_for_cookies);
new_request->set_initiator(request_data.request_initiator);
if (request_data.resource_type == RESOURCE_TYPE_MAIN_FRAME) {
new_request->set_first_party_url_policy(
net::URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT);
}
const Referrer referrer(request_data.referrer, request_data.referrer_policy);
SetReferrerForRequest(new_request.get(), referrer);
new_request->SetExtraRequestHeaders(headers);
storage::BlobStorageContext* blob_context =
GetBlobStorageContext(filter_->blob_storage_context());
if (request_data.request_body.get()) {
if (blob_context) {
AttachRequestBodyBlobDataHandles(
request_data.request_body.get(),
blob_context);
}
new_request->set_upload(UploadDataStreamBuilder::Build(
request_data.request_body.get(),
blob_context,
filter_->file_system_context(),
BrowserThread::GetMessageLoopProxyForThread(BrowserThread::FILE)
.get()));
}
bool allow_download = request_data.allow_download &&
IsResourceTypeFrame(request_data.resource_type);
bool do_not_prompt_for_login = request_data.do_not_prompt_for_login;
bool is_sync_load = sync_result != NULL;
ChildProcessSecurityPolicyImpl* policy =
ChildProcessSecurityPolicyImpl::GetInstance();
bool report_raw_headers = request_data.report_raw_headers;
if (report_raw_headers && !policy->CanReadRawCookies(child_id)) {
VLOG(1) << "Denied unauthorized request for raw headers";
report_raw_headers = false;
}
int load_flags =
BuildLoadFlagsForRequest(request_data, child_id, is_sync_load);
if (request_data.resource_type == RESOURCE_TYPE_PREFETCH ||
request_data.resource_type == RESOURCE_TYPE_FAVICON) {
do_not_prompt_for_login = true;
}
if (request_data.resource_type == RESOURCE_TYPE_IMAGE &&
HTTP_AUTH_RELATION_BLOCKED_CROSS ==
HttpAuthRelationTypeOf(request_data.url,
request_data.first_party_for_cookies)) {
do_not_prompt_for_login = true;
load_flags |= net::LOAD_DO_NOT_USE_EMBEDDED_IDENTITY;
}
bool support_async_revalidation =
!is_sync_load && async_revalidation_manager_ &&
AsyncRevalidationManager::QualifiesForAsyncRevalidation(request_data);
if (support_async_revalidation)
load_flags |= net::LOAD_SUPPORT_ASYNC_REVALIDATION;
if (is_sync_load) {
DCHECK_EQ(request_data.priority, net::MAXIMUM_PRIORITY);
DCHECK_NE(load_flags & net::LOAD_IGNORE_LIMITS, 0);
} else {
DCHECK_EQ(load_flags & net::LOAD_IGNORE_LIMITS, 0);
}
new_request->SetLoadFlags(load_flags);
ResourceRequestInfoImpl* extra_info = new ResourceRequestInfoImpl(
process_type, child_id, route_id,
-1, // frame_tree_node_id
request_data.origin_pid, request_id, request_data.render_frame_id,
request_data.is_main_frame, request_data.parent_is_main_frame,
request_data.resource_type, request_data.transition_type,
request_data.should_replace_current_entry,
false, // is download
false, // is stream
allow_download, request_data.has_user_gesture,
request_data.enable_load_timing, request_data.enable_upload_progress,
do_not_prompt_for_login, request_data.referrer_policy,
request_data.visiblity_state, resource_context, filter_->GetWeakPtr(),
report_raw_headers, !is_sync_load,
IsUsingLoFi(request_data.lofi_state, delegate_, *new_request,
resource_context,
request_data.resource_type == RESOURCE_TYPE_MAIN_FRAME),
support_async_revalidation ? request_data.headers : std::string());
extra_info->AssociateWithRequest(new_request.get());
if (new_request->url().SchemeIs(url::kBlobScheme)) {
storage::BlobProtocolHandler::SetRequestedBlobDataHandle(
new_request.get(),
filter_->blob_storage_context()->context()->GetBlobDataFromPublicURL(
new_request->url()));
}
const bool should_skip_service_worker =
request_data.skip_service_worker || is_sync_load;
ServiceWorkerRequestHandler::InitializeHandler(
new_request.get(), filter_->service_worker_context(), blob_context,
child_id, request_data.service_worker_provider_id,
should_skip_service_worker,
request_data.fetch_request_mode, request_data.fetch_credentials_mode,
request_data.fetch_redirect_mode, request_data.resource_type,
request_data.fetch_request_context_type, request_data.fetch_frame_type,
request_data.request_body);
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalWebPlatformFeatures)) {
ForeignFetchRequestHandler::InitializeHandler(
new_request.get(), filter_->service_worker_context(), blob_context,
child_id, request_data.service_worker_provider_id,
should_skip_service_worker,
request_data.fetch_request_mode, request_data.fetch_credentials_mode,
request_data.fetch_redirect_mode, request_data.resource_type,
request_data.fetch_request_context_type, request_data.fetch_frame_type,
request_data.request_body);
}
AppCacheInterceptor::SetExtraRequestInfo(
new_request.get(), filter_->appcache_service(), child_id,
request_data.appcache_host_id, request_data.resource_type,
request_data.should_reset_appcache);
scoped_ptr<ResourceHandler> handler(
CreateResourceHandler(
new_request.get(),
request_data, sync_result, route_id, process_type, child_id,
resource_context));
if (handler)
BeginRequestInternal(std::move(new_request), std::move(handler));
}
|
void ResourceDispatcherHostImpl::BeginRequest(
int request_id,
const ResourceHostMsg_Request& request_data,
IPC::Message* sync_result, // only valid for sync
int route_id) {
int process_type = filter_->process_type();
int child_id = filter_->child_id();
if (IsBrowserSideNavigationEnabled() &&
IsResourceTypeFrame(request_data.resource_type) &&
!request_data.url.SchemeIs(url::kBlobScheme)) {
bad_message::ReceivedBadMessage(filter_, bad_message::RDH_INVALID_URL);
return;
}
if (request_data.priority < net::MINIMUM_PRIORITY ||
request_data.priority > net::MAXIMUM_PRIORITY) {
bad_message::ReceivedBadMessage(filter_, bad_message::RDH_INVALID_PRIORITY);
return;
}
char url_buf[128];
base::strlcpy(url_buf, request_data.url.spec().c_str(), arraysize(url_buf));
base::debug::Alias(url_buf);
LoaderMap::iterator it = pending_loaders_.find(
GlobalRequestID(request_data.transferred_request_child_id,
request_data.transferred_request_request_id));
if (it != pending_loaders_.end()) {
if (it->second->is_transferring()) {
ResourceLoader* deferred_loader = it->second.get();
UpdateRequestForTransfer(child_id, route_id, request_id,
request_data, it);
deferred_loader->CompleteTransfer();
} else {
bad_message::ReceivedBadMessage(
filter_, bad_message::RDH_REQUEST_NOT_TRANSFERRING);
}
return;
}
ResourceContext* resource_context = NULL;
net::URLRequestContext* request_context = NULL;
filter_->GetContexts(request_data.resource_type, request_data.origin_pid,
&resource_context, &request_context);
CHECK(ContainsKey(active_resource_contexts_, resource_context));
net::HttpRequestHeaders headers;
headers.AddHeadersFromString(request_data.headers);
if (is_shutdown_ ||
!ShouldServiceRequest(process_type, child_id, request_data, headers,
filter_, resource_context)) {
AbortRequestBeforeItStarts(filter_, sync_result, request_id);
return;
}
if (delegate_ && !delegate_->ShouldBeginRequest(request_data.method,
request_data.url,
request_data.resource_type,
resource_context)) {
AbortRequestBeforeItStarts(filter_, sync_result, request_id);
return;
}
scoped_ptr<net::URLRequest> new_request = request_context->CreateRequest(
request_data.url, request_data.priority, NULL);
new_request->set_method(request_data.method);
new_request->set_first_party_for_cookies(
request_data.first_party_for_cookies);
new_request->set_initiator(request_data.request_initiator);
if (request_data.resource_type == RESOURCE_TYPE_MAIN_FRAME) {
new_request->set_first_party_url_policy(
net::URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT);
}
const Referrer referrer(request_data.referrer, request_data.referrer_policy);
SetReferrerForRequest(new_request.get(), referrer);
new_request->SetExtraRequestHeaders(headers);
storage::BlobStorageContext* blob_context =
GetBlobStorageContext(filter_->blob_storage_context());
if (request_data.request_body.get()) {
if (blob_context) {
AttachRequestBodyBlobDataHandles(
request_data.request_body.get(),
blob_context);
}
new_request->set_upload(UploadDataStreamBuilder::Build(
request_data.request_body.get(),
blob_context,
filter_->file_system_context(),
BrowserThread::GetMessageLoopProxyForThread(BrowserThread::FILE)
.get()));
}
bool allow_download = request_data.allow_download &&
IsResourceTypeFrame(request_data.resource_type);
bool do_not_prompt_for_login = request_data.do_not_prompt_for_login;
bool is_sync_load = sync_result != NULL;
ChildProcessSecurityPolicyImpl* policy =
ChildProcessSecurityPolicyImpl::GetInstance();
bool report_raw_headers = request_data.report_raw_headers;
if (report_raw_headers && !policy->CanReadRawCookies(child_id)) {
VLOG(1) << "Denied unauthorized request for raw headers";
report_raw_headers = false;
}
int load_flags =
BuildLoadFlagsForRequest(request_data, child_id, is_sync_load);
if (request_data.resource_type == RESOURCE_TYPE_PREFETCH ||
request_data.resource_type == RESOURCE_TYPE_FAVICON) {
do_not_prompt_for_login = true;
}
if (request_data.resource_type == RESOURCE_TYPE_IMAGE &&
HTTP_AUTH_RELATION_BLOCKED_CROSS ==
HttpAuthRelationTypeOf(request_data.url,
request_data.first_party_for_cookies)) {
do_not_prompt_for_login = true;
load_flags |= net::LOAD_DO_NOT_USE_EMBEDDED_IDENTITY;
}
bool support_async_revalidation =
!is_sync_load && async_revalidation_manager_ &&
AsyncRevalidationManager::QualifiesForAsyncRevalidation(request_data);
if (support_async_revalidation)
load_flags |= net::LOAD_SUPPORT_ASYNC_REVALIDATION;
if (is_sync_load) {
DCHECK_EQ(request_data.priority, net::MAXIMUM_PRIORITY);
DCHECK_NE(load_flags & net::LOAD_IGNORE_LIMITS, 0);
} else {
DCHECK_EQ(load_flags & net::LOAD_IGNORE_LIMITS, 0);
}
new_request->SetLoadFlags(load_flags);
ResourceRequestInfoImpl* extra_info = new ResourceRequestInfoImpl(
process_type, child_id, route_id,
-1, // frame_tree_node_id
request_data.origin_pid, request_id, request_data.render_frame_id,
request_data.is_main_frame, request_data.parent_is_main_frame,
request_data.resource_type, request_data.transition_type,
request_data.should_replace_current_entry,
false, // is download
false, // is stream
allow_download, request_data.has_user_gesture,
request_data.enable_load_timing, request_data.enable_upload_progress,
do_not_prompt_for_login, request_data.referrer_policy,
request_data.visiblity_state, resource_context, filter_->GetWeakPtr(),
report_raw_headers, !is_sync_load,
IsUsingLoFi(request_data.lofi_state, delegate_, *new_request,
resource_context,
request_data.resource_type == RESOURCE_TYPE_MAIN_FRAME),
support_async_revalidation ? request_data.headers : std::string());
extra_info->AssociateWithRequest(new_request.get());
if (new_request->url().SchemeIs(url::kBlobScheme)) {
storage::BlobProtocolHandler::SetRequestedBlobDataHandle(
new_request.get(),
filter_->blob_storage_context()->context()->GetBlobDataFromPublicURL(
new_request->url()));
}
const bool should_skip_service_worker =
request_data.skip_service_worker || is_sync_load;
ServiceWorkerRequestHandler::InitializeHandler(
new_request.get(), filter_->service_worker_context(), blob_context,
child_id, request_data.service_worker_provider_id,
should_skip_service_worker,
request_data.fetch_request_mode, request_data.fetch_credentials_mode,
request_data.fetch_redirect_mode, request_data.resource_type,
request_data.fetch_request_context_type, request_data.fetch_frame_type,
request_data.request_body);
if (base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableExperimentalWebPlatformFeatures)) {
ForeignFetchRequestHandler::InitializeHandler(
new_request.get(), filter_->service_worker_context(), blob_context,
child_id, request_data.service_worker_provider_id,
should_skip_service_worker,
request_data.fetch_request_mode, request_data.fetch_credentials_mode,
request_data.fetch_redirect_mode, request_data.resource_type,
request_data.fetch_request_context_type, request_data.fetch_frame_type,
request_data.request_body);
}
AppCacheInterceptor::SetExtraRequestInfo(
new_request.get(), filter_->appcache_service(), child_id,
request_data.appcache_host_id, request_data.resource_type,
request_data.should_reset_appcache);
scoped_ptr<ResourceHandler> handler(
CreateResourceHandler(
new_request.get(),
request_data, sync_result, route_id, process_type, child_id,
resource_context));
if (handler)
BeginRequestInternal(std::move(new_request), std::move(handler));
}
|
C
|
Chrome
| 1 |
CVE-2017-5089
|
https://www.cvedetails.com/cve/CVE-2017-5089/
|
CWE-20
|
https://github.com/chromium/chromium/commit/507241119f279c31766bd41c33d6ffb6851e2d7e
|
507241119f279c31766bd41c33d6ffb6851e2d7e
|
Migrate download_protection code to new DM token class.
Migrates RetrieveDMToken calls to use the new BrowserDMToken class.
Bug: 1020296
Change-Id: Icef580e243430d73b6c1c42b273a8540277481d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1904234
Commit-Queue: Dominique Fauteux-Chapleau <[email protected]>
Reviewed-by: Tien Mai <[email protected]>
Reviewed-by: Daniel Rubery <[email protected]>
Cr-Commit-Position: refs/heads/master@{#714196}
|
BinaryUploadService::Request::Request(Callback callback)
: callback_(std::move(callback)) {}
|
BinaryUploadService::Request::Request(Callback callback)
: callback_(std::move(callback)) {}
|
C
|
Chrome
| 0 |
CVE-2015-3842
|
https://www.cvedetails.com/cve/CVE-2015-3842/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/av/+/aeea52da00d210587fb3ed895de3d5f2e0264c88
|
aeea52da00d210587fb3ed895de3d5f2e0264c88
|
audio effects: fix heap overflow
Check consistency of effect command reply sizes before
copying to reply address.
Also add null pointer check on reply size.
Also remove unused parameter warning.
Bug: 21953516.
Change-Id: I4cf00c12eaed696af28f3b7613f7e36f47a160c4
(cherry picked from commit 0f714a464d2425afe00d6450535e763131b40844)
|
int BassBoost_getParameter(EffectContext *pContext,
void *pParam,
uint32_t *pValueSize,
void *pValue){
int status = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
int32_t param2;
char *name;
switch (param){
case BASSBOOST_PARAM_STRENGTH_SUPPORTED:
if (*pValueSize != sizeof(uint32_t)){
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid pValueSize1 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(uint32_t);
break;
case BASSBOOST_PARAM_STRENGTH:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid pValueSize2 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
default:
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid param %d", param);
return -EINVAL;
}
switch (param){
case BASSBOOST_PARAM_STRENGTH_SUPPORTED:
*(uint32_t *)pValue = 1;
break;
case BASSBOOST_PARAM_STRENGTH:
*(int16_t *)pValue = BassGetStrength(pContext);
break;
default:
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid param %d", param);
status = -EINVAL;
break;
}
return status;
} /* end BassBoost_getParameter */
|
int BassBoost_getParameter(EffectContext *pContext,
void *pParam,
uint32_t *pValueSize,
void *pValue){
int status = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
int32_t param2;
char *name;
switch (param){
case BASSBOOST_PARAM_STRENGTH_SUPPORTED:
if (*pValueSize != sizeof(uint32_t)){
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid pValueSize1 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(uint32_t);
break;
case BASSBOOST_PARAM_STRENGTH:
if (*pValueSize != sizeof(int16_t)){
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid pValueSize2 %d", *pValueSize);
return -EINVAL;
}
*pValueSize = sizeof(int16_t);
break;
default:
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid param %d", param);
return -EINVAL;
}
switch (param){
case BASSBOOST_PARAM_STRENGTH_SUPPORTED:
*(uint32_t *)pValue = 1;
break;
case BASSBOOST_PARAM_STRENGTH:
*(int16_t *)pValue = BassGetStrength(pContext);
break;
default:
ALOGV("\tLVM_ERROR : BassBoost_getParameter() invalid param %d", param);
status = -EINVAL;
break;
}
return status;
} /* end BassBoost_getParameter */
|
C
|
Android
| 0 |
CVE-2016-7972
|
https://www.cvedetails.com/cve/CVE-2016-7972/
|
CWE-399
|
https://github.com/libass/libass/pull/240/commits/aa54e0b59200a994d50a346b5d7ac818ebcf2d4b
|
aa54e0b59200a994d50a346b5d7ac818ebcf2d4b
|
shaper: fix reallocation
Update the variable that tracks the allocated size. This potentially
improves performance and avoids some side effects, which could lead to
undefined behavior in some cases.
Fixes fuzzer test case id:000051,sig:11,sync:fuzzer3,src:004221.
|
static void ass_shaper_skip_characters(TextInfo *text_info)
{
int i;
GlyphInfo *glyphs = text_info->glyphs;
for (i = 0; i < text_info->length; i++) {
if ((glyphs[i].symbol <= 0x202e && glyphs[i].symbol >= 0x202a)
|| (glyphs[i].symbol <= 0x200f && glyphs[i].symbol >= 0x200b)
|| (glyphs[i].symbol <= 0x2063 && glyphs[i].symbol >= 0x2060)
|| glyphs[i].symbol == 0xfeff
|| glyphs[i].symbol == 0x00ad
|| glyphs[i].symbol == 0x034f) {
glyphs[i].symbol = 0;
glyphs[i].skip++;
}
}
}
|
static void ass_shaper_skip_characters(TextInfo *text_info)
{
int i;
GlyphInfo *glyphs = text_info->glyphs;
for (i = 0; i < text_info->length; i++) {
if ((glyphs[i].symbol <= 0x202e && glyphs[i].symbol >= 0x202a)
|| (glyphs[i].symbol <= 0x200f && glyphs[i].symbol >= 0x200b)
|| (glyphs[i].symbol <= 0x2063 && glyphs[i].symbol >= 0x2060)
|| glyphs[i].symbol == 0xfeff
|| glyphs[i].symbol == 0x00ad
|| glyphs[i].symbol == 0x034f) {
glyphs[i].symbol = 0;
glyphs[i].skip++;
}
}
}
|
C
|
libass
| 0 |
CVE-2016-1691
|
https://www.cvedetails.com/cve/CVE-2016-1691/
|
CWE-119
|
https://github.com/chromium/chromium/commit/e3aa8a56706c4abe208934d5c294f7b594b8b693
|
e3aa8a56706c4abe208934d5c294f7b594b8b693
|
Enforce the WebUsbAllowDevicesForUrls policy
This change modifies UsbChooserContext to use the UsbAllowDevicesForUrls
class to consider devices allowed by the WebUsbAllowDevicesForUrls
policy. The WebUsbAllowDevicesForUrls policy overrides the other WebUSB
policies. Unit tests are also added to ensure that the policy is being
enforced correctly.
The design document for this feature is found at:
https://docs.google.com/document/d/1MPvsrWiVD_jAC8ELyk8njFpy6j1thfVU5aWT3TCWE8w
Bug: 854329
Change-Id: I5f82e662ca9dc544da5918eae766b5535a31296b
Reviewed-on: https://chromium-review.googlesource.com/c/1259289
Commit-Queue: Ovidio Henriquez <[email protected]>
Reviewed-by: Reilly Grant <[email protected]>
Reviewed-by: Julian Pastarmov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#597926}
|
bool IsShowingInterstitial(content::WebContents* tab) {
if (AreCommittedInterstitialsEnabled()) {
security_interstitials::SecurityInterstitialTabHelper* helper =
security_interstitials::SecurityInterstitialTabHelper::
FromWebContents(tab);
if (!helper) {
return false;
}
return helper
->GetBlockingPageForCurrentlyCommittedNavigationForTesting() !=
nullptr;
}
return tab->GetInterstitialPage() != nullptr;
}
|
bool IsShowingInterstitial(content::WebContents* tab) {
if (AreCommittedInterstitialsEnabled()) {
security_interstitials::SecurityInterstitialTabHelper* helper =
security_interstitials::SecurityInterstitialTabHelper::
FromWebContents(tab);
if (!helper) {
return false;
}
return helper
->GetBlockingPageForCurrentlyCommittedNavigationForTesting() !=
nullptr;
}
return tab->GetInterstitialPage() != nullptr;
}
|
C
|
Chrome
| 0 |
CVE-2018-16435
|
https://www.cvedetails.com/cve/CVE-2018-16435/
|
CWE-190
|
https://github.com/mm2/Little-CMS/commit/768f70ca405cd3159d990e962d54456773bb8cf8
|
768f70ca405cd3159d990e962d54456773bb8cf8
|
Upgrade Visual studio 2017 15.8
- Upgrade to 15.8
- Add check on CGATS memory allocation (thanks to Quang Nguyen for
pointing this out)
|
int CMSEXPORT cmsIT8EnumDataFormat(cmsHANDLE hIT8, char ***SampleNames)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
TABLE* t;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
if (SampleNames)
*SampleNames = t -> DataFormat;
return t -> nSamples;
}
|
int CMSEXPORT cmsIT8EnumDataFormat(cmsHANDLE hIT8, char ***SampleNames)
{
cmsIT8* it8 = (cmsIT8*) hIT8;
TABLE* t;
_cmsAssert(hIT8 != NULL);
t = GetTable(it8);
if (SampleNames)
*SampleNames = t -> DataFormat;
return t -> nSamples;
}
|
C
|
Little-CMS
| 0 |
CVE-2015-3288
|
https://www.cvedetails.com/cve/CVE-2015-3288/
|
CWE-20
|
https://github.com/torvalds/linux/commit/6b7339f4c31ad69c8e9c0b2859276e22cf72176d
|
6b7339f4c31ad69c8e9c0b2859276e22cf72176d
|
mm: avoid setting up anonymous pages into file mapping
Reading the page fault handler code I've noticed that under the right
circumstances the kernel would map anonymous pages into file mappings: if
the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated
on ->mmap(), the kernel would handle a page fault on a not-populated pte
with do_anonymous_page().
Let's change page fault handler to use do_anonymous_page() only on
anonymous VMA (->vm_ops == NULL) and make sure that the VMA is not
shared.
For file mappings without vm_ops->fault() or a shared VMA without vm_ops,
a page fault on a pte_none() entry would lead to SIGBUS.
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Willy Tarreau <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
void sync_mm_rss(struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
add_mm_counter(mm, i, current->rss_stat.count[i]);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}
|
void sync_mm_rss(struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
add_mm_counter(mm, i, current->rss_stat.count[i]);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}
|
C
|
linux
| 0 |
CVE-2017-0377
|
https://www.cvedetails.com/cve/CVE-2017-0377/
|
CWE-200
|
https://github.com/torproject/tor/commit/665baf5ed5c6186d973c46cdea165c0548027350
|
665baf5ed5c6186d973c46cdea165c0548027350
|
Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377.
|
node_get_mutable_by_id(const char *identity_digest)
{
node_t search, *node;
if (PREDICT_UNLIKELY(the_nodelist == NULL))
return NULL;
memcpy(&search.identity, identity_digest, DIGEST_LEN);
node = HT_FIND(nodelist_map, &the_nodelist->nodes_by_id, &search);
return node;
}
|
node_get_mutable_by_id(const char *identity_digest)
{
node_t search, *node;
if (PREDICT_UNLIKELY(the_nodelist == NULL))
return NULL;
memcpy(&search.identity, identity_digest, DIGEST_LEN);
node = HT_FIND(nodelist_map, &the_nodelist->nodes_by_id, &search);
return node;
}
|
C
|
tor
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void voidMethodClampUnsignedShortArgMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
ExceptionState exceptionState(ExceptionState::ExecutionContext, "voidMethodClampUnsignedShortArg", "TestObjectPython", info.Holder(), info.GetIsolate());
if (UNLIKELY(info.Length() < 1)) {
exceptionState.throwTypeError(ExceptionMessages::notEnoughArguments(1, info.Length()));
exceptionState.throwIfNeeded();
return;
}
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
unsigned clampUnsignedShortArg = 0;
V8TRYCATCH_VOID(double, clampUnsignedShortArgNativeValue, info[0]->NumberValue());
if (!std::isnan(clampUnsignedShortArgNativeValue))
clampUnsignedShortArg = clampTo<unsigned short>(clampUnsignedShortArgNativeValue);
imp->voidMethodClampUnsignedShortArg(clampUnsignedShortArg);
}
|
static void voidMethodClampUnsignedShortArgMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
ExceptionState exceptionState(ExceptionState::ExecutionContext, "voidMethodClampUnsignedShortArg", "TestObjectPython", info.Holder(), info.GetIsolate());
if (UNLIKELY(info.Length() < 1)) {
exceptionState.throwTypeError(ExceptionMessages::notEnoughArguments(1, info.Length()));
exceptionState.throwIfNeeded();
return;
}
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
unsigned clampUnsignedShortArg = 0;
V8TRYCATCH_VOID(double, clampUnsignedShortArgNativeValue, info[0]->NumberValue());
if (!std::isnan(clampUnsignedShortArgNativeValue))
clampUnsignedShortArg = clampTo<unsigned short>(clampUnsignedShortArgNativeValue);
imp->voidMethodClampUnsignedShortArg(clampUnsignedShortArg);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/10c7ed8f076afd290fccf283d8bc416959722ca3
|
10c7ed8f076afd290fccf283d8bc416959722ca3
|
Fix bug 130606: Panels [WIN]: Alt-Tabbing to a minimized panel no longer restores it
BUG=130606
TEST=Manual test by minimizing panel and alt-tabbing to it
Review URL: https://chromiumcodereview.appspot.com/10509011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@140498 0039d316-1c4b-4281-b951-d872f2087c98
|
void PanelBrowserView::OnWidgetActivationChanged(views::Widget* widget,
bool active) {
::BrowserView::OnWidgetActivationChanged(widget, active);
#if defined(OS_WIN) && !defined(USE_AURA)
bool focused = active &&
GetFrameView()->GetWidget()->GetNativeView() == ::GetForegroundWindow();
#else
NOTIMPLEMENTED();
bool focused = active;
#endif
if (focused_ == focused)
return;
focused_ = focused;
// Expand the panel if the minimized panel is activated by means other than
// clicking on its titlebar. This is the workaround to support restoring the
// minimized panel by other means, like alt-tabbing, win-tabbing, or clicking
// the taskbar icon. Note that this workaround does not work for one edge
// case: the mouse happens to be at the minimized panel when the user tries to
// bring up the panel with the above alternatives.
// When the user clicks on the minimized panel, the panel expansion will be
// done when we process the mouse button pressed message.
if (focused_ && panel_->IsMinimized() &&
gfx::Screen::GetWindowAtCursorScreenPoint() !=
widget->GetNativeWindow()) {
panel_->Restore();
}
panel()->OnActiveStateChanged(focused);
}
|
void PanelBrowserView::OnWidgetActivationChanged(views::Widget* widget,
bool active) {
::BrowserView::OnWidgetActivationChanged(widget, active);
#if defined(OS_WIN) && !defined(USE_AURA)
bool focused = active &&
GetFrameView()->GetWidget()->GetNativeView() == ::GetForegroundWindow();
#else
NOTIMPLEMENTED();
bool focused = active;
#endif
if (focused_ == focused)
return;
focused_ = focused;
panel()->OnActiveStateChanged(focused);
}
|
C
|
Chrome
| 1 |
CVE-2012-2895
|
https://www.cvedetails.com/cve/CVE-2012-2895/
|
CWE-119
|
https://github.com/chromium/chromium/commit/16dcd30c215801941d9890859fd79a234128fc3e
|
16dcd30c215801941d9890859fd79a234128fc3e
|
Refactors to simplify rename pathway in DownloadFileManager.
This is https://chromiumcodereview.appspot.com/10668004 / r144817 (reverted
due to CrOS failure) with the completion logic moved to after the
auto-opening. The tests that test the auto-opening (for web store install)
were waiting for download completion to check install, and hence were
failing when completion was moved earlier.
Doing this right would probably require another state (OPENED).
BUG=123998
BUG=134930
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10701040
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145157 0039d316-1c4b-4281-b951-d872f2087c98
|
void ChromeDownloadManagerDelegate::RemoveItemFromPersistentStore(
DownloadItem* item) {
download_history_->RemoveEntry(item);
}
|
void ChromeDownloadManagerDelegate::RemoveItemFromPersistentStore(
DownloadItem* item) {
download_history_->RemoveEntry(item);
}
|
C
|
Chrome
| 0 |
CVE-2016-1613
|
https://www.cvedetails.com/cve/CVE-2016-1613/
| null |
https://github.com/chromium/chromium/commit/7394cf6f43d7a86630d3eb1c728fd63c621b5530
|
7394cf6f43d7a86630d3eb1c728fd63c621b5530
|
Connect the LocalDB to TabManager.
Bug: 773382
Change-Id: Iec8fe5226ee175105d51f300f30b4865478ac099
Reviewed-on: https://chromium-review.googlesource.com/1118611
Commit-Queue: Sébastien Marchand <[email protected]>
Reviewed-by: François Doray <[email protected]>
Cr-Commit-Position: refs/heads/master@{#572871}
|
bool TabLifecycleUnitSource::TabLifecycleUnit::CanFreeze(
DecisionDetails* decision_details) const {
DCHECK(decision_details->reasons().empty());
if (!IsValidStateChange(GetState(), LifecycleUnitState::PENDING_FREEZE,
StateChangeReason::BROWSER_INITIATED)) {
return false;
}
if (TabLoadTracker::Get()->GetLoadingState(GetWebContents()) !=
TabLoadTracker::LoadingState::LOADED) {
return false;
}
if (GetWebContents()->GetVisibility() == content::Visibility::VISIBLE)
decision_details->AddReason(DecisionFailureReason::LIVE_STATE_VISIBLE);
IsMediaTabImpl(decision_details);
// Consult the local database to see if this tab could try to communicate with
// the user while in background (don't check for the visibility here as
// there's already a check for that above).
CheckIfTabCanCommunicateWithUserWhileInBackground(GetWebContents(),
decision_details);
if (decision_details->reasons().empty()) {
decision_details->AddReason(
DecisionSuccessReason::HEURISTIC_OBSERVED_TO_BE_SAFE);
}
return decision_details->IsPositive();
}
|
bool TabLifecycleUnitSource::TabLifecycleUnit::CanFreeze(
DecisionDetails* decision_details) const {
DCHECK(decision_details->reasons().empty());
if (!IsValidStateChange(GetState(), LifecycleUnitState::PENDING_FREEZE,
StateChangeReason::BROWSER_INITIATED)) {
return false;
}
if (TabLoadTracker::Get()->GetLoadingState(GetWebContents()) !=
TabLoadTracker::LoadingState::LOADED) {
return false;
}
if (GetWebContents()->GetVisibility() == content::Visibility::VISIBLE)
decision_details->AddReason(DecisionFailureReason::LIVE_STATE_VISIBLE);
IsMediaTabImpl(decision_details);
if (decision_details->reasons().empty()) {
decision_details->AddReason(
DecisionSuccessReason::HEURISTIC_OBSERVED_TO_BE_SAFE);
DCHECK(decision_details->IsPositive());
}
return decision_details->IsPositive();
}
|
C
|
Chrome
| 1 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void cachedAttribute1AttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectV8Internal::cachedAttribute1AttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void cachedAttribute1AttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectV8Internal::cachedAttribute1AttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2018-17468
|
https://www.cvedetails.com/cve/CVE-2018-17468/
|
CWE-200
|
https://github.com/chromium/chromium/commit/5fe74f831fddb92afa5ddfe46490bb49f083132b
|
5fe74f831fddb92afa5ddfe46490bb49f083132b
|
Do not forward resource timing to parent frame after back-forward navigation
LocalFrame has a |should_send_resource_timing_info_to_parent_| flag so that
timing info is sent to the parent only for the first navigation. This flag is
cleared when the first timing is sent to the parent; however, this does not
happen if the iframe's first navigation was a back-forward navigation. For such
iframes, we shouldn't send timings to the parent at all.
Bug: 876822
Change-Id: I128b51a82ef278c439548afc8283ae63abdef5c5
Reviewed-on: https://chromium-review.googlesource.com/1186215
Reviewed-by: Kinuko Yasuda <[email protected]>
Commit-Queue: Kunihiko Sakamoto <[email protected]>
Cr-Commit-Position: refs/heads/master@{#585736}
|
void WebLocalFrameImpl::CommitNavigation(
const WebURLRequest& request,
WebFrameLoadType web_frame_load_type,
const WebHistoryItem& item,
bool is_client_redirect,
const base::UnguessableToken& devtools_navigation_token,
std::unique_ptr<WebDocumentLoader::ExtraData> extra_data,
const WebNavigationTimings& navigation_timings) {
DCHECK(GetFrame());
DCHECK(!request.IsNull());
DCHECK(!request.Url().ProtocolIs("javascript"));
const ResourceRequest& resource_request = request.ToResourceRequest();
if (GetTextFinder())
GetTextFinder()->ClearActiveFindMatch();
FrameLoadRequest frame_request =
FrameLoadRequest(nullptr, resource_request, /*frame_name=*/AtomicString(),
kCheckContentSecurityPolicy, devtools_navigation_token);
if (is_client_redirect)
frame_request.SetClientRedirect(ClientRedirectPolicy::kClientRedirect);
HistoryItem* history_item = item;
GetFrame()->Loader().CommitNavigation(frame_request, web_frame_load_type,
history_item, std::move(extra_data),
navigation_timings);
}
|
void WebLocalFrameImpl::CommitNavigation(
const WebURLRequest& request,
WebFrameLoadType web_frame_load_type,
const WebHistoryItem& item,
bool is_client_redirect,
const base::UnguessableToken& devtools_navigation_token,
std::unique_ptr<WebDocumentLoader::ExtraData> extra_data,
const WebNavigationTimings& navigation_timings) {
DCHECK(GetFrame());
DCHECK(!request.IsNull());
DCHECK(!request.Url().ProtocolIs("javascript"));
const ResourceRequest& resource_request = request.ToResourceRequest();
if (GetTextFinder())
GetTextFinder()->ClearActiveFindMatch();
FrameLoadRequest frame_request =
FrameLoadRequest(nullptr, resource_request, /*frame_name=*/AtomicString(),
kCheckContentSecurityPolicy, devtools_navigation_token);
if (is_client_redirect)
frame_request.SetClientRedirect(ClientRedirectPolicy::kClientRedirect);
HistoryItem* history_item = item;
GetFrame()->Loader().CommitNavigation(frame_request, web_frame_load_type,
history_item, std::move(extra_data),
navigation_timings);
}
|
C
|
Chrome
| 0 |
CVE-2017-1000418
|
https://www.cvedetails.com/cve/CVE-2017-1000418/
|
CWE-119
|
https://github.com/Mindwerks/wildmidi/commit/814f31d8eceda8401eb812fc2e94ed143fdad0ab
|
814f31d8eceda8401eb812fc2e94ed143fdad0ab
|
wildmidi_lib.c (WildMidi_Open, WildMidi_OpenBuffer): refuse to proceed if less than 18 bytes of input
Fixes bug #178.
|
static char** WM_LC_Tokenize_Line(char *line_data) {
int line_length = (int) strlen(line_data);
int token_data_length = 0;
int line_ofs = 0;
int token_start = 0;
char **token_data = NULL;
int token_count = 0;
if (!line_length) return (NULL);
do {
/* ignore everything after # */
if (line_data[line_ofs] == '#') {
break;
}
if ((line_data[line_ofs] == ' ') || (line_data[line_ofs] == '\t')) {
/* whitespace means we aren't in a token */
if (token_start) {
token_start = 0;
line_data[line_ofs] = '\0';
}
} else {
if (!token_start) {
/* the start of a token in the line */
token_start = 1;
if (token_count >= token_data_length) {
token_data_length += TOKEN_CNT_INC;
token_data = realloc(token_data, token_data_length * sizeof(char *));
if (token_data == NULL) {
_WM_GLOBAL_ERROR(__FUNCTION__, __LINE__, WM_ERR_MEM,"to parse config", errno);
return (NULL);
}
}
token_data[token_count] = &line_data[line_ofs];
token_count++;
}
}
line_ofs++;
} while (line_ofs != line_length);
/* if we have found some tokens then add a null token to the end */
if (token_count) {
if (token_count >= token_data_length) {
token_data = realloc(token_data,
((token_count + 1) * sizeof(char *)));
}
token_data[token_count] = NULL;
}
return (token_data);
}
|
static char** WM_LC_Tokenize_Line(char *line_data) {
int line_length = (int) strlen(line_data);
int token_data_length = 0;
int line_ofs = 0;
int token_start = 0;
char **token_data = NULL;
int token_count = 0;
if (!line_length) return (NULL);
do {
/* ignore everything after # */
if (line_data[line_ofs] == '#') {
break;
}
if ((line_data[line_ofs] == ' ') || (line_data[line_ofs] == '\t')) {
/* whitespace means we aren't in a token */
if (token_start) {
token_start = 0;
line_data[line_ofs] = '\0';
}
} else {
if (!token_start) {
/* the start of a token in the line */
token_start = 1;
if (token_count >= token_data_length) {
token_data_length += TOKEN_CNT_INC;
token_data = realloc(token_data, token_data_length * sizeof(char *));
if (token_data == NULL) {
_WM_GLOBAL_ERROR(__FUNCTION__, __LINE__, WM_ERR_MEM,"to parse config", errno);
return (NULL);
}
}
token_data[token_count] = &line_data[line_ofs];
token_count++;
}
}
line_ofs++;
} while (line_ofs != line_length);
/* if we have found some tokens then add a null token to the end */
if (token_count) {
if (token_count >= token_data_length) {
token_data = realloc(token_data,
((token_count + 1) * sizeof(char *)));
}
token_data[token_count] = NULL;
}
return (token_data);
}
|
C
|
wildmidi
| 0 |
CVE-2018-1000127
|
https://www.cvedetails.com/cve/CVE-2018-1000127/
|
CWE-190
|
https://github.com/memcached/memcached/commit/a8c4a82787b8b6c256d61bd5c42fb7f92d1bae00
|
a8c4a82787b8b6c256d61bd5c42fb7f92d1bae00
|
Don't overflow item refcount on get
Counts as a miss if the refcount is too high. ASCII multigets are the only
time refcounts can be held for so long.
This does a dirty read of the refcount, which is aligned. We are trying to
avoid adding an extra refcount branch for all calls of item_get due to
performance; it might be possible to move it in there after the logging
refactoring simplifies some of the branches.
|
static void add_bin_header(conn *c, uint16_t err, uint8_t hdr_len, uint16_t key_len, uint32_t body_len) {
protocol_binary_response_header* header;
assert(c);
c->msgcurr = 0;
c->msgused = 0;
c->iovused = 0;
if (add_msghdr(c) != 0) {
/* This should never run out of memory because iov and msg lists
* have minimum sizes big enough to hold an error response.
*/
out_of_memory(c, "SERVER_ERROR out of memory adding binary header");
return;
}
header = (protocol_binary_response_header *)c->wbuf;
header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
header->response.opcode = c->binary_header.request.opcode;
header->response.keylen = (uint16_t)htons(key_len);
header->response.extlen = (uint8_t)hdr_len;
header->response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
header->response.status = (uint16_t)htons(err);
header->response.bodylen = htonl(body_len);
header->response.opaque = c->opaque;
header->response.cas = htonll(c->cas);
if (settings.verbose > 1) {
int ii;
fprintf(stderr, ">%d Writing bin response:", c->sfd);
for (ii = 0; ii < sizeof(header->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n>%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", header->bytes[ii]);
}
fprintf(stderr, "\n");
}
add_iov(c, c->wbuf, sizeof(header->response));
}
|
static void add_bin_header(conn *c, uint16_t err, uint8_t hdr_len, uint16_t key_len, uint32_t body_len) {
protocol_binary_response_header* header;
assert(c);
c->msgcurr = 0;
c->msgused = 0;
c->iovused = 0;
if (add_msghdr(c) != 0) {
/* This should never run out of memory because iov and msg lists
* have minimum sizes big enough to hold an error response.
*/
out_of_memory(c, "SERVER_ERROR out of memory adding binary header");
return;
}
header = (protocol_binary_response_header *)c->wbuf;
header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
header->response.opcode = c->binary_header.request.opcode;
header->response.keylen = (uint16_t)htons(key_len);
header->response.extlen = (uint8_t)hdr_len;
header->response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
header->response.status = (uint16_t)htons(err);
header->response.bodylen = htonl(body_len);
header->response.opaque = c->opaque;
header->response.cas = htonll(c->cas);
if (settings.verbose > 1) {
int ii;
fprintf(stderr, ">%d Writing bin response:", c->sfd);
for (ii = 0; ii < sizeof(header->bytes); ++ii) {
if (ii % 4 == 0) {
fprintf(stderr, "\n>%d ", c->sfd);
}
fprintf(stderr, " 0x%02x", header->bytes[ii]);
}
fprintf(stderr, "\n");
}
add_iov(c, c->wbuf, sizeof(header->response));
}
|
C
|
memcached
| 0 |
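
The commit above treats a get as a miss when the item's reference count is already near its maximum instead of incrementing it into an overflow. A minimal C sketch of that guard, assuming a hypothetical item with a 16-bit refcount (struct cache_item and item_get_guarded are illustrative names, not memcached's real API):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for the real memcached item structure. */
struct cache_item {
    uint16_t refcount;
    /* ... key, flags, data ... */
};

/* Guarded get: refuse the reference (report a miss) instead of letting a
 * huge ASCII multiget wrap the 16-bit counter. */
struct cache_item *item_get_guarded(struct cache_item *it)
{
    if (it == NULL)
        return NULL;
    if (it->refcount >= UINT16_MAX - 1)
        return NULL;            /* counts as a miss; counter is left alone */
    it->refcount++;
    return it;
}

The commit message notes the real code relies on an aligned dirty read of the refcount to avoid adding a locked branch to every item_get call; the sketch ignores that concurrency detail.
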
CVE-2013-6621
|
https://www.cvedetails.com/cve/CVE-2013-6621/
|
CWE-399
|
https://github.com/chromium/chromium/commit/4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
Add logging to figure out which IPC we're failing to deserialize in RenderFrame.
BUG=369553
[email protected]
Review URL: https://codereview.chromium.org/263833020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268565 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderFrameImpl::OnBeforeUnload() {
CHECK(!frame_->parent());
base::TimeTicks before_unload_start_time = base::TimeTicks::Now();
bool proceed = frame_->dispatchBeforeUnloadEvent();
base::TimeTicks before_unload_end_time = base::TimeTicks::Now();
Send(new FrameHostMsg_BeforeUnload_ACK(routing_id_, proceed,
before_unload_start_time,
before_unload_end_time));
}
|
void RenderFrameImpl::OnBeforeUnload() {
CHECK(!frame_->parent());
base::TimeTicks before_unload_start_time = base::TimeTicks::Now();
bool proceed = frame_->dispatchBeforeUnloadEvent();
base::TimeTicks before_unload_end_time = base::TimeTicks::Now();
Send(new FrameHostMsg_BeforeUnload_ACK(routing_id_, proceed,
before_unload_start_time,
before_unload_end_time));
}
|
C
|
Chrome
| 0 |
CVE-2010-4250
|
https://www.cvedetails.com/cve/CVE-2010-4250/
|
CWE-399
|
https://github.com/torvalds/linux/commit/a2ae4cc9a16e211c8a128ba10d22a85431f093ab
|
a2ae4cc9a16e211c8a128ba10d22a85431f093ab
|
inotify: stop kernel memory leak on file creation failure
If inotify_init is unable to allocate a new file for the new inotify
group we leak the new group. This patch drops the reference on the
group on file allocation failure.
Reported-by: Vegard Nossum <[email protected]>
cc: [email protected]
Signed-off-by: Eric Paris <[email protected]>
|
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return group;
}
|
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return group;
}
|
C
|
linux
| 0 |
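
The leak described above has a common shape: a second allocation fails after a first object already holds a reference, and the error path forgets to drop it. A minimal sketch of the corrected pattern in plain C (group_new, group_put and create_handle are illustrative names, not the kernel's fsnotify API):

#include <stdlib.h>
#include <errno.h>

struct group  { int refcount; };
struct handle { struct group *grp; };

static void group_put(struct group *g) { if (--g->refcount == 0) free(g); }

static struct group *group_new(void)
{
    struct group *g = malloc(sizeof(*g));
    if (g)
        g->refcount = 1;
    return g;
}

/* If the second allocation fails, the reference taken on the first object
 * must be dropped before returning the error. */
int create_handle(struct handle **out)
{
    struct group *g = group_new();
    struct handle *h;

    if (!g)
        return -ENOMEM;
    h = malloc(sizeof(*h));
    if (!h) {
        group_put(g);           /* the fix: no leak on the failure path */
        return -ENOMEM;
    }
    h->grp = g;
    *out = h;
    return 0;
}
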
CVE-2017-15420
|
https://www.cvedetails.com/cve/CVE-2017-15420/
|
CWE-20
|
https://github.com/chromium/chromium/commit/56a84aa67bb071a33a48ac1481b555c48e0a9a59
|
56a84aa67bb071a33a48ac1481b555c48e0a9a59
|
Do not use NavigationEntry to block history navigations.
This is no longer necessary after r477371.
BUG=777419
TEST=See bug for repro steps.
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_site_isolation
Change-Id: I701e4d4853858281b43e3743b12274dbeadfbf18
Reviewed-on: https://chromium-review.googlesource.com/733959
Reviewed-by: Devlin <[email protected]>
Reviewed-by: Nasko Oskov <[email protected]>
Commit-Queue: Charlie Reis <[email protected]>
Cr-Commit-Position: refs/heads/master@{#511942}
|
PlzNavigateNavigationHandleImplBrowserTest() {}
|
PlzNavigateNavigationHandleImplBrowserTest() {}
|
C
|
Chrome
| 0 |
CVE-2018-19045
|
https://www.cvedetails.com/cve/CVE-2018-19045/
|
CWE-200
|
https://github.com/acassen/keepalived/commit/c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
|
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
|
Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]>
|
make_syslog_ident(const char* name)
{
size_t ident_len = strlen(name) + 1;
char *ident;
#if HAVE_DECL_CLONE_NEWNET
if (global_data->network_namespace)
ident_len += strlen(global_data->network_namespace) + 1;
#endif
if (global_data->instance_name)
ident_len += strlen(global_data->instance_name) + 1;
/* If we are writing MALLOC/FREE info to the log, we have
* trouble FREEing the syslog_ident */
#ifndef _MEM_CHECK_LOG_
ident = MALLOC(ident_len);
#else
ident = malloc(ident_len);
#endif
if (!ident)
return NULL;
strcpy(ident, name);
#if HAVE_DECL_CLONE_NEWNET
if (global_data->network_namespace) {
strcat(ident, "_");
strcat(ident, global_data->network_namespace);
}
#endif
if (global_data->instance_name) {
strcat(ident, "_");
strcat(ident, global_data->instance_name);
}
return ident;
}
|
make_syslog_ident(const char* name)
{
size_t ident_len = strlen(name) + 1;
char *ident;
#if HAVE_DECL_CLONE_NEWNET
if (global_data->network_namespace)
ident_len += strlen(global_data->network_namespace) + 1;
#endif
if (global_data->instance_name)
ident_len += strlen(global_data->instance_name) + 1;
/* If we are writing MALLOC/FREE info to the log, we have
* trouble FREEing the syslog_ident */
#ifndef _MEM_CHECK_LOG_
ident = MALLOC(ident_len);
#else
ident = malloc(ident_len);
#endif
if (!ident)
return NULL;
strcpy(ident, name);
#if HAVE_DECL_CLONE_NEWNET
if (global_data->network_namespace) {
strcat(ident, "_");
strcat(ident, global_data->network_namespace);
}
#endif
if (global_data->instance_name) {
strcat(ident, "_");
strcat(ident, global_data->instance_name);
}
return ident;
}
|
C
|
keepalived
| 0 |
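
The fix above is about file creation modes: with a permissive process umask, files created with mode 0666 stay world-writable, so the daemon applies a configurable umask (with a default that yields 0644). A small sketch of that idea, assuming hypothetical helper names rather than keepalived's real startup code:

#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>

/* Restrict the process umask early in startup so that any files created
 * later (pid files, data dumps) come out 0644 rather than 0666. The value
 * could come from a config keyword or a command-line option. */
static void apply_default_umask(mode_t configured_umask)
{
    umask(configured_umask);            /* e.g. 022 */
}

static int create_state_file(const char *path)
{
    /* With umask 022, 0666 & ~022 == 0644. */
    return open(path, O_CREAT | O_WRONLY | O_TRUNC, 0666);
}

Setting the umask once keeps every later open()/fopen() consistent, which is simpler than auditing each creation site for an explicit mode.
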
CVE-2016-4071
|
https://www.cvedetails.com/cve/CVE-2016-4071/
|
CWE-20
|
https://git.php.net/?p=php-src.git;a=commit;h=6e25966544fb1d2f3d7596e060ce9c9269bbdcf8
|
6e25966544fb1d2f3d7596e060ce9c9269bbdcf8
| null |
static int php_snmp_write_valueretrieval(php_snmp_object *snmp_object, zval *newval)
{
zval ztmp;
int ret = SUCCESS;
if (Z_TYPE_P(newval) != IS_LONG) {
ztmp = *newval;
zval_copy_ctor(&ztmp);
convert_to_long(&ztmp);
newval = &ztmp;
}
if (Z_LVAL_P(newval) >= 0 && Z_LVAL_P(newval) <= (SNMP_VALUE_LIBRARY|SNMP_VALUE_PLAIN|SNMP_VALUE_OBJECT)) {
snmp_object->valueretrieval = Z_LVAL_P(newval);
} else {
php_error_docref(NULL, E_WARNING, "Unknown SNMP value retrieval method '%pd'", Z_LVAL_P(newval));
ret = FAILURE;
}
if (newval == &ztmp) {
zval_dtor(newval);
}
return ret;
}
|
static int php_snmp_write_valueretrieval(php_snmp_object *snmp_object, zval *newval)
{
zval ztmp;
int ret = SUCCESS;
if (Z_TYPE_P(newval) != IS_LONG) {
ztmp = *newval;
zval_copy_ctor(&ztmp);
convert_to_long(&ztmp);
newval = &ztmp;
}
if (Z_LVAL_P(newval) >= 0 && Z_LVAL_P(newval) <= (SNMP_VALUE_LIBRARY|SNMP_VALUE_PLAIN|SNMP_VALUE_OBJECT)) {
snmp_object->valueretrieval = Z_LVAL_P(newval);
} else {
php_error_docref(NULL, E_WARNING, "Unknown SNMP value retrieval method '%pd'", Z_LVAL_P(newval));
ret = FAILURE;
}
if (newval == &ztmp) {
zval_dtor(newval);
}
return ret;
}
|
C
|
php
| 0 |
CVE-2018-16080
|
https://www.cvedetails.com/cve/CVE-2018-16080/
|
CWE-20
|
https://github.com/chromium/chromium/commit/c552cd7b8a0862f6b3c8c6a07f98bda3721101eb
|
c552cd7b8a0862f6b3c8c6a07f98bda3721101eb
|
Mac: turn popups into new tabs while in fullscreen.
It's platform convention to show popups as new tabs while in
non-HTML5 fullscreen. (Popups cause tabs to lose HTML5 fullscreen.)
This was implemented for Cocoa in a BrowserWindow override, but
it makes sense to just stick it into Browser and remove a ton
of override code put in just to support this.
BUG=858929, 868416
TEST=as in bugs
Change-Id: I43471f242813ec1159d9c690bab73dab3e610b7d
Reviewed-on: https://chromium-review.googlesource.com/1153455
Reviewed-by: Sidney San Martín <[email protected]>
Commit-Queue: Avi Drissman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#578755}
|
WebContentsModalDialogHost* BrowserView::GetWebContentsModalDialogHost() {
return GetBrowserViewLayout()->GetWebContentsModalDialogHost();
}
|
WebContentsModalDialogHost* BrowserView::GetWebContentsModalDialogHost() {
return GetBrowserViewLayout()->GetWebContentsModalDialogHost();
}
|
C
|
Chrome
| 0 |
CVE-2015-8839
|
https://www.cvedetails.com/cve/CVE-2015-8839/
|
CWE-362
|
https://github.com/torvalds/linux/commit/ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
|
ea3d7209ca01da209cda6f0dea8be9cc4b7a933b
|
ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size, but there isn't a similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
static int ext4_readpage(struct file *file, struct page *page)
{
int ret = -EAGAIN;
struct inode *inode = page->mapping->host;
trace_ext4_readpage(page);
if (ext4_has_inline_data(inode))
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
|
static int ext4_readpage(struct file *file, struct page *page)
{
int ret = -EAGAIN;
struct inode *inode = page->mapping->host;
trace_ext4_readpage(page);
if (ext4_has_inline_data(inode))
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
|
C
|
linux
| 0 |
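
The commit message above introduces i_mmap_sem so that page faults (readers) cannot interleave with hole punching or truncation (writers) between the page-cache purge and the block free. A userspace analogy of that locking discipline using a pthread rwlock; punch_hole and page_fault are stand-ins, not the kernel code paths:

#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Hole punching holds the lock for writing across both steps, so a fault
 * can no longer map a page between the cache truncation and the block free. */
static void punch_hole(void)
{
    pthread_rwlock_wrlock(&mmap_sem);
    /* truncate_pagecache_range(); remove_blocks(); */
    pthread_rwlock_unlock(&mmap_sem);
}

/* The fault path holds the lock for reading while it maps a page from a
 * still-valid block mapping. */
static void page_fault(void)
{
    pthread_rwlock_rdlock(&mmap_sem);
    /* fill the page from the mapping */
    pthread_rwlock_unlock(&mmap_sem);
}
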
CVE-2016-10746
|
https://www.cvedetails.com/cve/CVE-2016-10746/
|
CWE-254
|
https://github.com/libvirt/libvirt/commit/506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
506e9d6c2d4baaf580d489fff0690c0ff2ff588f
|
virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <[email protected]>
|
virDomainCoreDumpWithFormat(virDomainPtr domain, const char *to,
unsigned int dumpformat, unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(domain, "to=%s, dumpformat=%u, flags=%x",
to, dumpformat, flags);
virResetLastError();
virCheckDomainReturn(domain, -1);
conn = domain->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(to, error);
if (dumpformat >= VIR_DOMAIN_CORE_DUMP_FORMAT_LAST) {
virReportInvalidArg(flags, _("dumpformat '%d' is not supported"),
dumpformat);
goto error;
}
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DUMP_CRASH, VIR_DUMP_LIVE, error);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DUMP_CRASH, VIR_DUMP_RESET, error);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DUMP_LIVE, VIR_DUMP_RESET, error);
if (conn->driver->domainCoreDumpWithFormat) {
int ret;
char *absolute_to;
/* We must absolutize the file path as the save is done out of process */
if (virFileAbsPath(to, &absolute_to) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("could not build absolute core file path"));
goto error;
}
ret = conn->driver->domainCoreDumpWithFormat(domain, absolute_to,
dumpformat, flags);
VIR_FREE(absolute_to);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(domain->conn);
return -1;
}
|
virDomainCoreDumpWithFormat(virDomainPtr domain, const char *to,
unsigned int dumpformat, unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(domain, "to=%s, dumpformat=%u, flags=%x",
to, dumpformat, flags);
virResetLastError();
virCheckDomainReturn(domain, -1);
conn = domain->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(to, error);
if (dumpformat >= VIR_DOMAIN_CORE_DUMP_FORMAT_LAST) {
virReportInvalidArg(flags, _("dumpformat '%d' is not supported"),
dumpformat);
goto error;
}
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DUMP_CRASH, VIR_DUMP_LIVE, error);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DUMP_CRASH, VIR_DUMP_RESET, error);
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DUMP_LIVE, VIR_DUMP_RESET, error);
if (conn->driver->domainCoreDumpWithFormat) {
int ret;
char *absolute_to;
/* We must absolutize the file path as the save is done out of process */
if (virFileAbsPath(to, &absolute_to) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("could not build absolute core file path"));
goto error;
}
ret = conn->driver->domainCoreDumpWithFormat(domain, absolute_to,
dumpformat, flags);
VIR_FREE(absolute_to);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(domain->conn);
return -1;
}
|
C
|
libvirt
| 0 |
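
The rule stated above is that any API which may end up talking to the guest agent must reject read-only connections. The record's function already shows the virCheckReadOnlyGoto() pattern; here is a sketch of the same guard applied to a time-query entry point. The macro names mirror those visible in the record, but the body is abbreviated and the exact placement inside the real virDomainGetTime() is an assumption:

int
virDomainGetTimeSketch(virDomainPtr dom, long long *seconds,
                       unsigned int *nseconds, unsigned int flags)
{
    virResetLastError();
    virCheckDomainReturn(dom, -1);
    virCheckReadOnlyGoto(dom->conn->flags, error);   /* the added guard */

    /* ... forward to dom->conn->driver->domainGetTime(...) ... */
    return 0;

 error:
    virDispatchError(dom->conn);
    return -1;
}
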
CVE-2011-1799
|
https://www.cvedetails.com/cve/CVE-2011-1799/
|
CWE-20
|
https://github.com/chromium/chromium/commit/5fd35e5359c6345b8709695cd71fba307318e6aa
|
5fd35e5359c6345b8709695cd71fba307318e6aa
|
Source/WebCore: Fix for bug 64046 - Wrong image height in absolutely positioned div in
relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <[email protected]> on 2011-07-21
Reviewed by David Hyatt.
Test: fast/css/absolute-child-with-percent-height-inside-relative-parent.html
* rendering/RenderBox.cpp:
(WebCore::RenderBox::availableLogicalHeightUsing):
LayoutTests: Test to cover absolutely positioned child with percentage height
in relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <[email protected]> on 2011-07-21
Reviewed by David Hyatt.
* fast/css/absolute-child-with-percent-height-inside-relative-parent-expected.txt: Added.
* fast/css/absolute-child-with-percent-height-inside-relative-parent.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@91533 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void computeBlockStaticDistance(Length& logicalTop, Length& logicalBottom, const RenderBox* child, const RenderBoxModelObject* containerBlock)
{
if (!logicalTop.isAuto() || !logicalBottom.isAuto())
return;
int staticLogicalTop = child->layer()->staticBlockPosition() - containerBlock->borderBefore();
for (RenderObject* curr = child->parent(); curr && curr != containerBlock; curr = curr->container()) {
if (curr->isBox() && !curr->isTableRow())
staticLogicalTop += toRenderBox(curr)->logicalTop();
}
logicalTop.setValue(Fixed, staticLogicalTop);
}
|
static void computeBlockStaticDistance(Length& logicalTop, Length& logicalBottom, const RenderBox* child, const RenderBoxModelObject* containerBlock)
{
if (!logicalTop.isAuto() || !logicalBottom.isAuto())
return;
int staticLogicalTop = child->layer()->staticBlockPosition() - containerBlock->borderBefore();
for (RenderObject* curr = child->parent(); curr && curr != containerBlock; curr = curr->container()) {
if (curr->isBox() && !curr->isTableRow())
staticLogicalTop += toRenderBox(curr)->logicalTop();
}
logicalTop.setValue(Fixed, staticLogicalTop);
}
|
C
|
Chrome
| 0 |
CVE-2016-2324
|
https://www.cvedetails.com/cve/CVE-2016-2324/
|
CWE-119
|
https://github.com/git/git/commit/de1e67d0703894cb6ea782e36abb63976ab07e60
|
de1e67d0703894cb6ea782e36abb63976ab07e60
|
list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
static int git_pack_config(const char *k, const char *v, void *cb)
{
if (!strcmp(k, "pack.window")) {
window = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.windowmemory")) {
window_memory_limit = git_config_ulong(k, v);
return 0;
}
if (!strcmp(k, "pack.depth")) {
depth = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.compression")) {
int level = git_config_int(k, v);
if (level == -1)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die("bad pack compression level %d", level);
pack_compression_level = level;
pack_compression_seen = 1;
return 0;
}
if (!strcmp(k, "pack.deltacachesize")) {
max_delta_cache_size = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.deltacachelimit")) {
cache_max_small_delta_size = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.writebitmaphashcache")) {
if (git_config_bool(k, v))
write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
else
write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
}
if (!strcmp(k, "pack.usebitmaps")) {
use_bitmap_index = git_config_bool(k, v);
return 0;
}
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
die("invalid number of threads specified (%d)",
delta_search_threads);
#ifdef NO_PTHREADS
if (delta_search_threads != 1)
warning("no threads support, ignoring %s", k);
#endif
return 0;
}
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
die("bad pack.indexversion=%"PRIu32,
pack_idx_opts.version);
return 0;
}
return git_default_config(k, v, cb);
}
|
static int git_pack_config(const char *k, const char *v, void *cb)
{
if (!strcmp(k, "pack.window")) {
window = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.windowmemory")) {
window_memory_limit = git_config_ulong(k, v);
return 0;
}
if (!strcmp(k, "pack.depth")) {
depth = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.compression")) {
int level = git_config_int(k, v);
if (level == -1)
level = Z_DEFAULT_COMPRESSION;
else if (level < 0 || level > Z_BEST_COMPRESSION)
die("bad pack compression level %d", level);
pack_compression_level = level;
pack_compression_seen = 1;
return 0;
}
if (!strcmp(k, "pack.deltacachesize")) {
max_delta_cache_size = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.deltacachelimit")) {
cache_max_small_delta_size = git_config_int(k, v);
return 0;
}
if (!strcmp(k, "pack.writebitmaphashcache")) {
if (git_config_bool(k, v))
write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
else
write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
}
if (!strcmp(k, "pack.usebitmaps")) {
use_bitmap_index = git_config_bool(k, v);
return 0;
}
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
die("invalid number of threads specified (%d)",
delta_search_threads);
#ifdef NO_PTHREADS
if (delta_search_threads != 1)
warning("no threads support, ignoring %s", k);
#endif
return 0;
}
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
die("bad pack.indexversion=%"PRIu32,
pack_idx_opts.version);
return 0;
}
return git_default_config(k, v, cb);
}
|
C
|
git
| 0 |
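
The optimisation described above hinges on reusing one growing buffer: keep the directory prefix in place, temporarily append the entry name before invoking the callback, then roll the length back. A compact C sketch of that append-and-rollback idea, with a fixed-size buffer standing in for a strbuf (all names are illustrative):

#include <string.h>

struct buf { char s[4096]; size_t len; };   /* holds the "a/b/" prefix */

typedef void (*show_fn)(const char *full_path);

static void for_each_entry(struct buf *base, const char *const *names,
                           size_t n, show_fn show)
{
    size_t saved = base->len;
    for (size_t i = 0; i < n; i++) {
        size_t add = strlen(names[i]);
        if (saved + add + 1 > sizeof(base->s))
            continue;                      /* skip oversized paths in this sketch */
        memcpy(base->s + saved, names[i], add + 1);
        base->len = saved + add;
        show(base->s);                     /* callback sees the full "a/b/c" */
        base->len = saved;                 /* roll back for the next entry */
        base->s[saved] = '\0';
    }
}

The work to grow the prefix is done once and amortised; each entry only pays for copying its own name, which is why the commit reports no measurable slowdown.
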
CVE-2016-7117
|
https://www.cvedetails.com/cve/CVE-2016-7117/
|
CWE-19
|
https://github.com/torvalds/linux/commit/34b88a68f26a75e4fded796f1a49c40f82234b7d
|
34b88a68f26a75e4fded796f1a49c40f82234b7d
|
net: Fix use after free in the recvmmsg exit path
The syzkaller fuzzer hit the following use-after-free:
Call Trace:
[<ffffffff8175ea0e>] __asan_report_load8_noabort+0x3e/0x40 mm/kasan/report.c:295
[<ffffffff851cc31a>] __sys_recvmmsg+0x6fa/0x7f0 net/socket.c:2261
[< inline >] SYSC_recvmmsg net/socket.c:2281
[<ffffffff851cc57f>] SyS_recvmmsg+0x16f/0x180 net/socket.c:2270
[<ffffffff86332bb6>] entry_SYSCALL_64_fastpath+0x16/0x7a
arch/x86/entry/entry_64.S:185
And, as Dmitry rightly assessed, that is because we can drop the
reference and then touch it when the underlying recvmsg calls return
some packets and then hit an error, which will make recvmmsg set
sock->sk->sk_err, oops, fix it.
Reported-and-Tested-by: Dmitry Vyukov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Kostya Serebryany <[email protected]>
Cc: Sasha Levin <[email protected]>
Fixes: a2e2725541fa ("net: Introduce recvmmsg socket syscall")
http://lkml.kernel.org/r/[email protected]
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
return sock_sendmsg(sock, msg);
}
|
int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t size)
{
iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
return sock_sendmsg(sock, msg);
}
|
C
|
linux
| 0 |
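
The use-after-free above comes from writing sock->sk->sk_err after the exit path has already dropped its reference to the socket. The general fix is an ordering one: finish every access to the object before releasing it. A small generic sketch of that ordering (struct object and put_object are placeholders, not the socket API):

#include <stdlib.h>

struct object { int err; int refcount; };

static void put_object(struct object *o) { if (--o->refcount == 0) free(o); }

/* Copy out or finish writing everything you still need *before* dropping
 * the reference, instead of touching the object afterwards as the buggy
 * exit path did. */
static int finish_and_release(struct object *o, int pending_error)
{
    int ret;

    if (pending_error)
        o->err = pending_error;   /* write while the reference is still held */
    ret = o->err;
    put_object(o);                /* after this point, o must not be used */
    return ret;
}
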
CVE-2013-2871
|
https://www.cvedetails.com/cve/CVE-2013-2871/
|
CWE-20
|
https://github.com/chromium/chromium/commit/bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9
|
bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9
|
Setting input.x-webkit-speech should not cause focus change
In r150866, we introduced element()->focus() in destroyShadowSubtree()
to retain focus on <input> when its type attribute gets changed.
But when x-webkit-speech attribute is changed, the element is detached
before calling destroyShadowSubtree() and element()->focus() failed
This patch moves detach() after destroyShadowSubtree() to fix the
problem.
BUG=243818
TEST=fast/forms/input-type-change-focusout.html
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/16084005
git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void HTMLInputElement::setValue(const String& value, ExceptionCode& ec, TextFieldEventBehavior eventBehavior)
{
if (isFileUpload() && !value.isEmpty()) {
ec = INVALID_STATE_ERR;
return;
}
setValue(value, eventBehavior);
}
|
void HTMLInputElement::setValue(const String& value, ExceptionCode& ec, TextFieldEventBehavior eventBehavior)
{
if (isFileUpload() && !value.isEmpty()) {
ec = INVALID_STATE_ERR;
return;
}
setValue(value, eventBehavior);
}
|
C
|
Chrome
| 0 |
CVE-2016-1665
|
https://www.cvedetails.com/cve/CVE-2016-1665/
|
CWE-20
|
https://github.com/chromium/chromium/commit/282f53ffdc3b1902da86f6a0791af736837efbf8
|
282f53ffdc3b1902da86f6a0791af736837efbf8
|
[signin] Add metrics to track the source for refresh token updated events
This CL add a source for update and revoke credentials operations. It then
surfaces the source in the chrome://signin-internals page.
This CL also records the following histograms that track refresh token events:
* Signin.RefreshTokenUpdated.ToValidToken.Source
* Signin.RefreshTokenUpdated.ToInvalidToken.Source
* Signin.RefreshTokenRevoked.Source
These histograms are needed to validate the assumptions of how often tokens
are revoked by the browser and the sources for the token revocations.
Bug: 896182
Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90
Reviewed-on: https://chromium-review.googlesource.com/c/1286464
Reviewed-by: Jochen Eisinger <[email protected]>
Reviewed-by: David Roger <[email protected]>
Reviewed-by: Ilya Sherman <[email protected]>
Commit-Queue: Mihai Sardarescu <[email protected]>
Cr-Commit-Position: refs/heads/master@{#606181}
|
bool GetConfiguration(const std::string& json, SyncConfigInfo* config) {
std::unique_ptr<base::Value> parsed_value = base::JSONReader::Read(json);
base::DictionaryValue* result;
if (!parsed_value || !parsed_value->GetAsDictionary(&result)) {
DLOG(ERROR) << "GetConfiguration() not passed a Dictionary";
return false;
}
if (!result->GetBoolean("syncAllDataTypes", &config->sync_everything)) {
DLOG(ERROR) << "GetConfiguration() not passed a syncAllDataTypes value";
return false;
}
if (!result->GetBoolean("paymentsIntegrationEnabled",
&config->payments_integration_enabled)) {
DLOG(ERROR) << "GetConfiguration() not passed a paymentsIntegrationEnabled "
<< "value";
return false;
}
syncer::ModelTypeNameMap type_names = syncer::GetUserSelectableTypeNameMap();
for (syncer::ModelTypeNameMap::const_iterator it = type_names.begin();
it != type_names.end(); ++it) {
std::string key_name = it->second + std::string("Synced");
bool sync_value;
if (!result->GetBoolean(key_name, &sync_value)) {
DLOG(ERROR) << "GetConfiguration() not passed a value for " << key_name;
return false;
}
if (sync_value)
config->data_types.Put(it->first);
}
if (!result->GetBoolean("encryptAllData", &config->encrypt_all)) {
DLOG(ERROR) << "GetConfiguration() not passed a value for encryptAllData";
return false;
}
if (result->GetString("passphrase", &config->passphrase) &&
!config->passphrase.empty() &&
!result->GetBoolean("setNewPassphrase", &config->set_new_passphrase)) {
DLOG(ERROR) << "GetConfiguration() not passed a set_new_passphrase value";
return false;
}
return true;
}
|
bool GetConfiguration(const std::string& json, SyncConfigInfo* config) {
std::unique_ptr<base::Value> parsed_value = base::JSONReader::Read(json);
base::DictionaryValue* result;
if (!parsed_value || !parsed_value->GetAsDictionary(&result)) {
DLOG(ERROR) << "GetConfiguration() not passed a Dictionary";
return false;
}
if (!result->GetBoolean("syncAllDataTypes", &config->sync_everything)) {
DLOG(ERROR) << "GetConfiguration() not passed a syncAllDataTypes value";
return false;
}
if (!result->GetBoolean("paymentsIntegrationEnabled",
&config->payments_integration_enabled)) {
DLOG(ERROR) << "GetConfiguration() not passed a paymentsIntegrationEnabled "
<< "value";
return false;
}
syncer::ModelTypeNameMap type_names = syncer::GetUserSelectableTypeNameMap();
for (syncer::ModelTypeNameMap::const_iterator it = type_names.begin();
it != type_names.end(); ++it) {
std::string key_name = it->second + std::string("Synced");
bool sync_value;
if (!result->GetBoolean(key_name, &sync_value)) {
DLOG(ERROR) << "GetConfiguration() not passed a value for " << key_name;
return false;
}
if (sync_value)
config->data_types.Put(it->first);
}
if (!result->GetBoolean("encryptAllData", &config->encrypt_all)) {
DLOG(ERROR) << "GetConfiguration() not passed a value for encryptAllData";
return false;
}
if (result->GetString("passphrase", &config->passphrase) &&
!config->passphrase.empty() &&
!result->GetBoolean("setNewPassphrase", &config->set_new_passphrase)) {
DLOG(ERROR) << "GetConfiguration() not passed a set_new_passphrase value";
return false;
}
return true;
}
|
C
|
Chrome
| 0 |
CVE-2014-7909
|
https://www.cvedetails.com/cve/CVE-2014-7909/
|
CWE-189
|
https://github.com/chromium/chromium/commit/2571533bbb5b554ff47205c8ef1513ccc0817c3e
|
2571533bbb5b554ff47205c8ef1513ccc0817c3e
|
DocumentThreadableLoader: Add guards for sync notifyFinished() in setResource()
In loadRequest(), setResource() can call clear() synchronously:
DocumentThreadableLoader::clear()
DocumentThreadableLoader::handleError()
Resource::didAddClient()
RawResource::didAddClient()
and thus |m_client| can be null while resource() isn't null after setResource(),
causing crashes (Issue 595964).
This CL checks whether |*this| is destructed and
whether |m_client| is null after setResource().
BUG=595964
Review-Url: https://codereview.chromium.org/1902683002
Cr-Commit-Position: refs/heads/master@{#391001}
|
void DocumentThreadableLoader::loadResourceSynchronously(Document& document, const ResourceRequest& request, ThreadableLoaderClient& client, const ThreadableLoaderOptions& options, const ResourceLoaderOptions& resourceLoaderOptions)
{
OwnPtr<DocumentThreadableLoader> loader = adoptPtr(new DocumentThreadableLoader(document, &client, LoadSynchronously, options, resourceLoaderOptions));
loader->start(request);
}
|
void DocumentThreadableLoader::loadResourceSynchronously(Document& document, const ResourceRequest& request, ThreadableLoaderClient& client, const ThreadableLoaderOptions& options, const ResourceLoaderOptions& resourceLoaderOptions)
{
OwnPtr<DocumentThreadableLoader> loader = adoptPtr(new DocumentThreadableLoader(document, &client, LoadSynchronously, options, resourceLoaderOptions));
loader->start(request);
}
|
C
|
Chrome
| 0 |
CVE-2017-9465
|
https://www.cvedetails.com/cve/CVE-2017-9465/
|
CWE-125
|
https://github.com/VirusTotal/yara/commit/992480c30f75943e9cd6245bb2015c7737f9b661
|
992480c30f75943e9cd6245bb2015c7737f9b661
|
Fix buffer overrun (issue #678). Add assert for detecting this kind of issues earlier.
|
int _yr_re_emit(
RE_EMIT_CONTEXT* emit_context,
RE_NODE* re_node,
int flags,
uint8_t** code_addr,
size_t* code_size)
{
size_t branch_size;
size_t split_size;
size_t inst_size;
size_t jmp_size;
int emit_split;
int emit_repeat;
int emit_prolog;
int emit_epilog;
RE_REPEAT_ARGS repeat_args;
RE_REPEAT_ARGS* repeat_start_args_addr;
RE_REPEAT_ANY_ARGS repeat_any_args;
RE_NODE* left;
RE_NODE* right;
int16_t* split_offset_addr = NULL;
int16_t* jmp_offset_addr = NULL;
uint8_t* instruction_addr = NULL;
*code_size = 0;
switch(re_node->type)
{
case RE_NODE_LITERAL:
FAIL_ON_ERROR(_yr_emit_inst_arg_uint8(
emit_context,
RE_OPCODE_LITERAL,
re_node->value,
&instruction_addr,
NULL,
code_size));
break;
case RE_NODE_MASKED_LITERAL:
FAIL_ON_ERROR(_yr_emit_inst_arg_uint16(
emit_context,
RE_OPCODE_MASKED_LITERAL,
re_node->mask << 8 | re_node->value,
&instruction_addr,
NULL,
code_size));
break;
case RE_NODE_WORD_CHAR:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_WORD_CHAR,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_WORD_CHAR:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_WORD_CHAR,
&instruction_addr,
code_size));
break;
case RE_NODE_WORD_BOUNDARY:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_WORD_BOUNDARY,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_WORD_BOUNDARY:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_WORD_BOUNDARY,
&instruction_addr,
code_size));
break;
case RE_NODE_SPACE:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_SPACE,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_SPACE:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_SPACE,
&instruction_addr,
code_size));
break;
case RE_NODE_DIGIT:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_DIGIT,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_DIGIT:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_DIGIT,
&instruction_addr,
code_size));
break;
case RE_NODE_ANY:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_ANY,
&instruction_addr,
code_size));
break;
case RE_NODE_CLASS:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_CLASS,
&instruction_addr,
code_size));
FAIL_ON_ERROR(yr_arena_write_data(
emit_context->arena,
re_node->class_vector,
32,
NULL));
*code_size += 32;
break;
case RE_NODE_ANCHOR_START:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_MATCH_AT_START,
&instruction_addr,
code_size));
break;
case RE_NODE_ANCHOR_END:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_MATCH_AT_END,
&instruction_addr,
code_size));
break;
case RE_NODE_CONCAT:
if (flags & EMIT_BACKWARDS)
{
left = re_node->right;
right = re_node->left;
}
else
{
left = re_node->left;
right = re_node->right;
}
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
left,
flags,
&instruction_addr,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
right,
flags,
NULL,
&branch_size));
*code_size += branch_size;
break;
case RE_NODE_PLUS:
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
&instruction_addr,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
re_node->greedy ? RE_OPCODE_SPLIT_B : RE_OPCODE_SPLIT_A,
-((int16_t) branch_size),
NULL,
&split_offset_addr,
&split_size));
*code_size += split_size;
break;
case RE_NODE_STAR:
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
re_node->greedy ? RE_OPCODE_SPLIT_A : RE_OPCODE_SPLIT_B,
0,
&instruction_addr,
&split_offset_addr,
&split_size));
*code_size += split_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
NULL,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_emit_inst_arg_int16(
emit_context,
RE_OPCODE_JUMP,
-((uint16_t)(branch_size + split_size)),
NULL,
&jmp_offset_addr,
&jmp_size));
*code_size += jmp_size;
assert(split_size + branch_size + jmp_size < INT16_MAX);
*split_offset_addr = (int16_t) (split_size + branch_size + jmp_size);
break;
case RE_NODE_ALT:
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
RE_OPCODE_SPLIT_A,
0,
&instruction_addr,
&split_offset_addr,
&split_size));
*code_size += split_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
NULL,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_emit_inst_arg_int16(
emit_context,
RE_OPCODE_JUMP,
0,
NULL,
&jmp_offset_addr,
&jmp_size));
*code_size += jmp_size;
assert(split_size + branch_size + jmp_size < INT16_MAX);
*split_offset_addr = (int16_t) (split_size + branch_size + jmp_size);
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->right,
flags,
NULL,
&branch_size));
*code_size += branch_size;
assert(branch_size + jmp_size < INT16_MAX);
*jmp_offset_addr = (int16_t) (branch_size + jmp_size);
break;
case RE_NODE_RANGE_ANY:
repeat_any_args.min = re_node->start;
repeat_any_args.max = re_node->end;
FAIL_ON_ERROR(_yr_emit_inst_arg_struct(
emit_context,
re_node->greedy ?
RE_OPCODE_REPEAT_ANY_GREEDY :
RE_OPCODE_REPEAT_ANY_UNGREEDY,
&repeat_any_args,
sizeof(repeat_any_args),
&instruction_addr,
NULL,
&inst_size));
*code_size += inst_size;
break;
case RE_NODE_RANGE:
emit_prolog = re_node->start > 0;
emit_repeat = re_node->end > re_node->start + 1 || re_node->end > 2;
emit_split = re_node->end > re_node->start;
emit_epilog = re_node->end > re_node->start || re_node->end > 1;
if (emit_prolog)
{
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
&instruction_addr,
&branch_size));
*code_size += branch_size;
}
if (emit_repeat)
{
repeat_args.min = re_node->start;
repeat_args.max = re_node->end;
if (emit_prolog)
{
repeat_args.max--;
repeat_args.min--;
}
if (emit_split)
repeat_args.max--;
else
repeat_args.min--;
repeat_args.offset = 0;
FAIL_ON_ERROR(_yr_emit_inst_arg_struct(
emit_context,
re_node->greedy ?
RE_OPCODE_REPEAT_START_GREEDY :
RE_OPCODE_REPEAT_START_UNGREEDY,
&repeat_args,
sizeof(repeat_args),
emit_prolog ? NULL : &instruction_addr,
(void**) &repeat_start_args_addr,
&inst_size));
*code_size += inst_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags | EMIT_DONT_SET_FORWARDS_CODE | EMIT_DONT_SET_BACKWARDS_CODE,
NULL,
&branch_size));
*code_size += branch_size;
repeat_start_args_addr->offset = (int32_t)(2 * inst_size + branch_size);
repeat_args.offset = -((int32_t) branch_size);
FAIL_ON_ERROR(_yr_emit_inst_arg_struct(
emit_context,
re_node->greedy ?
RE_OPCODE_REPEAT_END_GREEDY :
RE_OPCODE_REPEAT_END_UNGREEDY,
&repeat_args,
sizeof(repeat_args),
NULL,
NULL,
&inst_size));
*code_size += inst_size;
}
if (emit_split)
{
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
re_node->greedy ?
RE_OPCODE_SPLIT_A :
RE_OPCODE_SPLIT_B,
0,
NULL,
&split_offset_addr,
&split_size));
*code_size += split_size;
}
if (emit_epilog)
{
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
emit_prolog ? flags | EMIT_DONT_SET_FORWARDS_CODE : flags,
emit_prolog || emit_repeat ? NULL : &instruction_addr,
&branch_size));
*code_size += branch_size;
}
if (emit_split)
{
assert(split_size + branch_size < INT16_MAX);
*split_offset_addr = (int16_t) (split_size + branch_size);
}
break;
}
if (flags & EMIT_BACKWARDS)
{
if (!(flags & EMIT_DONT_SET_BACKWARDS_CODE))
re_node->backward_code = instruction_addr + *code_size;
}
else
{
if (!(flags & EMIT_DONT_SET_FORWARDS_CODE))
re_node->forward_code = instruction_addr;
}
if (code_addr != NULL)
*code_addr = instruction_addr;
return ERROR_SUCCESS;
}
|
int _yr_re_emit(
RE_EMIT_CONTEXT* emit_context,
RE_NODE* re_node,
int flags,
uint8_t** code_addr,
size_t* code_size)
{
size_t branch_size;
size_t split_size;
size_t inst_size;
size_t jmp_size;
int emit_split;
int emit_repeat;
int emit_prolog;
int emit_epilog;
RE_REPEAT_ARGS repeat_args;
RE_REPEAT_ARGS* repeat_start_args_addr;
RE_REPEAT_ANY_ARGS repeat_any_args;
RE_NODE* left;
RE_NODE* right;
int16_t* split_offset_addr = NULL;
int16_t* jmp_offset_addr = NULL;
uint8_t* instruction_addr = NULL;
*code_size = 0;
switch(re_node->type)
{
case RE_NODE_LITERAL:
FAIL_ON_ERROR(_yr_emit_inst_arg_uint8(
emit_context,
RE_OPCODE_LITERAL,
re_node->value,
&instruction_addr,
NULL,
code_size));
break;
case RE_NODE_MASKED_LITERAL:
FAIL_ON_ERROR(_yr_emit_inst_arg_uint16(
emit_context,
RE_OPCODE_MASKED_LITERAL,
re_node->mask << 8 | re_node->value,
&instruction_addr,
NULL,
code_size));
break;
case RE_NODE_WORD_CHAR:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_WORD_CHAR,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_WORD_CHAR:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_WORD_CHAR,
&instruction_addr,
code_size));
break;
case RE_NODE_WORD_BOUNDARY:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_WORD_BOUNDARY,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_WORD_BOUNDARY:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_WORD_BOUNDARY,
&instruction_addr,
code_size));
break;
case RE_NODE_SPACE:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_SPACE,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_SPACE:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_SPACE,
&instruction_addr,
code_size));
break;
case RE_NODE_DIGIT:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_DIGIT,
&instruction_addr,
code_size));
break;
case RE_NODE_NON_DIGIT:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_NON_DIGIT,
&instruction_addr,
code_size));
break;
case RE_NODE_ANY:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_ANY,
&instruction_addr,
code_size));
break;
case RE_NODE_CLASS:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_CLASS,
&instruction_addr,
code_size));
FAIL_ON_ERROR(yr_arena_write_data(
emit_context->arena,
re_node->class_vector,
32,
NULL));
*code_size += 32;
break;
case RE_NODE_ANCHOR_START:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_MATCH_AT_START,
&instruction_addr,
code_size));
break;
case RE_NODE_ANCHOR_END:
FAIL_ON_ERROR(_yr_emit_inst(
emit_context,
RE_OPCODE_MATCH_AT_END,
&instruction_addr,
code_size));
break;
case RE_NODE_CONCAT:
if (flags & EMIT_BACKWARDS)
{
left = re_node->right;
right = re_node->left;
}
else
{
left = re_node->left;
right = re_node->right;
}
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
left,
flags,
&instruction_addr,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
right,
flags,
NULL,
&branch_size));
*code_size += branch_size;
break;
case RE_NODE_PLUS:
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
&instruction_addr,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
re_node->greedy ? RE_OPCODE_SPLIT_B : RE_OPCODE_SPLIT_A,
-((int16_t) branch_size),
NULL,
&split_offset_addr,
&split_size));
*code_size += split_size;
break;
case RE_NODE_STAR:
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
re_node->greedy ? RE_OPCODE_SPLIT_A : RE_OPCODE_SPLIT_B,
0,
&instruction_addr,
&split_offset_addr,
&split_size));
*code_size += split_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
NULL,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_emit_inst_arg_int16(
emit_context,
RE_OPCODE_JUMP,
-((uint16_t)(branch_size + split_size)),
NULL,
&jmp_offset_addr,
&jmp_size));
*code_size += jmp_size;
assert(split_size + branch_size + jmp_size < INT16_MAX);
*split_offset_addr = (int16_t) (split_size + branch_size + jmp_size);
break;
case RE_NODE_ALT:
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
RE_OPCODE_SPLIT_A,
0,
&instruction_addr,
&split_offset_addr,
&split_size));
*code_size += split_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
NULL,
&branch_size));
*code_size += branch_size;
FAIL_ON_ERROR(_yr_emit_inst_arg_int16(
emit_context,
RE_OPCODE_JUMP,
0,
NULL,
&jmp_offset_addr,
&jmp_size));
*code_size += jmp_size;
assert(split_size + branch_size + jmp_size < INT16_MAX);
*split_offset_addr = (int16_t) (split_size + branch_size + jmp_size);
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->right,
flags,
NULL,
&branch_size));
*code_size += branch_size;
assert(branch_size + jmp_size < INT16_MAX);
*jmp_offset_addr = (int16_t) (branch_size + jmp_size);
break;
case RE_NODE_RANGE_ANY:
repeat_any_args.min = re_node->start;
repeat_any_args.max = re_node->end;
FAIL_ON_ERROR(_yr_emit_inst_arg_struct(
emit_context,
re_node->greedy ?
RE_OPCODE_REPEAT_ANY_GREEDY :
RE_OPCODE_REPEAT_ANY_UNGREEDY,
&repeat_any_args,
sizeof(repeat_any_args),
&instruction_addr,
NULL,
&inst_size));
*code_size += inst_size;
break;
case RE_NODE_RANGE:
emit_prolog = re_node->start > 0;
emit_repeat = re_node->end > re_node->start + 1 || re_node->end > 2;
emit_split = re_node->end > re_node->start;
emit_epilog = re_node->end > re_node->start || re_node->end > 1;
if (emit_prolog)
{
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags,
&instruction_addr,
&branch_size));
*code_size += branch_size;
}
if (emit_repeat)
{
repeat_args.min = re_node->start;
repeat_args.max = re_node->end;
if (emit_prolog)
{
repeat_args.max--;
repeat_args.min--;
}
if (emit_split)
repeat_args.max--;
else
repeat_args.min--;
repeat_args.offset = 0;
FAIL_ON_ERROR(_yr_emit_inst_arg_struct(
emit_context,
re_node->greedy ?
RE_OPCODE_REPEAT_START_GREEDY :
RE_OPCODE_REPEAT_START_UNGREEDY,
&repeat_args,
sizeof(repeat_args),
emit_prolog ? NULL : &instruction_addr,
(void**) &repeat_start_args_addr,
&inst_size));
*code_size += inst_size;
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
flags | EMIT_DONT_SET_FORWARDS_CODE | EMIT_DONT_SET_BACKWARDS_CODE,
NULL,
&branch_size));
*code_size += branch_size;
repeat_start_args_addr->offset = (int32_t)(2 * inst_size + branch_size);
repeat_args.offset = -((int32_t) branch_size);
FAIL_ON_ERROR(_yr_emit_inst_arg_struct(
emit_context,
re_node->greedy ?
RE_OPCODE_REPEAT_END_GREEDY :
RE_OPCODE_REPEAT_END_UNGREEDY,
&repeat_args,
sizeof(repeat_args),
NULL,
NULL,
&inst_size));
*code_size += inst_size;
}
if (emit_split)
{
FAIL_ON_ERROR(_yr_emit_split(
emit_context,
re_node->greedy ?
RE_OPCODE_SPLIT_A :
RE_OPCODE_SPLIT_B,
0,
NULL,
&split_offset_addr,
&split_size));
*code_size += split_size;
}
if (emit_epilog)
{
FAIL_ON_ERROR(_yr_re_emit(
emit_context,
re_node->left,
emit_prolog ? flags | EMIT_DONT_SET_FORWARDS_CODE : flags,
emit_prolog || emit_repeat ? NULL : &instruction_addr,
&branch_size));
*code_size += branch_size;
}
if (emit_split)
{
assert(split_size + branch_size < INT16_MAX);
*split_offset_addr = (int16_t) (split_size + branch_size);
}
break;
}
if (flags & EMIT_BACKWARDS)
{
if (!(flags & EMIT_DONT_SET_BACKWARDS_CODE))
re_node->backward_code = instruction_addr + *code_size;
}
else
{
if (!(flags & EMIT_DONT_SET_FORWARDS_CODE))
re_node->forward_code = instruction_addr;
}
if (code_addr != NULL)
*code_addr = instruction_addr;
return ERROR_SUCCESS;
}
|
C
|
yara
| 0 |
CVE-2019-1563
|
https://www.cvedetails.com/cve/CVE-2019-1563/
|
CWE-311
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff;h=08229ad838c50f644d7e928e2eef147b4308ad64
|
08229ad838c50f644d7e928e2eef147b4308ad64
| null |
EVP_PKEY_CTX *CMS_RecipientInfo_get0_pkey_ctx(CMS_RecipientInfo *ri)
{
if (ri->type == CMS_RECIPINFO_TRANS)
return ri->d.ktri->pctx;
else if (ri->type == CMS_RECIPINFO_AGREE)
return ri->d.kari->pctx;
return NULL;
}
|
EVP_PKEY_CTX *CMS_RecipientInfo_get0_pkey_ctx(CMS_RecipientInfo *ri)
{
if (ri->type == CMS_RECIPINFO_TRANS)
return ri->d.ktri->pctx;
else if (ri->type == CMS_RECIPINFO_AGREE)
return ri->d.kari->pctx;
return NULL;
}
|
C
|
openssl
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/62b8b6e168a12263aab6b88dbef0b900cc37309f
|
62b8b6e168a12263aab6b88dbef0b900cc37309f
|
Add partial magnifier to ash palette.
The partial magnifier will magnify a small portion of the screen, similar to a spyglass.
TEST=./out/Release/ash_unittests --gtest_filter=PartialMagnificationControllerTest.*
[email protected]
BUG=616112
Review-Url: https://codereview.chromium.org/2239553002
Cr-Commit-Position: refs/heads/master@{#414124}
|
void PaletteDelegateChromeOS::CreateNote() {
chromeos::LaunchNoteTakingAppForNewNote(GetProfile(), base::FilePath());
}
|
void PaletteDelegateChromeOS::CreateNote() {
chromeos::LaunchNoteTakingAppForNewNote(GetProfile(), base::FilePath());
}
|
C
|
Chrome
| 0 |
CVE-2018-14395
|
https://www.cvedetails.com/cve/CVE-2018-14395/
|
CWE-369
|
https://github.com/FFmpeg/FFmpeg/commit/fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
fa19fbcf712a6a6cc5a5cfdc3254a97b9bce6582
|
avformat/movenc: Write version 2 of audio atom if channels is not known
The version 1 needs the channel count and would divide by 0
Fixes: division by 0
Fixes: fpe_movenc.c_1108_1.ogg
Fixes: fpe_movenc.c_1108_2.ogg
Fixes: fpe_movenc.c_1108_3.wav
Found-by: #CHEN HONGXU# <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
static int mov_write_eac3_tag(AVIOContext *pb, MOVTrack *track)
{
PutBitContext pbc;
uint8_t *buf;
struct eac3_info *info;
int size, i;
if (!track->eac3_priv)
return AVERROR(EINVAL);
info = track->eac3_priv;
size = 2 + 4 * (info->num_ind_sub + 1);
buf = av_malloc(size);
if (!buf) {
size = AVERROR(ENOMEM);
goto end;
}
init_put_bits(&pbc, buf, size);
put_bits(&pbc, 13, info->data_rate);
put_bits(&pbc, 3, info->num_ind_sub);
for (i = 0; i <= info->num_ind_sub; i++) {
put_bits(&pbc, 2, info->substream[i].fscod);
put_bits(&pbc, 5, info->substream[i].bsid);
put_bits(&pbc, 1, 0); /* reserved */
put_bits(&pbc, 1, 0); /* asvc */
put_bits(&pbc, 3, info->substream[i].bsmod);
put_bits(&pbc, 3, info->substream[i].acmod);
put_bits(&pbc, 1, info->substream[i].lfeon);
put_bits(&pbc, 5, 0); /* reserved */
put_bits(&pbc, 4, info->substream[i].num_dep_sub);
if (!info->substream[i].num_dep_sub) {
put_bits(&pbc, 1, 0); /* reserved */
size--;
} else {
put_bits(&pbc, 9, info->substream[i].chan_loc);
}
}
flush_put_bits(&pbc);
avio_wb32(pb, size + 8);
ffio_wfourcc(pb, "dec3");
avio_write(pb, buf, size);
av_free(buf);
end:
av_packet_unref(&info->pkt);
av_freep(&track->eac3_priv);
return size;
}
|
static int mov_write_eac3_tag(AVIOContext *pb, MOVTrack *track)
{
PutBitContext pbc;
uint8_t *buf;
struct eac3_info *info;
int size, i;
if (!track->eac3_priv)
return AVERROR(EINVAL);
info = track->eac3_priv;
size = 2 + 4 * (info->num_ind_sub + 1);
buf = av_malloc(size);
if (!buf) {
size = AVERROR(ENOMEM);
goto end;
}
init_put_bits(&pbc, buf, size);
put_bits(&pbc, 13, info->data_rate);
put_bits(&pbc, 3, info->num_ind_sub);
for (i = 0; i <= info->num_ind_sub; i++) {
put_bits(&pbc, 2, info->substream[i].fscod);
put_bits(&pbc, 5, info->substream[i].bsid);
put_bits(&pbc, 1, 0); /* reserved */
put_bits(&pbc, 1, 0); /* asvc */
put_bits(&pbc, 3, info->substream[i].bsmod);
put_bits(&pbc, 3, info->substream[i].acmod);
put_bits(&pbc, 1, info->substream[i].lfeon);
put_bits(&pbc, 5, 0); /* reserved */
put_bits(&pbc, 4, info->substream[i].num_dep_sub);
if (!info->substream[i].num_dep_sub) {
put_bits(&pbc, 1, 0); /* reserved */
size--;
} else {
put_bits(&pbc, 9, info->substream[i].chan_loc);
}
}
flush_put_bits(&pbc);
avio_wb32(pb, size + 8);
ffio_wfourcc(pb, "dec3");
avio_write(pb, buf, size);
av_free(buf);
end:
av_packet_unref(&info->pkt);
av_freep(&track->eac3_priv);
return size;
}
|
C
|
FFmpeg
| 0 |
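
The division by zero above happens because the version 1 audio sample entry derives per-channel fields from the channel count. The fix selects the version 2 layout when the channel count is unknown, so the per-channel division is never reached with zero. A minimal sketch of that decision; the names and layout are hypothetical, not FFmpeg's real MOVTrack fields:

#include <stdint.h>

/* If the channel count is zero (unknown), pick the atom version whose
 * layout does not require dividing by the channel count. */
static int pick_audio_atom_version(int channels)
{
    return channels ? 1 : 2;
}

static uint32_t bytes_per_channel(uint32_t block_size, int channels)
{
    if (channels <= 0)
        return 0;              /* caller chose version 2; no per-channel math */
    return block_size / (uint32_t)channels;
}
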
CVE-2013-7296
|
https://www.cvedetails.com/cve/CVE-2013-7296/
|
CWE-119
|
https://cgit.freedesktop.org/poppler/poppler/commit/?id=58e04a08afee39370283c494ee2e4e392fd3b684
|
58e04a08afee39370283c494ee2e4e392fd3b684
| null |
JBIG2PatternDict::JBIG2PatternDict(Guint segNumA, Guint sizeA):
JBIG2Segment(segNumA)
{
bitmaps = (JBIG2Bitmap **)gmallocn_checkoverflow(sizeA, sizeof(JBIG2Bitmap *));
if (bitmaps) {
size = sizeA;
} else {
size = 0;
error(errSyntaxError, -1, "JBIG2PatternDict: can't allocate bitmaps");
}
}
|
JBIG2PatternDict::JBIG2PatternDict(Guint segNumA, Guint sizeA):
JBIG2Segment(segNumA)
{
bitmaps = (JBIG2Bitmap **)gmallocn_checkoverflow(sizeA, sizeof(JBIG2Bitmap *));
if (bitmaps) {
size = sizeA;
} else {
size = 0;
error(errSyntaxError, -1, "JBIG2PatternDict: can't allocate bitmaps");
}
}
|
CPP
|
poppler
| 0 |
CVE-2011-2517
|
https://www.cvedetails.com/cve/CVE-2011-2517/
|
CWE-119
|
https://github.com/torvalds/linux/commit/208c72f4fe44fe09577e7975ba0e7fa0278f3d03
|
208c72f4fe44fe09577e7975ba0e7fa0278f3d03
|
nl80211: fix check for valid SSID size in scan operations
In both trigger_scan and sched_scan operations, we were checking for
the SSID length before assigning the value correctly. Since the
memory was just kzalloc'ed, the check was always failing and SSID with
over 32 characters were allowed to go through.
This was causing a buffer overflow when copying the actual SSID to the
proper place.
This bug has been there since 2.6.29-rc4.
Cc: [email protected]
Signed-off-by: Luciano Coelho <[email protected]>
Signed-off-by: John W. Linville <[email protected]>
|
static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
{
if (info->attrs[NL80211_ATTR_KEY_DATA]) {
k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]);
k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
}
if (info->attrs[NL80211_ATTR_KEY_SEQ]) {
k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
}
if (info->attrs[NL80211_ATTR_KEY_IDX])
k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
if (info->attrs[NL80211_ATTR_KEY_CIPHER])
k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]);
k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT];
k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT];
if (k->def) {
k->def_uni = true;
k->def_multi = true;
}
if (k->defmgmt)
k->def_multi = true;
if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
int err = nla_parse_nested(
kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES],
nl80211_key_default_policy);
if (err)
return err;
k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST];
k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST];
}
return 0;
}
|
static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
{
if (info->attrs[NL80211_ATTR_KEY_DATA]) {
k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]);
k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
}
if (info->attrs[NL80211_ATTR_KEY_SEQ]) {
k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
}
if (info->attrs[NL80211_ATTR_KEY_IDX])
k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
if (info->attrs[NL80211_ATTR_KEY_CIPHER])
k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]);
k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT];
k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT];
if (k->def) {
k->def_uni = true;
k->def_multi = true;
}
if (k->defmgmt)
k->def_multi = true;
if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES];
int err = nla_parse_nested(
kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES],
nl80211_key_default_policy);
if (err)
return err;
k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST];
k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST];
}
return 0;
}
|
C
|
linux
| 0 |
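
The overflow above is an order-of-operations bug: the length check ran against a field that had just been kzalloc'ed to zero, so it always passed, and an over-long SSID was later copied into a 32-byte buffer. A minimal sketch of the corrected order, validating the incoming length before any copy (struct ssid and set_ssid are illustrative names):

#include <string.h>
#include <errno.h>
#include <stdint.h>

#define SSID_MAX_LEN 32

struct ssid { uint8_t data[SSID_MAX_LEN]; size_t len; };

/* Validate the *incoming* length before any assignment or copy, rather than
 * checking a destination field that was just zeroed. */
static int set_ssid(struct ssid *dst, const uint8_t *src, size_t src_len)
{
    if (src_len > SSID_MAX_LEN)
        return -EINVAL;        /* reject instead of overflowing data[] */
    memcpy(dst->data, src, src_len);
    dst->len = src_len;
    return 0;
}
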
null | null | null |
https://github.com/chromium/chromium/commit/223c449d19eb5d889bc828e011c1a23e5d52b4c9
|
223c449d19eb5d889bc828e011c1a23e5d52b4c9
|
Handle CreateFile() trimming trailing dots and spaces in downloads.
BUG=37007
TEST=unit_tests --gtest_filter=DownloadManagerTest.*
Review URL: http://codereview.chromium.org/660297
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@40479 0039d316-1c4b-4281-b951-d872f2087c98
|
bool ParseHostAndPort(const std::string& host_and_port,
std::string* host,
int* port) {
return ParseHostAndPort(
host_and_port.begin(), host_and_port.end(), host, port);
}
|
bool ParseHostAndPort(const std::string& host_and_port,
std::string* host,
int* port) {
return ParseHostAndPort(
host_and_port.begin(), host_and_port.end(), host, port);
}
|
C
|
Chrome
| 0 |
CVE-2011-2849
|
https://www.cvedetails.com/cve/CVE-2011-2849/
| null |
https://github.com/chromium/chromium/commit/5dc90e57abcc7f0489e7ae09a3e687e9c6f4fad5
|
5dc90e57abcc7f0489e7ae09a3e687e9c6f4fad5
|
Use ScopedRunnableMethodFactory in WebSocketJob
Don't post SendPending if it is already posted.
BUG=89795
TEST=none
Review URL: http://codereview.chromium.org/7488007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93599 0039d316-1c4b-4281-b951-d872f2087c98
|
GURL WebSocketJob::GetURLForCookies() const {
GURL url = socket_->url();
std::string scheme = socket_->is_secure() ? "https" : "http";
url_canon::Replacements<char> replacements;
replacements.SetScheme(scheme.c_str(),
url_parse::Component(0, scheme.length()));
return url.ReplaceComponents(replacements);
}
|
GURL WebSocketJob::GetURLForCookies() const {
GURL url = socket_->url();
std::string scheme = socket_->is_secure() ? "https" : "http";
url_canon::Replacements<char> replacements;
replacements.SetScheme(scheme.c_str(),
url_parse::Component(0, scheme.length()));
return url.ReplaceComponents(replacements);
}
|
C
|
Chrome
| 0 |
CVE-2018-20553
|
https://www.cvedetails.com/cve/CVE-2018-20553/
|
CWE-125
|
https://github.com/appneta/tcpreplay/pull/532/commits/6b830a1640ca20528032c89a4fdd8291a4d2d8b2
|
6b830a1640ca20528032c89a4fdd8291a4d2d8b2
|
Bug #520 Fix heap overflow on zero or 0xFFFF packet length
Add check for packets that report zero packet length. Example
of fix:
src/tcpprep --auto=bridge --pcap=poc16-get_l2len-heapoverflow --cachefile=/dev/null
Warning: poc16-get_l2len-heapoverflow was captured using a snaplen of 17 bytes. This may mean you have truncated packets.
safe_pcap_next ERROR: Invalid packet length in tcpprep.c:process_raw_packets() line 334: packet length=0 capture length=0
|
_our_safe_malloc(size_t len, const char *funcname, const int line, const char *file)
{
u_char *ptr;
if ((ptr = malloc(len)) == NULL) {
fprintf(stderr, "ERROR in %s:%s() line %d: Unable to malloc() %zu bytes/n",
file, funcname, line, len);
exit(-1);
}
/* zero memory */
memset(ptr, 0, len);
/* wrapped inside an #ifdef for better performance */
dbgx(5, "Malloc'd %zu bytes in %s:%s() line %d", len, file, funcname, line);
return (void *)ptr;
}
|
_our_safe_malloc(size_t len, const char *funcname, const int line, const char *file)
{
u_char *ptr;
if ((ptr = malloc(len)) == NULL) {
fprintf(stderr, "ERROR in %s:%s() line %d: Unable to malloc() %zu bytes/n",
file, funcname, line, len);
exit(-1);
}
/* zero memory */
memset(ptr, 0, len);
/* wrapped inside an #ifdef for better performance */
dbgx(5, "Malloc'd %zu bytes in %s:%s() line %d", len, file, funcname, line);
return (void *)ptr;
}
|
C
|
tcpreplay
| 0 |
CVE-2012-0037
|
https://www.cvedetails.com/cve/CVE-2012-0037/
|
CWE-200
|
https://github.com/dajobe/raptor/commit/a676f235309a59d4aa78eeffd2574ae5d341fcb0
|
a676f235309a59d4aa78eeffd2574ae5d341fcb0
|
CVE-2012-0037
Enforce entity loading policy in raptor_libxml_resolveEntity
and raptor_libxml_getEntity by checking for file URIs and network URIs.
Add RAPTOR_OPTION_LOAD_EXTERNAL_ENTITIES / loadExternalEntities for
turning on loading of XML external entities, disabled by default.
This affects all the parsers that use SAX2: rdfxml, rss-tag-soup (and
aliases) and rdfa.
|
raptor_world_get_option_from_uri(raptor_world* world, raptor_uri *uri)
{
unsigned char *uri_string;
int i;
raptor_option option = (raptor_option)-1;
if(!uri)
return option;
RAPTOR_ASSERT_OBJECT_POINTER_RETURN_VALUE(world, raptor_world, (raptor_option)-1);
raptor_world_open(world);
uri_string = raptor_uri_as_string(uri);
if(strncmp((const char*)uri_string, raptor_option_uri_prefix,
raptor_option_uri_prefix_len))
return option;
uri_string += raptor_option_uri_prefix_len;
for(i = 0; i <= RAPTOR_OPTION_LAST; i++)
if(!strcmp(raptor_options_list[i].name, (const char*)uri_string)) {
option = (raptor_option)i;
break;
}
return option;
}
|
raptor_world_get_option_from_uri(raptor_world* world, raptor_uri *uri)
{
unsigned char *uri_string;
int i;
raptor_option option = (raptor_option)-1;
if(!uri)
return option;
RAPTOR_ASSERT_OBJECT_POINTER_RETURN_VALUE(world, raptor_world, (raptor_option)-1);
raptor_world_open(world);
uri_string = raptor_uri_as_string(uri);
if(strncmp((const char*)uri_string, raptor_option_uri_prefix,
raptor_option_uri_prefix_len))
return option;
uri_string += raptor_option_uri_prefix_len;
for(i = 0; i <= RAPTOR_OPTION_LAST; i++)
if(!strcmp(raptor_options_list[i].name, (const char*)uri_string)) {
option = (raptor_option)i;
break;
}
return option;
}
|
C
|
raptor
| 0 |
CVE-2018-6048
|
https://www.cvedetails.com/cve/CVE-2018-6048/
|
CWE-20
|
https://github.com/chromium/chromium/commit/931711135c90568f677cf42d94f2591a7eeced2e
|
931711135c90568f677cf42d94f2591a7eeced2e
|
Inherit referrer and policy when creating a nested browsing context
BUG=763194
[email protected]
Change-Id: Ide3950269adf26ba221f573dfa088e95291ab676
Reviewed-on: https://chromium-review.googlesource.com/732652
Reviewed-by: Emily Stark <[email protected]>
Commit-Queue: Jochen Eisinger <[email protected]>
Cr-Commit-Position: refs/heads/master@{#511211}
|
bool Document::TasksNeedSuspension() {
Page* page = GetPage();
return page && page->Paused();
}
|
bool Document::TasksNeedSuspension() {
Page* page = GetPage();
return page && page->Paused();
}
|
C
|
Chrome
| 0 |
CVE-2017-6850
|
https://www.cvedetails.com/cve/CVE-2017-6850/
|
CWE-476
|
https://github.com/mdadams/jasper/commit/e96fc4fdd525fa0ede28074a7e2b1caf94b58b0d
|
e96fc4fdd525fa0ede28074a7e2b1caf94b58b0d
|
Fixed bugs due to uninitialized data in the JP2 decoder.
Also, added some comments marking I/O stream interfaces that probably
need to be changed (in the long term) to fix integer overflow problems.
|
jp2_box_t *jp2_box_create0()
{
jp2_box_t *box;
if (!(box = jas_malloc(sizeof(jp2_box_t)))) {
return 0;
}
memset(box, 0, sizeof(jp2_box_t));
box->type = 0;
box->len = 0;
// Mark the box data as never having been constructed
// so that we will not errantly attempt to destroy it later.
box->ops = &jp2_boxinfo_unk.ops;
return box;
}
jp2_box_t *jp2_box_create(int type)
{
jp2_box_t *box;
jp2_boxinfo_t *boxinfo;
if (!(box = jp2_box_create0())) {
return 0;
}
box->type = type;
box->len = 0;
if (!(boxinfo = jp2_boxinfolookup(type))) {
return 0;
}
box->info = boxinfo;
box->ops = &boxinfo->ops;
return box;
}
|
jp2_box_t *jp2_box_create(int type)
{
jp2_box_t *box;
jp2_boxinfo_t *boxinfo;
if (!(box = jas_malloc(sizeof(jp2_box_t)))) {
return 0;
}
memset(box, 0, sizeof(jp2_box_t));
box->type = type;
box->len = 0;
if (!(boxinfo = jp2_boxinfolookup(type))) {
return 0;
}
box->info = boxinfo;
box->ops = &boxinfo->ops;
return box;
}
|
C
|
jasper
| 1 |
CVE-2017-6210
|
https://www.cvedetails.com/cve/CVE-2017-6210/
|
CWE-476
|
https://cgit.freedesktop.org/virglrenderer/commit/?id=0a5dff15912207b83018485f83e067474e818bab
|
0a5dff15912207b83018485f83e067474e818bab
| null |
static int vrend_decode_set_uniform_buffer(struct vrend_decode_ctx *ctx, int length)
{
if (length != VIRGL_SET_UNIFORM_BUFFER_SIZE)
return EINVAL;
uint32_t shader = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE);
uint32_t index = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_INDEX);
uint32_t offset = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_OFFSET);
uint32_t blength = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_LENGTH);
uint32_t handle = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE);
if (shader >= PIPE_SHADER_TYPES)
return EINVAL;
if (index >= PIPE_MAX_CONSTANT_BUFFERS)
return EINVAL;
vrend_set_uniform_buffer(ctx->grctx, shader, index, offset, blength, handle);
return 0;
}
|
static int vrend_decode_set_uniform_buffer(struct vrend_decode_ctx *ctx, int length)
{
if (length != VIRGL_SET_UNIFORM_BUFFER_SIZE)
return EINVAL;
uint32_t shader = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE);
uint32_t index = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_INDEX);
uint32_t offset = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_OFFSET);
uint32_t blength = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_LENGTH);
uint32_t handle = get_buf_entry(ctx, VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE);
if (shader >= PIPE_SHADER_TYPES)
return EINVAL;
if (index >= PIPE_MAX_CONSTANT_BUFFERS)
return EINVAL;
vrend_set_uniform_buffer(ctx->grctx, shader, index, offset, blength, handle);
return 0;
}
|
C
|
virglrenderer
| 0 |
CVE-2018-6158
|
https://www.cvedetails.com/cve/CVE-2018-6158/
|
CWE-362
|
https://github.com/chromium/chromium/commit/20b65d00ca3d8696430e22efad7485366f8c3a21
|
20b65d00ca3d8696430e22efad7485366f8c3a21
|
[oilpan] Fix GCInfoTable for multiple threads
Previously, grow and access from different threads could lead to a race
on the table backing; see bug.
- Rework the table to work on an existing reservation.
- Commit upon growing, avoiding any copies.
Drive-by: Fix over-allocation of table.
Bug: chromium:841280
Change-Id: I329cb6f40091e14e8c05334ba1104a9440c31d43
Reviewed-on: https://chromium-review.googlesource.com/1061525
Commit-Queue: Michael Lippautz <[email protected]>
Reviewed-by: Kentaro Hara <[email protected]>
Cr-Commit-Position: refs/heads/master@{#560434}
|
virtual void Bar() {}
|
virtual void Bar() {}
|
C
|
Chrome
| 0 |
CVE-2016-5218
|
https://www.cvedetails.com/cve/CVE-2016-5218/
|
CWE-20
|
https://github.com/chromium/chromium/commit/45d901b56f578a74b19ba0d10fa5c4c467f19303
|
45d901b56f578a74b19ba0d10fa5c4c467f19303
|
Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, is intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <[email protected]>
Reviewed-by: Taylor Bergquist <[email protected]>
Cr-Commit-Position: refs/heads/master@{#660498}
|
int TabStrip::GetActiveTabWidth() const {
return layout_helper_->active_tab_width();
}
|
int TabStrip::GetActiveTabWidth() const {
return layout_helper_->active_tab_width();
}
|
C
|
Chrome
| 0 |
Subsets and Splits

CWE-119 Function Changes: This query retrieves specific examples (before and after code changes) of vulnerabilities with CWE-119, providing basic filtering but limited insight.

Vulnerable Code with CWE IDs: This query filters and combines records from multiple datasets to list specific vulnerability details, providing a basic overview of vulnerable functions but lacking deeper analysis.

Vulnerable Functions in BigVul: This query retrieves details of vulnerable functions from both the validation and test datasets where vulnerabilities are present, providing a basic set of data points for further analysis.

Vulnerable Code Functions: This query filters and shows raw data for vulnerable functions, providing basic insight into specific vulnerabilities but lacking broader analytical value.

Top 100 Vulnerable Functions: This query retrieves 100 samples of vulnerabilities from the training dataset, showing the CVE ID, CWE ID, and the code before and after the change; a basic filtering of the vulnerability data.
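To make these query descriptions concrete, below is a minimal, illustrative sketch of the kind of filter they describe. It is not taken from the dataset itself: the file name vulnerability_records.csv and the column names cwe_id, func_before, func_after, and vul are assumptions and may need to be adjusted to the actual schema. The sketch simply selects records marked vulnerable and tagged CWE-119, then prints the before/after function pairs.

import pandas as pd

# Assumed setup: the records have been exported to a CSV file.
# The file name and column names below are hypothetical and may differ
# from the actual schema of the dataset.
df = pd.read_csv("vulnerability_records.csv")

# Keep only rows marked vulnerable and tagged with CWE-119.
cwe119 = df[(df["vul"] == 1) & (df["cwe_id"] == "CWE-119")]

# Print the before/after function pairs for the first few matches.
for _, row in cwe119.head(5).iterrows():
    print("=== func_before ===")
    print(row["func_before"])
    print("=== func_after ===")
    print(row["func_after"])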