Dataset schema (column, type, and the length/value statistics reported by the dataset viewer):

Column           Type            Lengths / values
CVE ID           string          lengths 13-43
CVE Page         string          lengths 45-48
CWE ID           string (class)  90 distinct values
codeLink         string          lengths 46-139
commit_id        string          lengths 6-81
commit_message   string          lengths 3-13.3k
func_after       string          lengths 14-241k
func_before      string          lengths 14-241k
lang             string (class)  3 distinct values
project          string (class)  309 distinct values
vul              int8            0 or 1
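Before the records themselves, it may help to see the row shape in one place. Below is a minimal C mirror of a single record; the field names follow the columns above, while the struct name, the plain-pointer representation, and the int8_t choice are illustrative assumptions, not part of the dataset. As the two vul = 1 rows in this excerpt suggest, vul marks whether the sampled function's body changed in the fixing commit.

#include <stdint.h>

/* Illustrative mirror of one dataset row; names follow the columns above,
 * everything else (struct name, pointer representation) is an assumption. */
struct vuln_record {
    char   *cve_id;          /* "CVE-...", 13-43 chars, may be "null" */
    char   *cve_page;        /* cvedetails.com URL, 45-48 chars */
    char   *cwe_id;          /* one of 90 classes, e.g. "CWE-416", may be "null" */
    char   *code_link;       /* URL of the fixing commit, 46-139 chars */
    char   *commit_id;       /* hash of the fixing commit */
    char   *commit_message;  /* free text, 3 chars to ~13.3k chars */
    char   *func_after;      /* function body after the fix, up to ~241k chars */
    char   *func_before;     /* function body before the fix, up to ~241k chars */
    char   *lang;            /* one of 3 classes: "C", "CPP", ... */
    char   *project;         /* one of 309 projects */
    int8_t  vul;             /* 1 if this function changed in the fix, else 0 */
};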
CVE-2016-5773
https://www.cvedetails.com/cve/CVE-2016-5773/
CWE-416
https://github.com/php/php-src/commit/f6aef68089221c5ea047d4a74224ee3deead99a6?w=1
f6aef68089221c5ea047d4a74224ee3deead99a6
Fix bug #72434: ZipArchive class Use After Free Vulnerability in PHP's GC algorithm and unserialize
static char * php_zipobj_get_filename(ze_zip_object *obj TSRMLS_DC) /* {{{ */ { if (!obj) { return NULL; } if (obj->filename) { return obj->filename; } return NULL; } /* }}} */
static char * php_zipobj_get_filename(ze_zip_object *obj TSRMLS_DC) /* {{{ */ { if (!obj) { return NULL; } if (obj->filename) { return obj->filename; } return NULL; } /* }}} */
C
php-src
0
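The first record is tagged CWE-416 (use-after-free), although the sampled function itself is unchanged (vul = 0). For readers unfamiliar with the class, here is a minimal, self-contained C illustration of the bug pattern the CWE names; it is a generic sketch, not the PHP GC/unserialize code path the commit actually fixes.

#include <stdlib.h>
#include <string.h>

int main(void) {
    char *name = malloc(16);
    if (!name) return 1;
    strcpy(name, "zip");
    free(name);
    /* BUG (CWE-416): 'name' still points at freed memory; any use here is
     * undefined behavior and may be attacker-influenced via reallocation. */
    /* puts(name); */

    /* Common hardening, visible in sspi_SecBufferFree() in the next record:
     * null the pointer at the free site so stale uses fail loudly. */
    name = NULL;
    return 0;
}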
CVE-2013-4119
https://www.cvedetails.com/cve/CVE-2013-4119/
CWE-476
https://github.com/FreeRDP/FreeRDP/commit/0773bb9303d24473fe1185d85a424dfe159aff53
0773bb9303d24473fe1185d85a424dfe159aff53
nla: invalidate sec handle after creation If sec pointer isn't invalidated after creation it is not possible to check if the upper and lower pointers are valid. This fixes a segfault in the server part if the client disconnects before the authentication was finished.
void sspi_SecBufferFree(PSecBuffer SecBuffer) { free(SecBuffer->pvBuffer); SecBuffer->pvBuffer = NULL; SecBuffer->cbBuffer = 0; }
void sspi_SecBufferFree(PSecBuffer SecBuffer) { free(SecBuffer->pvBuffer); SecBuffer->pvBuffer = NULL; SecBuffer->cbBuffer = 0; }
C
FreeRDP
0
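The FreeRDP message above argues that a sec handle must be invalidated right after creation so that its upper and lower pointers can later be tested for validity. A small self-contained C sketch of that idiom follows; the type and function names are hypothetical stand-ins, not FreeRDP's API.

#include <stddef.h>
#include <stdio.h>

typedef struct {          /* hypothetical stand-in for a sec handle */
    void *lower;
    void *upper;
} sec_handle_t;

/* Invalidate at creation and teardown so validity checks see a known state. */
static void sec_handle_invalidate(sec_handle_t *h) {
    h->lower = NULL;
    h->upper = NULL;
}

static int sec_handle_is_valid(const sec_handle_t *h) {
    return h->lower != NULL && h->upper != NULL;
}

int main(void) {
    sec_handle_t h;
    sec_handle_invalidate(&h);   /* without this, h holds indeterminate bytes */
    printf("valid: %d\n", sec_handle_is_valid(&h));  /* prints 0, not a crash */
    return 0;
}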
CVE-2013-7421
https://www.cvedetails.com/cve/CVE-2013-7421/
CWE-264
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
crypto: prefix module autoloading with "crypto-" This prefixes all crypto module loading with "crypto-" so we never run the risk of exposing module auto-loading to userspace via a crypto API, as demonstrated by Mathias Krause: https://lkml.org/lkml/2013/3/4/70 Signed-off-by: Kees Cook <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) { if (ap_configuration != NULL) { /* QCI not supported */ if (test_facility(76)) { /* format 1 - 256 bit domain field */ return snprintf(buf, PAGE_SIZE, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", ap_configuration->adm[0], ap_configuration->adm[1], ap_configuration->adm[2], ap_configuration->adm[3], ap_configuration->adm[4], ap_configuration->adm[5], ap_configuration->adm[6], ap_configuration->adm[7]); } else { /* format 0 - 16 bit domain field */ return snprintf(buf, PAGE_SIZE, "%08x%08x\n", ap_configuration->adm[0], ap_configuration->adm[1]); } } else { return snprintf(buf, PAGE_SIZE, "not supported\n"); } }
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) { if (ap_configuration != NULL) { /* QCI not supported */ if (test_facility(76)) { /* format 1 - 256 bit domain field */ return snprintf(buf, PAGE_SIZE, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", ap_configuration->adm[0], ap_configuration->adm[1], ap_configuration->adm[2], ap_configuration->adm[3], ap_configuration->adm[4], ap_configuration->adm[5], ap_configuration->adm[6], ap_configuration->adm[7]); } else { /* format 0 - 16 bit domain field */ return snprintf(buf, PAGE_SIZE, "%08x%08x\n", ap_configuration->adm[0], ap_configuration->adm[1]); } } else { return snprintf(buf, PAGE_SIZE, "not supported\n"); } }
C
linux
0
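The kernel commit above closes a module-autoloading hole by namespacing every crypto alias with a "crypto-" prefix, so userspace-chosen algorithm names can no longer pull in arbitrary modules. The real change touches MODULE_ALIAS declarations across the tree; the user-space C sketch below only illustrates the underlying idea of prefixing an untrusted lookup name before it reaches a privileged loader, with request_module_stub() as a hypothetical stand-in.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's module loader. */
static void request_module_stub(const char *alias) {
    printf("would load module alias: %s\n", alias);
}

static int load_crypto_alg(const char *user_supplied_name) {
    char alias[64];
    /* Prefixing confines userspace-controlled names to the crypto-*
     * namespace: "vfat" becomes "crypto-vfat" and can no longer trigger
     * loading of an unrelated module. */
    int n = snprintf(alias, sizeof(alias), "crypto-%s", user_supplied_name);
    if (n < 0 || (size_t)n >= sizeof(alias))
        return -1;                       /* name too long: reject */
    request_module_stub(alias);
    return 0;
}

int main(void) {
    load_crypto_alg("aes");
    load_crypto_alg("vfat");             /* harmlessly becomes crypto-vfat */
    return 0;
}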
CVE-2006-4192
https://www.cvedetails.com/cve/CVE-2006-4192/
null
https://cgit.freedesktop.org/gstreamer/gst-plugins-bad/commit/?id=bc2cdd57d549ab3ba59782e9b395d0cd683fd3ac
bc2cdd57d549ab3ba59782e9b395d0cd683fd3ac
null
void CSoundFile::SetCurrentOrder(UINT nPos) { while ((nPos < MAX_ORDERS) && (Order[nPos] == 0xFE)) nPos++; if ((nPos >= MAX_ORDERS) || (Order[nPos] >= MAX_PATTERNS)) return; for (UINT j=0; j<MAX_CHANNELS; j++) { Chn[j].nPeriod = 0; Chn[j].nNote = 0; Chn[j].nPortamentoDest = 0; Chn[j].nCommand = 0; Chn[j].nPatternLoopCount = 0; Chn[j].nPatternLoop = 0; Chn[j].nTremorCount = 0; } if (!nPos) { SetCurrentPos(0); } else { m_nNextPattern = nPos; m_nRow = m_nNextRow = 0; m_nPattern = 0; m_nTickCount = m_nMusicSpeed; m_nBufferCount = 0; m_nTotalCount = 0; m_nPatternDelay = 0; m_nFrameDelay = 0; } m_dwSongFlags &= ~(SONG_PATTERNLOOP|SONG_CPUVERYHIGH|SONG_FADINGSONG|SONG_ENDREACHED|SONG_GLOBALFADE); }
void CSoundFile::SetCurrentOrder(UINT nPos) { while ((nPos < MAX_ORDERS) && (Order[nPos] == 0xFE)) nPos++; if ((nPos >= MAX_ORDERS) || (Order[nPos] >= MAX_PATTERNS)) return; for (UINT j=0; j<MAX_CHANNELS; j++) { Chn[j].nPeriod = 0; Chn[j].nNote = 0; Chn[j].nPortamentoDest = 0; Chn[j].nCommand = 0; Chn[j].nPatternLoopCount = 0; Chn[j].nPatternLoop = 0; Chn[j].nTremorCount = 0; } if (!nPos) { SetCurrentPos(0); } else { m_nNextPattern = nPos; m_nRow = m_nNextRow = 0; m_nPattern = 0; m_nTickCount = m_nMusicSpeed; m_nBufferCount = 0; m_nTotalCount = 0; m_nPatternDelay = 0; m_nFrameDelay = 0; } m_dwSongFlags &= ~(SONG_PATTERNLOOP|SONG_CPUVERYHIGH|SONG_FADINGSONG|SONG_ENDREACHED|SONG_GLOBALFADE); }
CPP
gstreamer
0
CVE-2018-11383
https://www.cvedetails.com/cve/CVE-2018-11383/
CWE-416
https://github.com/radare/radare2/commit/9d348bcc2c4bbd3805e7eec97b594be9febbdf9a
9d348bcc2c4bbd3805e7eec97b594be9febbdf9a
Fix #9943 - Invalid free on RAnal.avr
INST_HANDLER (lpm) { // LPM ut16 ins = (((ut16) buf[1]) << 8) | ((ut16) buf[0]); __generic_ld_st ( op, "prog", 'z', // index register Y/Z 1, // use RAMP* registers (ins & 0xfe0f) == 0x9005 ? 1 // post incremented : 0, // no increment 0, // not offset 0); // load operation (!st) ESIL_A ("r%d,=,", (ins == 0x95c8) ? 0 // LPM (r0) : ((buf[0] >> 4) & 0xf) // LPM Rd | ((buf[1] & 0x1) << 4)); }
INST_HANDLER (lpm) { // LPM ut16 ins = (((ut16) buf[1]) << 8) | ((ut16) buf[0]); __generic_ld_st ( op, "prog", 'z', // index register Y/Z 1, // use RAMP* registers (ins & 0xfe0f) == 0x9005 ? 1 // post incremented : 0, // no increment 0, // not offset 0); // load operation (!st) ESIL_A ("r%d,=,", (ins == 0x95c8) ? 0 // LPM (r0) : ((buf[0] >> 4) & 0xf) // LPM Rd | ((buf[1] & 0x1) << 4)); }
C
radare2
0
CVE-2017-8067
https://www.cvedetails.com/cve/CVE-2017-8067/
CWE-119
https://github.com/torvalds/linux/commit/c4baad50297d84bde1a7ad45e50c73adae4a2192
c4baad50297d84bde1a7ad45e50c73adae4a2192
virtio-console: avoid DMA from stack put_chars() stuffs the buffer it gets into an sg, but that buffer may be on the stack. This breaks with CONFIG_VMAP_STACK=y (for me, it manifested as printks getting turned into NUL bytes). Signed-off-by: Omar Sandoval <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Amit Shah <[email protected]>
static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; struct port *port; struct port_buffer *buf; dev_t devt; unsigned int nr_added_bufs; int err; port = kmalloc(sizeof(*port), GFP_KERNEL); if (!port) { err = -ENOMEM; goto fail; } kref_init(&port->kref); port->portdev = portdev; port->id = id; port->name = NULL; port->inbuf = NULL; port->cons.hvc = NULL; port->async_queue = NULL; port->cons.ws.ws_row = port->cons.ws.ws_col = 0; port->host_connected = port->guest_connected = false; port->stats = (struct port_stats) { 0 }; port->outvq_full = false; port->in_vq = portdev->in_vqs[port->id]; port->out_vq = portdev->out_vqs[port->id]; port->cdev = cdev_alloc(); if (!port->cdev) { dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); err = -ENOMEM; goto free_port; } port->cdev->ops = &port_fops; devt = MKDEV(portdev->chr_major, id); err = cdev_add(port->cdev, devt, 1); if (err < 0) { dev_err(&port->portdev->vdev->dev, "Error %d adding cdev for port %u\n", err, id); goto free_cdev; } port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, devt, port, "vport%up%u", port->portdev->vdev->index, id); if (IS_ERR(port->dev)) { err = PTR_ERR(port->dev); dev_err(&port->portdev->vdev->dev, "Error %d creating device for port %u\n", err, id); goto free_cdev; } spin_lock_init(&port->inbuf_lock); spin_lock_init(&port->outvq_lock); init_waitqueue_head(&port->waitqueue); /* Fill the in_vq with buffers so the host can send us data. */ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); if (!nr_added_bufs) { dev_err(port->dev, "Error allocating inbufs\n"); err = -ENOMEM; goto free_device; } if (is_rproc_serial(port->portdev->vdev)) /* * For rproc_serial assume remote processor is connected. * rproc_serial does not want the console port, only * the generic port implementation. */ port->host_connected = true; else if (!use_multiport(port->portdev)) { /* * If we're not using multiport support, * this has to be a console port. */ err = init_port_console(port); if (err) goto free_inbufs; } spin_lock_irq(&portdev->ports_lock); list_add_tail(&port->list, &port->portdev->ports); spin_unlock_irq(&portdev->ports_lock); /* * Tell the Host we're set so that it can send us various * configuration parameters for this port (eg, port name, * caching, whether this is a console port, etc.) */ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); if (pdrvdata.debugfs_dir) { /* * Finally, create the debugfs file that we can use to * inspect a port's state at any time */ snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u", port->portdev->vdev->index, id); port->debugfs_file = debugfs_create_file(debugfs_name, 0444, pdrvdata.debugfs_dir, port, &port_debugfs_ops); } return 0; free_inbufs: while ((buf = virtqueue_detach_unused_buf(port->in_vq))) free_buf(buf, true); free_device: device_destroy(pdrvdata.class, port->dev->devt); free_cdev: cdev_del(port->cdev); free_port: kfree(port); fail: /* The host might want to notify management sw about port add failure */ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); return err; }
static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; struct port *port; struct port_buffer *buf; dev_t devt; unsigned int nr_added_bufs; int err; port = kmalloc(sizeof(*port), GFP_KERNEL); if (!port) { err = -ENOMEM; goto fail; } kref_init(&port->kref); port->portdev = portdev; port->id = id; port->name = NULL; port->inbuf = NULL; port->cons.hvc = NULL; port->async_queue = NULL; port->cons.ws.ws_row = port->cons.ws.ws_col = 0; port->host_connected = port->guest_connected = false; port->stats = (struct port_stats) { 0 }; port->outvq_full = false; port->in_vq = portdev->in_vqs[port->id]; port->out_vq = portdev->out_vqs[port->id]; port->cdev = cdev_alloc(); if (!port->cdev) { dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); err = -ENOMEM; goto free_port; } port->cdev->ops = &port_fops; devt = MKDEV(portdev->chr_major, id); err = cdev_add(port->cdev, devt, 1); if (err < 0) { dev_err(&port->portdev->vdev->dev, "Error %d adding cdev for port %u\n", err, id); goto free_cdev; } port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, devt, port, "vport%up%u", port->portdev->vdev->index, id); if (IS_ERR(port->dev)) { err = PTR_ERR(port->dev); dev_err(&port->portdev->vdev->dev, "Error %d creating device for port %u\n", err, id); goto free_cdev; } spin_lock_init(&port->inbuf_lock); spin_lock_init(&port->outvq_lock); init_waitqueue_head(&port->waitqueue); /* Fill the in_vq with buffers so the host can send us data. */ nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); if (!nr_added_bufs) { dev_err(port->dev, "Error allocating inbufs\n"); err = -ENOMEM; goto free_device; } if (is_rproc_serial(port->portdev->vdev)) /* * For rproc_serial assume remote processor is connected. * rproc_serial does not want the console port, only * the generic port implementation. */ port->host_connected = true; else if (!use_multiport(port->portdev)) { /* * If we're not using multiport support, * this has to be a console port. */ err = init_port_console(port); if (err) goto free_inbufs; } spin_lock_irq(&portdev->ports_lock); list_add_tail(&port->list, &port->portdev->ports); spin_unlock_irq(&portdev->ports_lock); /* * Tell the Host we're set so that it can send us various * configuration parameters for this port (eg, port name, * caching, whether this is a console port, etc.) */ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); if (pdrvdata.debugfs_dir) { /* * Finally, create the debugfs file that we can use to * inspect a port's state at any time */ snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u", port->portdev->vdev->index, id); port->debugfs_file = debugfs_create_file(debugfs_name, 0444, pdrvdata.debugfs_dir, port, &port_debugfs_ops); } return 0; free_inbufs: while ((buf = virtqueue_detach_unused_buf(port->in_vq))) free_buf(buf, true); free_device: device_destroy(pdrvdata.class, port->dev->devt); free_cdev: cdev_del(port->cdev); free_port: kfree(port); fail: /* The host might want to notify management sw about port add failure */ __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); return err; }
C
linux
0
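The virtio-console message above pins down the bug class: put_chars() built a scatterlist over a caller-supplied buffer that may live on the stack, which breaks under CONFIG_VMAP_STACK. The remedy the message implies is duplicating the data into heap memory before handing it to the device (the kernel helper for this is kmemdup()). Below is a self-contained user-space C sketch of that pattern; submit_to_device() is a hypothetical stand-in for the sg/virtqueue machinery.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for queueing a buffer for asynchronous DMA. */
static void submit_to_device(const void *buf, size_t len) {
    printf("submitted %zu bytes at %p\n", len, buf);
}

static int put_chars_fixed(const char *buf, size_t count) {
    /* Never hand the caller's (possibly stack-resident) buffer to an async
     * engine: duplicate it into heap memory first. */
    char *copy = malloc(count);
    if (!copy)
        return -1;
    memcpy(copy, buf, count);
    submit_to_device(copy, count);
    free(copy);   /* real driver code frees from the completion callback */
    return (int)count;
}

int main(void) {
    char on_stack[] = "hello from the stack";
    return put_chars_fixed(on_stack, sizeof(on_stack)) < 0;
}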
null
null
null
https://github.com/chromium/chromium/commit/4f1f3d0f03c79ddaace56f067cf28a27f9466b7d
4f1f3d0f03c79ddaace56f067cf28a27f9466b7d
Improve handling and testing of reparse points. BUG=28804 TEST=unit tests. Review URL: http://codereview.chromium.org/553080 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@37286 0039d316-1c4b-4281-b951-d872f2087c98
void ResolveNTFunctionPtr(const char* name, void* ptr) { HMODULE ntdll = ::GetModuleHandle(sandbox::kNtdllName); FARPROC* function_ptr = reinterpret_cast<FARPROC*>(ptr); *function_ptr = ::GetProcAddress(ntdll, name); if (*function_ptr) return; *function_ptr = ::GetProcAddress(ntdll, name); CHECK(*function_ptr); }
void ResolveNTFunctionPtr(const char* name, void* ptr) { HMODULE ntdll = ::GetModuleHandle(sandbox::kNtdllName); FARPROC* function_ptr = reinterpret_cast<FARPROC*>(ptr); *function_ptr = ::GetProcAddress(ntdll, name); if (*function_ptr) return; *function_ptr = ::GetProcAddress(ntdll, name); CHECK(*function_ptr); }
C
Chrome
0
CVE-2018-13006
https://www.cvedetails.com/cve/CVE-2018-13006/
CWE-125
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
bceb03fd2be95097a7b409ea59914f332fb6bc86
fixed 2 possible heap overflows (inc. #1088)
GF_Err metx_Read(GF_Box *s, GF_BitStream *bs) { u32 size, i; GF_Err e; char *str; GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; size = (u32) ptr->size - 8; str = gf_malloc(sizeof(char)*size); i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if (i) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_namespace = gf_strdup(str); } else { ptr->content_encoding = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (i) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_schema_loc = gf_strdup(str); } else { ptr->xml_namespace = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if (i) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->mime_type = gf_strdup(str); } else { ptr->xml_schema_loc = gf_strdup(str); } } } else { if (i) ptr->mime_type = gf_strdup(str); } ptr->size = size; gf_free(str); return gf_isom_box_array_read(s, bs, metx_AddBox); }
GF_Err metx_Read(GF_Box *s, GF_BitStream *bs) { u32 size, i; GF_Err e; char *str; GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; size = (u32) ptr->size - 8; str = gf_malloc(sizeof(char)*size); i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if (i) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_namespace = gf_strdup(str); } else { ptr->content_encoding = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (i) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_schema_loc = gf_strdup(str); } else { ptr->xml_namespace = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) break; i++; } if (i) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->mime_type = gf_strdup(str); } else { ptr->xml_schema_loc = gf_strdup(str); } } } else { if (i) ptr->mime_type = gf_strdup(str); } ptr->size = size; gf_free(str); return gf_isom_box_array_read(s, bs, metx_AddBox); }
C
gpac
0
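The metx_Read record above shows the unpatched reader on both sides (vul = 0 for this sampled function), but the CWE-125 tag and the commit note point at the class: if no NUL byte appears within the remaining box size, str is left unterminated and the later gf_strdup(str) appears to read past the allocation. A self-contained C sketch of a reader that cannot overread follows; the function and type names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Copy a NUL-terminated string out of a bounded buffer. Returns the number
 * of input bytes consumed (including the NUL), or 0 on error. Unlike the
 * pattern above, dst is always NUL-terminated and 'avail' is never exceeded. */
static size_t read_cstring(const uint8_t *in, size_t avail,
                           char *dst, size_t cap) {
    size_t i;
    if (cap == 0)
        return 0;
    for (i = 0; i < avail && i + 1 < cap; i++) {
        dst[i] = (char)in[i];
        if (in[i] == '\0')
            return i + 1;                /* consumed the terminator too */
    }
    dst[i] = '\0';                       /* force termination */
    return 0;                            /* no NUL within bounds: reject */
}

int main(void) {
    const uint8_t box[] = { 'n', 's', '\0', 'x' };
    char out[8];
    size_t used = read_cstring(box, sizeof(box), out, sizeof(out));
    printf("consumed=%zu str=%s\n", used, out);
    return 0;
}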
CVE-2012-2875
https://www.cvedetails.com/cve/CVE-2012-2875/
null
https://github.com/chromium/chromium/commit/3ea4ba8af75eb37860c15d02af94f272e5bbc235
3ea4ba8af75eb37860c15d02af94f272e5bbc235
Crash fix in fileapi::FileSystemOperation::DidGetUsageAndQuotaAndRunTask https://chromiumcodereview.appspot.com/10008047 introduced delete-with-inflight-tasks in Write sequence but I failed to convert this callback to use WeakPtr(). BUG=128178 TEST=manual test Review URL: https://chromiumcodereview.appspot.com/10408006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137635 0039d316-1c4b-4281-b951-d872f2087c98
void FileSystemOperation::DoCopy(const StatusCallback& callback) { FileSystemFileUtilProxy::Copy( &operation_context_, src_util_, dest_util_, src_path_, dest_path_, base::Bind(&FileSystemOperation::DidFinishFileOperation, base::Owned(this), callback)); }
void FileSystemOperation::DoCopy(const StatusCallback& callback) { FileSystemFileUtilProxy::Copy( &operation_context_, src_util_, dest_util_, src_path_, dest_path_, base::Bind(&FileSystemOperation::DidFinishFileOperation, base::Owned(this), callback)); }
C
Chrome
0
CVE-2016-4302
https://www.cvedetails.com/cve/CVE-2016-4302/
CWE-119
https://github.com/libarchive/libarchive/commit/05caadc7eedbef471ac9610809ba683f0c698700
05caadc7eedbef471ac9610809ba683f0c698700
Issue 719: Fix for TALOS-CAN-154 A RAR file with an invalid zero dictionary size was not being rejected, leading to a zero-sized allocation for the dictionary storage which was then overwritten during the dictionary initialization. Thanks to the Open Source and Threat Intelligence project at Cisco for reporting this.
lzss_emit_literal(struct rar *rar, uint8_t literal) { *lzss_current_pointer(&rar->lzss) = literal; rar->lzss.position++; }
lzss_emit_literal(struct rar *rar, uint8_t literal) { *lzss_current_pointer(&rar->lzss) = literal; rar->lzss.position++; }
C
libarchive
0
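The libarchive message above is a textbook input-validation note: a dictionary size of zero produced a zero-sized allocation that dictionary initialization then wrote past. The defensive shape is a range check before the allocation; a minimal hedged C sketch follows (the upper-bound constant is an assumption, not libarchive's).

#include <stdlib.h>

#define DICT_SIZE_MAX (64u * 1024u * 1024u)   /* illustrative upper bound */

static unsigned char *alloc_dictionary(size_t dictionary_size) {
    /* Reject the degenerate and the absurd before allocating: a zero size
     * previously yielded a zero-byte buffer that initialization overflowed. */
    if (dictionary_size == 0 || dictionary_size > DICT_SIZE_MAX)
        return NULL;
    return calloc(dictionary_size, 1);
}

int main(void) { return alloc_dictionary(0) == NULL ? 0 : 1; }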
CVE-2013-7271
https://www.cvedetails.com/cve/CVE-2013-7271/
CWE-20
https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct pppol2tp_session *ps = l2tp_session_priv(session); struct sock *sk = NULL; /* If the socket is bound, send it in to PPP's input queue. Otherwise * queue it on the session socket. */ sk = ps->sock; if (sk == NULL) goto no_sock; if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; l2tp_dbg(session, PPPOL2TP_MSG_DATA, "%s: recv %d byte data frame, passing to ppp\n", session->name, data_len); /* We need to forget all info related to the L2TP packet * gathered in the skb as we are going to reuse the same * skb for the inner packet. * Namely we need to: * - reset xfrm (IPSec) information as it applies to * the outer L2TP packet and not to the inner one * - release the dst to force a route lookup on the inner * IP packet since skb->dst currently points to the dst * of the UDP tunnel * - reset netfilter information as it doesn't apply * to the inner packet either */ secpath_reset(skb); skb_dst_drop(skb); nf_reset(skb); po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", session->name); /* Not bound. Nothing we can do, so discard. */ atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } return; no_sock: l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); }
static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct pppol2tp_session *ps = l2tp_session_priv(session); struct sock *sk = NULL; /* If the socket is bound, send it in to PPP's input queue. Otherwise * queue it on the session socket. */ sk = ps->sock; if (sk == NULL) goto no_sock; if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; l2tp_dbg(session, PPPOL2TP_MSG_DATA, "%s: recv %d byte data frame, passing to ppp\n", session->name, data_len); /* We need to forget all info related to the L2TP packet * gathered in the skb as we are going to reuse the same * skb for the inner packet. * Namely we need to: * - reset xfrm (IPSec) information as it applies to * the outer L2TP packet and not to the inner one * - release the dst to force a route lookup on the inner * IP packet since skb->dst currently points to the dst * of the UDP tunnel * - reset netfilter information as it doesn't apply * to the inner packet either */ secpath_reset(skb); skb_dst_drop(skb); nf_reset(skb); po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", session->name); /* Not bound. Nothing we can do, so discard. */ atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } return; no_sock: l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: no socket\n", session->name); kfree_skb(skb); }
C
linux
0
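The recvmsg rework above states the handler contract explicitly: msg_namelen now arrives as 0, and a handler must set it to the size of the address it actually wrote (at most sizeof(struct sockaddr_storage)), leaving it at 0 when it writes none, so uninitialized kernel memory never reaches userspace. A user-space C sketch of a handler honoring that contract follows; the handler itself is hypothetical.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

/* Hypothetical recvmsg-style handler: msg_namelen starts at 0 and is set
 * only when, and to exactly how much, msg_name is filled in. */
static ssize_t demo_recvmsg(struct msghdr *msg, int have_peer_addr) {
    msg->msg_namelen = 0;                  /* callers see 0 unless we fill it */
    if (have_peer_addr && msg->msg_name) {
        struct sockaddr_in sin;
        memset(&sin, 0, sizeof(sin));      /* no uninitialized padding leaks */
        sin.sin_family = AF_INET;
        sin.sin_port = htons(1701);
        memcpy(msg->msg_name, &sin, sizeof(sin));
        msg->msg_namelen = sizeof(sin);    /* <= sizeof(struct sockaddr_storage) */
    }
    return 0;
}

int main(void) {
    struct sockaddr_storage ss;
    struct msghdr msg = {0};
    msg.msg_name = &ss;
    demo_recvmsg(&msg, 1);
    return msg.msg_namelen == sizeof(struct sockaddr_in) ? 0 : 1;
}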
CVE-2016-1696
https://www.cvedetails.com/cve/CVE-2016-1696/
CWE-284
https://github.com/chromium/chromium/commit/c0569cc04741cccf6548c2169fcc1609d958523f
c0569cc04741cccf6548c2169fcc1609d958523f
[Extensions] Expand bindings access checks BUG=601149 BUG=601073 Review URL: https://codereview.chromium.org/1866103002 Cr-Commit-Position: refs/heads/master@{#387710}
TestNativeHandler::TestNativeHandler(ScriptContext* context) : ObjectBackedNativeHandler(context) { RouteFunction( "GetWakeEventPage", "test", base::Bind(&TestNativeHandler::GetWakeEventPage, base::Unretained(this))); }
TestNativeHandler::TestNativeHandler(ScriptContext* context) : ObjectBackedNativeHandler(context) { RouteFunction( "GetWakeEventPage", base::Bind(&TestNativeHandler::GetWakeEventPage, base::Unretained(this))); }
C
Chrome
1
null
null
null
https://github.com/chromium/chromium/commit/76f36a8362a3e817cc3ec721d591f2f8878dc0c7
76f36a8362a3e817cc3ec721d591f2f8878dc0c7
Scheduler/child/TimeSource could be replaced with base/time/DefaultTickClock. They both are totally same and TimeSource is removed. BUG=494892 [email protected], [email protected] Review URL: https://codereview.chromium.org/1163143002 Cr-Commit-Position: refs/heads/master@{#333035}
void TaskQueue::PumpQueue() { base::AutoLock lock(lock_); PumpQueueLocked(); }
void TaskQueue::PumpQueue() { base::AutoLock lock(lock_); PumpQueueLocked(); }
C
Chrome
0
CVE-2018-17206
https://www.cvedetails.com/cve/CVE-2018-17206/
null
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
9237a63c47bd314b807cda0bd2216264e82edbe8
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
decode_OFPAT_RAW10_SET_VLAN_PCP(uint8_t pcp, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { return decode_set_vlan_pcp(pcp, true, out); }
decode_OFPAT_RAW10_SET_VLAN_PCP(uint8_t pcp, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { return decode_set_vlan_pcp(pcp, true, out); }
C
ovs
0
CVE-2019-5755
https://www.cvedetails.com/cve/CVE-2019-5755/
CWE-189
https://github.com/chromium/chromium/commit/971548cdca2d4c0a6fedd3db0c94372c2a27eac3
971548cdca2d4c0a6fedd3db0c94372c2a27eac3
Make MediaStreamDispatcherHost per-request instead of per-frame. Instead of having RenderFrameHost own a single MSDH to handle all requests from a frame, MSDH objects will be owned by a strong binding. A consequence of this is that an additional requester ID is added to requests to MediaStreamManager, so that an MSDH is able to cancel only requests generated by it. In practice, MSDH will continue to be per frame in most cases since each frame normally makes a single request for an MSDH object. This fixes a lifetime issue caused by the IO thread executing tasks after the RenderFrameHost dies. Drive-by: Fix some minor lint issues. Bug: 912520 Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516 Reviewed-on: https://chromium-review.googlesource.com/c/1369799 Reviewed-by: Emircan Uysaler <[email protected]> Reviewed-by: Ken Buchanan <[email protected]> Reviewed-by: Olga Sharonova <[email protected]> Commit-Queue: Guido Urdaneta <[email protected]> Cr-Commit-Position: refs/heads/master@{#616347}
void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, bool ask_user, bool is_allowed) { DCHECK_CURRENTLY_ON(BrowserThread::IO); auto iter = sessions_.find(session_id); if (iter == sessions_.end()) return; Session* session = iter->second.get(); if (session->abort_requested) return; if (ask_user) { SpeechRecognitionSessionContext& context = session->context; context.label = media_stream_manager_->MakeMediaAccessRequest( context.render_process_id, context.render_frame_id, requester_id_, session_id, StreamControls(true, false), context.security_origin, base::BindOnce( &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, weak_factory_.GetWeakPtr(), session_id)); return; } if (is_allowed) { base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_START)); } else { OnRecognitionError( session_id, blink::mojom::SpeechRecognitionError( blink::mojom::SpeechRecognitionErrorCode::kNotAllowed, blink::mojom::SpeechAudioErrorDetails::kNone)); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); } }
void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, bool ask_user, bool is_allowed) { DCHECK_CURRENTLY_ON(BrowserThread::IO); auto iter = sessions_.find(session_id); if (iter == sessions_.end()) return; Session* session = iter->second.get(); if (session->abort_requested) return; if (ask_user) { SpeechRecognitionSessionContext& context = session->context; context.label = media_stream_manager_->MakeMediaAccessRequest( context.render_process_id, context.render_frame_id, session_id, StreamControls(true, false), context.security_origin, base::BindOnce( &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback, weak_factory_.GetWeakPtr(), session_id)); return; } if (is_allowed) { base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_START)); } else { OnRecognitionError( session_id, blink::mojom::SpeechRecognitionError( blink::mojom::SpeechRecognitionErrorCode::kNotAllowed, blink::mojom::SpeechAudioErrorDetails::kNone)); base::ThreadTaskRunnerHandle::Get()->PostTask( FROM_HERE, base::BindOnce(&SpeechRecognitionManagerImpl::DispatchEvent, weak_factory_.GetWeakPtr(), session_id, EVENT_ABORT)); } }
C
Chrome
1
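This Chromium record is one of the two vul = 1 rows in the excerpt, and the diff is visible in the pair: func_after passes an extra requester_id_ into MakeMediaAccessRequest() while func_before does not, matching the message's design of tagging each request so a dispatcher host cancels only the requests it created. A small C sketch of that ownership-by-id pattern follows; all names are illustrative, not Chromium's.

#include <stdio.h>

#define MAX_REQUESTS 8

struct request {
    int in_use;
    int requester_id;    /* which dispatcher host created this request */
    int session_id;
};

static struct request table[MAX_REQUESTS];

static void add_request(int requester_id, int session_id) {
    for (int i = 0; i < MAX_REQUESTS; i++)
        if (!table[i].in_use) {
            table[i] = (struct request){1, requester_id, session_id};
            return;
        }
}

/* Cancel only requests generated by this requester, never a sibling's. */
static void cancel_requests_for(int requester_id) {
    for (int i = 0; i < MAX_REQUESTS; i++)
        if (table[i].in_use && table[i].requester_id == requester_id)
            table[i].in_use = 0;
}

int main(void) {
    add_request(/*requester*/1, /*session*/100);
    add_request(2, 200);
    cancel_requests_for(1);                 /* session 200 survives */
    for (int i = 0; i < MAX_REQUESTS; i++)
        if (table[i].in_use)
            printf("live session %d (requester %d)\n",
                   table[i].session_id, table[i].requester_id);
    return 0;
}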
CVE-2018-10021
https://www.cvedetails.com/cve/CVE-2018-10021/
null
https://github.com/torvalds/linux/commit/318aaf34f1179b39fa9c30fa0f3288b645beee39
318aaf34f1179b39fa9c30fa0f3288b645beee39
scsi: libsas: defer ata device eh commands to libata When ata device doing EH, some commands still attached with tasks are not passed to libata when abort failed or recover failed, so libata did not handle these commands. After these commands done, sas task is freed, but ata qc is not freed. This will cause ata qc leak and trigger a warning like below: WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037 ata_eh_finish+0xb4/0xcc CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1 ...... Call trace: [<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc [<ffff0000088b8420>] ata_do_eh+0xc4/0xd8 [<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c [<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694 [<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80 [<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170 [<ffff0000080ebd70>] process_one_work+0x144/0x390 [<ffff0000080ec100>] worker_thread+0x144/0x418 [<ffff0000080f2c98>] kthread+0x10c/0x138 [<ffff0000080855dc>] ret_from_fork+0x10/0x18 If ata qc leaked too many, ata tag allocation will fail and io blocked for ever. As suggested by Dan Williams, defer ata device commands to libata and merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle ata qcs correctly after this. Signed-off-by: Jason Yan <[email protected]> CC: Xiaofei Tan <[email protected]> CC: John Garry <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Dan Williams <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
int sas_eh_abort_handler(struct scsi_cmnd *cmd) { int res; struct sas_task *task = TO_SAS_TASK(cmd); struct Scsi_Host *host = cmd->device->host; struct sas_internal *i = to_sas_internal(host->transportt); if (!i->dft->lldd_abort_task) return FAILED; res = i->dft->lldd_abort_task(task); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; return FAILED; }
int sas_eh_abort_handler(struct scsi_cmnd *cmd) { int res; struct sas_task *task = TO_SAS_TASK(cmd); struct Scsi_Host *host = cmd->device->host; struct sas_internal *i = to_sas_internal(host->transportt); if (!i->dft->lldd_abort_task) return FAILED; res = i->dft->lldd_abort_task(task); if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) return SUCCESS; return FAILED; }
C
linux
0
CVE-2016-7530
https://www.cvedetails.com/cve/CVE-2016-7530/
CWE-369
https://github.com/ImageMagick/ImageMagick/commit/b5ed738f8060266bf4ae521f7e3ed145aa4498a3
b5ed738f8060266bf4ae521f7e3ed145aa4498a3
https://github.com/ImageMagick/ImageMagick/issues/110
MagickExport void SetQuantumScale(QuantumInfo *quantum_info,const double scale) { assert(quantum_info != (QuantumInfo *) NULL); assert(quantum_info->signature == MagickSignature); quantum_info->scale=scale; }
MagickExport void SetQuantumScale(QuantumInfo *quantum_info,const double scale) { assert(quantum_info != (QuantumInfo *) NULL); assert(quantum_info->signature == MagickSignature); quantum_info->scale=scale; }
C
ImageMagick
0
CVE-2013-0250
https://www.cvedetails.com/cve/CVE-2013-0250/
null
https://github.com/corosync/corosync/commit/b3f456a8ceefac6e9f2e9acc2ea0c159d412b595
b3f456a8ceefac6e9f2e9acc2ea0c159d412b595
totemcrypto: fix hmac key initialization Signed-off-by: Fabio M. Di Nitto <[email protected]> Reviewed-by: Jan Friesse <[email protected]>
static int encrypt_and_sign_nss_2_2 ( struct crypto_instance *instance, const unsigned char *buf_in, const size_t buf_in_len, unsigned char *buf_out, size_t *buf_out_len) { if (encrypt_nss(instance, buf_in, buf_in_len, buf_out + sizeof(struct crypto_config_header), buf_out_len) < 0) { return -1; } *buf_out_len += sizeof(struct crypto_config_header); if (hash_to_nss[instance->crypto_hash_type]) { if (calculate_nss_hash(instance, buf_out, *buf_out_len, buf_out + *buf_out_len) < 0) { return -1; } *buf_out_len += hash_len[instance->crypto_hash_type]; } return 0; }
static int encrypt_and_sign_nss_2_2 ( struct crypto_instance *instance, const unsigned char *buf_in, const size_t buf_in_len, unsigned char *buf_out, size_t *buf_out_len) { if (encrypt_nss(instance, buf_in, buf_in_len, buf_out + sizeof(struct crypto_config_header), buf_out_len) < 0) { return -1; } *buf_out_len += sizeof(struct crypto_config_header); if (hash_to_nss[instance->crypto_hash_type]) { if (calculate_nss_hash(instance, buf_out, *buf_out_len, buf_out + *buf_out_len) < 0) { return -1; } *buf_out_len += hash_len[instance->crypto_hash_type]; } return 0; }
C
corosync
0
CVE-2018-0494
https://www.cvedetails.com/cve/CVE-2018-0494/
CWE-20
https://git.savannah.gnu.org/cgit/wget.git/commit/?id=1fc9c95ec144499e69dc8ec76dbe07799d7d82cd
1fc9c95ec144499e69dc8ec76dbe07799d7d82cd
null
read_response_body (struct http_stat *hs, int sock, FILE *fp, wgint contlen, wgint contrange, bool chunked_transfer_encoding, char *url, char *warc_timestamp_str, char *warc_request_uuid, ip_address *warc_ip, char *type, int statcode, char *head) { int warc_payload_offset = 0; FILE *warc_tmp = NULL; int warcerr = 0; int flags = 0; if (opt.warc_filename != NULL) { /* Open a temporary file where we can write the response before we add it to the WARC record. */ warc_tmp = warc_tempfile (); if (warc_tmp == NULL) warcerr = WARC_TMP_FOPENERR; if (warcerr == 0) { /* We should keep the response headers for the WARC record. */ int head_len = strlen (head); int warc_tmp_written = fwrite (head, 1, head_len, warc_tmp); if (warc_tmp_written != head_len) warcerr = WARC_TMP_FWRITEERR; warc_payload_offset = head_len; } if (warcerr != 0) { if (warc_tmp != NULL) fclose (warc_tmp); return warcerr; } } if (fp != NULL) { /* This confuses the timestamping code that checks for file size. #### The timestamping code should be smarter about file size. */ if (opt.save_headers && hs->restval == 0) fwrite (head, 1, strlen (head), fp); } /* Read the response body. */ if (contlen != -1) /* If content-length is present, read that much; otherwise, read until EOF. The HTTP spec doesn't require the server to actually close the connection when it's done sending data. */ flags |= rb_read_exactly; if (fp != NULL && hs->restval > 0 && contrange == 0) /* If the server ignored our range request, instruct fd_read_body to skip the first RESTVAL bytes of body. */ flags |= rb_skip_startpos; if (chunked_transfer_encoding) flags |= rb_chunked_transfer_encoding; if (hs->remote_encoding == ENC_GZIP) flags |= rb_compressed_gzip; hs->len = hs->restval; hs->rd_size = 0; /* Download the response body and write it to fp. If we are working on a WARC file, we simultaneously write the response body to warc_tmp. */ hs->res = fd_read_body (hs->local_file, sock, fp, contlen != -1 ? contlen : 0, hs->restval, &hs->rd_size, &hs->len, &hs->dltime, flags, warc_tmp); if (hs->res >= 0) { if (warc_tmp != NULL) { /* Create a response record and write it to the WARC file. Note: per the WARC standard, the request and response should share the same date header. We re-use the timestamp of the request. The response record should also refer to the uuid of the request. */ bool r = warc_write_response_record (url, warc_timestamp_str, warc_request_uuid, warc_ip, warc_tmp, warc_payload_offset, type, statcode, hs->newloc); /* warc_write_response_record has closed warc_tmp. */ if (! r) return WARC_ERR; } return RETRFINISHED; } if (warc_tmp != NULL) fclose (warc_tmp); if (hs->res == -2) { /* Error while writing to fd. */ return FWRITEERR; } else if (hs->res == -3) { /* Error while writing to warc_tmp. */ return WARC_TMP_FWRITEERR; } else { /* A read error! */ hs->rderrmsg = xstrdup (fd_errstr (sock)); return RETRFINISHED; } }
read_response_body (struct http_stat *hs, int sock, FILE *fp, wgint contlen, wgint contrange, bool chunked_transfer_encoding, char *url, char *warc_timestamp_str, char *warc_request_uuid, ip_address *warc_ip, char *type, int statcode, char *head) { int warc_payload_offset = 0; FILE *warc_tmp = NULL; int warcerr = 0; int flags = 0; if (opt.warc_filename != NULL) { /* Open a temporary file where we can write the response before we add it to the WARC record. */ warc_tmp = warc_tempfile (); if (warc_tmp == NULL) warcerr = WARC_TMP_FOPENERR; if (warcerr == 0) { /* We should keep the response headers for the WARC record. */ int head_len = strlen (head); int warc_tmp_written = fwrite (head, 1, head_len, warc_tmp); if (warc_tmp_written != head_len) warcerr = WARC_TMP_FWRITEERR; warc_payload_offset = head_len; } if (warcerr != 0) { if (warc_tmp != NULL) fclose (warc_tmp); return warcerr; } } if (fp != NULL) { /* This confuses the timestamping code that checks for file size. #### The timestamping code should be smarter about file size. */ if (opt.save_headers && hs->restval == 0) fwrite (head, 1, strlen (head), fp); } /* Read the response body. */ if (contlen != -1) /* If content-length is present, read that much; otherwise, read until EOF. The HTTP spec doesn't require the server to actually close the connection when it's done sending data. */ flags |= rb_read_exactly; if (fp != NULL && hs->restval > 0 && contrange == 0) /* If the server ignored our range request, instruct fd_read_body to skip the first RESTVAL bytes of body. */ flags |= rb_skip_startpos; if (chunked_transfer_encoding) flags |= rb_chunked_transfer_encoding; if (hs->remote_encoding == ENC_GZIP) flags |= rb_compressed_gzip; hs->len = hs->restval; hs->rd_size = 0; /* Download the response body and write it to fp. If we are working on a WARC file, we simultaneously write the response body to warc_tmp. */ hs->res = fd_read_body (hs->local_file, sock, fp, contlen != -1 ? contlen : 0, hs->restval, &hs->rd_size, &hs->len, &hs->dltime, flags, warc_tmp); if (hs->res >= 0) { if (warc_tmp != NULL) { /* Create a response record and write it to the WARC file. Note: per the WARC standard, the request and response should share the same date header. We re-use the timestamp of the request. The response record should also refer to the uuid of the request. */ bool r = warc_write_response_record (url, warc_timestamp_str, warc_request_uuid, warc_ip, warc_tmp, warc_payload_offset, type, statcode, hs->newloc); /* warc_write_response_record has closed warc_tmp. */ if (! r) return WARC_ERR; } return RETRFINISHED; } if (warc_tmp != NULL) fclose (warc_tmp); if (hs->res == -2) { /* Error while writing to fd. */ return FWRITEERR; } else if (hs->res == -3) { /* Error while writing to warc_tmp. */ return WARC_TMP_FWRITEERR; } else { /* A read error! */ hs->rderrmsg = xstrdup (fd_errstr (sock)); return RETRFINISHED; } }
C
savannah
0
CVE-2011-3097
https://www.cvedetails.com/cve/CVE-2011-3097/
CWE-20
https://github.com/chromium/chromium/commit/027429ee5abe6e2fb5e3b2b4542f0a6fe0dbc12d
027429ee5abe6e2fb5e3b2b4542f0a6fe0dbc12d
Metrics for measuring how much overhead reading compressed content states adds. BUG=104293 TEST=NONE Review URL: https://chromiumcodereview.appspot.com/9426039 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@123733 0039d316-1c4b-4281-b951-d872f2087c98
void SessionService::UpdateSelectedTabIndex( std::vector<SessionWindow*>* windows) { for (std::vector<SessionWindow*>::const_iterator i = windows->begin(); i != windows->end(); ++i) { int new_index = 0; for (std::vector<SessionTab*>::const_iterator j = (*i)->tabs.begin(); j != (*i)->tabs.end(); ++j) { if ((*j)->tab_visual_index == (*i)->selected_tab_index) { new_index = static_cast<int>(j - (*i)->tabs.begin()); break; } } (*i)->selected_tab_index = new_index; } }
void SessionService::UpdateSelectedTabIndex( std::vector<SessionWindow*>* windows) { for (std::vector<SessionWindow*>::const_iterator i = windows->begin(); i != windows->end(); ++i) { int new_index = 0; for (std::vector<SessionTab*>::const_iterator j = (*i)->tabs.begin(); j != (*i)->tabs.end(); ++j) { if ((*j)->tab_visual_index == (*i)->selected_tab_index) { new_index = static_cast<int>(j - (*i)->tabs.begin()); break; } } (*i)->selected_tab_index = new_index; } }
C
Chrome
0
CVE-2017-6903
https://www.cvedetails.com/cve/CVE-2017-6903/
CWE-269
https://github.com/iortcw/iortcw/commit/b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
b6ff2bcb1e4e6976d61e316175c6d7c99860fe20
All: Don't load .pk3s as .dlls, and don't load user config files from .pk3s
const char *FS_ReferencedPakPureChecksums( void ) { static char info[BIG_INFO_STRING]; searchpath_t *search; int nFlags, numPaks, checksum; info[0] = 0; checksum = fs_checksumFeed; numPaks = 0; for ( nFlags = FS_GENERAL_REF; nFlags; nFlags = nFlags >> 1 ) { for ( search = fs_searchpaths ; search ; search = search->next ) { if ( search->pack && ( search->pack->referenced & nFlags ) ) { Q_strcat( info, sizeof( info ), va( "%i ", search->pack->pure_checksum ) ); checksum ^= search->pack->pure_checksum; numPaks++; } } } checksum ^= numPaks; Q_strcat( info, sizeof( info ), va( "%i ", checksum ) ); return info; }
const char *FS_ReferencedPakPureChecksums( void ) { static char info[BIG_INFO_STRING]; searchpath_t *search; int nFlags, numPaks, checksum; info[0] = 0; checksum = fs_checksumFeed; numPaks = 0; for ( nFlags = FS_GENERAL_REF; nFlags; nFlags = nFlags >> 1 ) { for ( search = fs_searchpaths ; search ; search = search->next ) { if ( search->pack && ( search->pack->referenced & nFlags ) ) { Q_strcat( info, sizeof( info ), va( "%i ", search->pack->pure_checksum ) ); checksum ^= search->pack->pure_checksum; numPaks++; } } } checksum ^= numPaks; Q_strcat( info, sizeof( info ), va( "%i ", checksum ) ); return info; }
C
OpenJK
0
CVE-2017-5112
https://www.cvedetails.com/cve/CVE-2017-5112/
CWE-119
https://github.com/chromium/chromium/commit/f6ac1dba5e36f338a490752a2cbef3339096d9fe
f6ac1dba5e36f338a490752a2cbef3339096d9fe
Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later. BUG=740603 TEST=new conformance test [email protected],[email protected] Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4 Reviewed-on: https://chromium-review.googlesource.com/570840 Reviewed-by: Antoine Labour <[email protected]> Reviewed-by: Kenneth Russell <[email protected]> Commit-Queue: Zhenyao Mo <[email protected]> Cr-Commit-Position: refs/heads/master@{#486518}
void WebGL2RenderingContextBase::resumeTransformFeedback() { if (isContextLost()) return; if (transform_feedback_binding_ && transform_feedback_binding_->GetProgram() != current_program_) { SynthesizeGLError(GL_INVALID_OPERATION, "resumeTransformFeedback", "the program object is not active"); return; } ContextGL()->ResumeTransformFeedback(); }
void WebGL2RenderingContextBase::resumeTransformFeedback() { if (isContextLost()) return; if (transform_feedback_binding_ && transform_feedback_binding_->GetProgram() != current_program_) { SynthesizeGLError(GL_INVALID_OPERATION, "resumeTransformFeedback", "the program object is not active"); return; } ContextGL()->ResumeTransformFeedback(); }
C
Chrome
0
CVE-2016-2543
https://www.cvedetails.com/cve/CVE-2016-2543/
null
https://github.com/torvalds/linux/commit/030e2c78d3a91dd0d27fef37e91950dde333eba1
030e2c78d3a91dd0d27fef37e91950dde333eba1
ALSA: seq: Fix missing NULL check at remove_events ioctl snd_seq_ioctl_remove_events() calls snd_seq_fifo_clear() unconditionally even if there is no FIFO assigned, and this leads to an Oops due to NULL dereference. The fix is just to add a proper NULL check. Reported-by: Dmitry Vyukov <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void __user *arg) { struct snd_seq_client_port *port; struct snd_seq_port_info info; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; if (info.addr.client != client->number) /* only set our own ports ! */ return -EPERM; port = snd_seq_port_use_ptr(client, info.addr.port); if (port) { snd_seq_set_port_info(port, &info); snd_seq_port_unlock(port); } return 0; }
static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void __user *arg) { struct snd_seq_client_port *port; struct snd_seq_port_info info; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; if (info.addr.client != client->number) /* only set our own ports ! */ return -EPERM; port = snd_seq_port_use_ptr(client, info.addr.port); if (port) { snd_seq_set_port_info(port, &info); snd_seq_port_unlock(port); } return 0; }
C
linux
0
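The ALSA message above describes a one-line class of fix: the remove_events ioctl called snd_seq_fifo_clear() unconditionally even when no FIFO was assigned, so the repair is simply a NULL check before the call. A generic self-contained C sketch of that shape:

#include <stddef.h>
#include <string.h>

struct fifo { size_t head, tail; };

static void fifo_clear(struct fifo *f) {
    memset(f, 0, sizeof(*f));     /* would oops in the kernel if f == NULL */
}

/* The shape of the fix: guard the optional resource before touching it. */
static void remove_events(struct fifo *maybe_fifo) {
    if (maybe_fifo)               /* the missing NULL check from the CVE */
        fifo_clear(maybe_fifo);
}

int main(void) {
    remove_events(NULL);          /* safe no-op instead of a NULL deref */
    return 0;
}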
CVE-2016-3760
https://www.cvedetails.com/cve/CVE-2016-3760/
CWE-20
https://android.googlesource.com/platform/system/bt/+/37c88107679d36c419572732b4af6e18bb2f7dce
37c88107679d36c419572732b4af6e18bb2f7dce
Add guest mode functionality (2/3) Add a flag to enable() to start Bluetooth in restricted mode. In restricted mode, all devices that are paired during restricted mode are deleted upon leaving restricted mode. Right now restricted mode is only entered while a guest user is active. Bug: 27410683 Change-Id: I8f23d28ef0aa3a8df13d469c73005c8e1b894d19
bool btif_config_set_bin(const char *section, const char *key, const uint8_t *value, size_t length) { const char *lookup = "0123456789abcdef"; assert(config != NULL); assert(section != NULL); assert(key != NULL); if (length > 0) assert(value != NULL); char *str = (char *)osi_calloc(length * 2 + 1); if (!str) return false; for (size_t i = 0; i < length; ++i) { str[(i * 2) + 0] = lookup[(value[i] >> 4) & 0x0F]; str[(i * 2) + 1] = lookup[value[i] & 0x0F]; } pthread_mutex_lock(&lock); config_set_string(config, section, key, str); pthread_mutex_unlock(&lock); osi_free(str); return true; }
bool btif_config_set_bin(const char *section, const char *key, const uint8_t *value, size_t length) { const char *lookup = "0123456789abcdef"; assert(config != NULL); assert(section != NULL); assert(key != NULL); if (length > 0) assert(value != NULL); char *str = (char *)osi_calloc(length * 2 + 1); if (!str) return false; for (size_t i = 0; i < length; ++i) { str[(i * 2) + 0] = lookup[(value[i] >> 4) & 0x0F]; str[(i * 2) + 1] = lookup[value[i] & 0x0F]; } pthread_mutex_lock(&lock); config_set_string(config, section, key, str); pthread_mutex_unlock(&lock); osi_free(str); return true; }
C
Android
0
CVE-2018-16513
https://www.cvedetails.com/cve/CVE-2018-16513/
CWE-704
http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=b326a71659b7837d3acde954b18bda1a6f5e9498
b326a71659b7837d3acde954b18bda1a6f5e9498
null
static int devicenvalidate(i_ctx_t *i_ctx_p, ref *space, float *values, int num_comps) { int i, code; ref narray; os_ptr op = osp; code = array_get(imemory, space, 1, &narray); if (code < 0) return code; if (!r_is_array(&narray)) return_error(gs_error_typecheck); if (num_comps < r_size(&narray)) return_error(gs_error_stackunderflow); op -= r_size(&narray) - 1; for (i=0;i < r_size(&narray); i++) { if (!r_has_type(op, t_integer) && !r_has_type(op, t_real)) return_error(gs_error_typecheck); if (values[i] > 1.0) values[i] = 1.0; if (values[i] < 0.0) values[i] = 0.0; op++; } return 0; }
static int devicenvalidate(i_ctx_t *i_ctx_p, ref *space, float *values, int num_comps) { int i, code; ref narray; os_ptr op = osp; code = array_get(imemory, space, 1, &narray); if (code < 0) return code; if (!r_is_array(&narray)) return_error(gs_error_typecheck); if (num_comps < r_size(&narray)) return_error(gs_error_stackunderflow); op -= r_size(&narray) - 1; for (i=0;i < r_size(&narray); i++) { if (!r_has_type(op, t_integer) && !r_has_type(op, t_real)) return_error(gs_error_typecheck); if (values[i] > 1.0) values[i] = 1.0; if (values[i] < 0.0) values[i] = 0.0; op++; } return 0; }
C
ghostscript
0
CVE-2018-20961
https://www.cvedetails.com/cve/CVE-2018-20961/
CWE-415
https://github.com/torvalds/linux/commit/7fafcfdf6377b18b2a726ea554d6e593ba44349f
7fafcfdf6377b18b2a726ea554d6e593ba44349f
USB: gadget: f_midi: fixing a possible double-free in f_midi It looks like there is a possibility of a double-free vulnerability on an error path of the f_midi_set_alt function in the f_midi driver. If the path is feasible then free_ep_req gets called twice: req->complete = f_midi_complete; err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC); => ... usb_gadget_giveback_request => f_midi_complete (CALLBACK) (inside f_midi_complete, for various cases of status) free_ep_req(ep, req); // first kfree if (err) { ERROR(midi, "%s: couldn't enqueue request: %d\n", midi->out_ep->name, err); free_ep_req(midi->out_ep, req); // second kfree return err; } The double-free possibility was introduced with commit ad0d1a058eac ("usb: gadget: f_midi: fix leak on failed to enqueue out requests"). Found by MOXCAFE tool. Signed-off-by: Tuba Yavuz <[email protected]> Fixes: ad0d1a058eac ("usb: gadget: f_midi: fix leak on failed to enqueue out requests") Acked-by: Felipe Balbi <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static void f_midi_drop_out_substreams(struct f_midi *midi) { unsigned int i; for (i = 0; i < midi->in_ports; i++) { struct gmidi_in_port *port = midi->in_ports_array + i; struct snd_rawmidi_substream *substream = port->substream; if (port->active && substream) snd_rawmidi_drop_output(substream); } }
static void f_midi_drop_out_substreams(struct f_midi *midi) { unsigned int i; for (i = 0; i < midi->in_ports; i++) { struct gmidi_in_port *port = midi->in_ports_array + i; struct snd_rawmidi_substream *substream = port->substream; if (port->active && substream) snd_rawmidi_drop_output(substream); } }
C
linux
0
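The f_midi message above quotes the vulnerable control flow directly: on a failed usb_ep_queue(), the completion callback may already have freed the request, after which the error path freed it a second time. The general rule is single ownership: once a request has been handed off, the error path must not assume it still owns it. A user-space C sketch of the corrected shape follows; the names are illustrative.

#include <stdlib.h>

struct req { char *buf; };

static void free_req(struct req *r) { free(r->buf); free(r); }

/* Hypothetical enqueue: on failure it invokes the completion path, which
 * (as in f_midi_complete) already frees the request. */
static int enqueue(struct req *r, int simulate_error) {
    if (simulate_error) {
        free_req(r);              /* completion path consumed the request */
        return -1;
    }
    return 0;
}

static int submit(int simulate_error) {
    struct req *r = malloc(sizeof(*r));
    if (!r) return -1;
    r->buf = malloc(64);
    if (!r->buf) { free(r); return -1; }

    if (enqueue(r, simulate_error) < 0) {
        /* FIX: do NOT free_req(r) here; ownership already passed to the
         * enqueue/completion machinery. Freeing again is CWE-415. */
        return -1;
    }
    free_req(r);                  /* success path still owns the request */
    return 0;
}

int main(void) { return submit(/*simulate_error=*/1) == -1 ? 0 : 1; }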
null
null
null
https://github.com/chromium/chromium/commit/19190765882e272a6a2162c89acdb29110f7e3cf
19190765882e272a6a2162c89acdb29110f7e3cf
Revert 102184 - [Sync] use base::Time in sync Make EntryKernel/Entry/BaseNode use base::Time instead of int64s. Add sync/util/time.h, with utility functions to manage the sync proto time format. Store times on disk in proto format instead of the local system. This requires a database version bump (to 77). Update SessionChangeProcessor/SessionModelAssociator to use base::Time, too. Remove hackish Now() function. Remove ZeroFields() function, and instead zero-initialize in EntryKernel::EntryKernel() directly. BUG= TEST= Review URL: http://codereview.chromium.org/7981006 [email protected] Review URL: http://codereview.chromium.org/7977034 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@102186 0039d316-1c4b-4281-b951-d872f2087c98
bool Get(int64 metahandle, syncable::BitField field) const { return GetField(metahandle, field, false); }
bool Get(int64 metahandle, syncable::BitField field) const { return GetField(metahandle, field, false); }
C
Chrome
0
CVE-2019-15538
https://www.cvedetails.com/cve/CVE-2019-15538/
CWE-399
https://github.com/torvalds/linux/commit/1fb254aa983bf190cfd685d40c64a480a9bafaee
1fb254aa983bf190cfd685d40c64a480a9bafaee
xfs: fix missing ILOCK unlock when xfs_setattr_nonsize fails due to EDQUOT Benjamin Moody reported to Debian that XFS partially wedges when a chgrp fails on account of being out of disk quota. I ran his reproducer script: # adduser dummy # adduser dummy plugdev # dd if=/dev/zero bs=1M count=100 of=test.img # mkfs.xfs test.img # mount -t xfs -o gquota test.img /mnt # mkdir -p /mnt/dummy # chown -c dummy /mnt/dummy # xfs_quota -xc 'limit -g bsoft=100k bhard=100k plugdev' /mnt (and then as user dummy) $ dd if=/dev/urandom bs=1M count=50 of=/mnt/dummy/foo $ chgrp plugdev /mnt/dummy/foo and saw: ================================================ WARNING: lock held when returning to user space! 5.3.0-rc5 #rc5 Tainted: G W ------------------------------------------------ chgrp/47006 is leaving the kernel with locks still held! 1 lock held by chgrp/47006: #0: 000000006664ea2d (&xfs_nondir_ilock_class){++++}, at: xfs_ilock+0xd2/0x290 [xfs] ...which is clearly caused by xfs_setattr_nonsize failing to unlock the ILOCK after the xfs_qm_vop_chown_reserve call fails. Add the missing unlock. Reported-by: [email protected] Fixes: 253f4911f297 ("xfs: better xfs_trans_alloc interface") Signed-off-by: Darrick J. Wong <[email protected]> Reviewed-by: Dave Chinner <[email protected]> Tested-by: Salvatore Bonaccorso <[email protected]>
xfs_setup_iops( struct xfs_inode *ip) { struct inode *inode = &ip->i_vnode; switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &xfs_inode_operations; inode->i_fop = &xfs_file_operations; if (IS_DAX(inode)) inode->i_mapping->a_ops = &xfs_dax_aops; else inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) inode->i_op = &xfs_dir_ci_inode_operations; else inode->i_op = &xfs_dir_inode_operations; inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: if (ip->i_df.if_flags & XFS_IFINLINE) inode->i_op = &xfs_inline_symlink_inode_operations; else inode->i_op = &xfs_symlink_inode_operations; break; default: inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } }
xfs_setup_iops( struct xfs_inode *ip) { struct inode *inode = &ip->i_vnode; switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &xfs_inode_operations; inode->i_fop = &xfs_file_operations; if (IS_DAX(inode)) inode->i_mapping->a_ops = &xfs_dax_aops; else inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) inode->i_op = &xfs_dir_ci_inode_operations; else inode->i_op = &xfs_dir_inode_operations; inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: if (ip->i_df.if_flags & XFS_IFINLINE) inode->i_op = &xfs_inline_symlink_inode_operations; else inode->i_op = &xfs_symlink_inode_operations; break; default: inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } }
C
linux
0
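The xfs report above ends with a lock held on return to userspace: xfs_setattr_nonsize took the ILOCK, then bailed out when xfs_qm_vop_chown_reserve failed with EDQUOT, without dropping it. The idiomatic C defense is a single unwinding exit that every failure path jumps to. A self-contained pthread sketch of that shape:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;

static int reserve_quota(int fail) { return fail ? -1 : 0; }  /* stand-in */

static int setattr_demo(int make_quota_fail) {
    int error;

    pthread_mutex_lock(&ilock);

    error = reserve_quota(make_quota_fail);
    if (error)
        goto out_unlock;   /* the bug returned here with ilock still held */

    /* ... modify the inode under the lock ... */

out_unlock:
    pthread_mutex_unlock(&ilock);   /* every path, success or error */
    return error;
}

int main(void) {
    printf("error=%d\n", setattr_demo(1));
    printf("error=%d\n", setattr_demo(0));  /* lock was released: no deadlock */
    return 0;
}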
CVE-2016-3760
https://www.cvedetails.com/cve/CVE-2016-3760/
CWE-20
https://android.googlesource.com/platform/packages/apps/Bluetooth/+/122feb9a0b04290f55183ff2f0384c6c53756bd8
122feb9a0b04290f55183ff2f0384c6c53756bd8
Add guest mode functionality (3/3) Add a flag to enable() to start Bluetooth in restricted mode. In restricted mode, all devices that are paired during restricted mode are deleted upon leaving restricted mode. Right now restricted mode is only entered while a guest user is active. Bug: 27410683 Change-Id: If4a8855faf362d7f6de509d7ddc7197d1ac75cee
static int getConnectionStateNative(JNIEnv* env, jobject obj, jbyteArray address) { ALOGV("%s:",__FUNCTION__); if (!sBluetoothInterface) return JNI_FALSE; jbyte *addr = env->GetByteArrayElements(address, NULL); if (addr == NULL) { jniThrowIOException(env, EINVAL); return JNI_FALSE; } int ret = sBluetoothInterface->get_connection_state((bt_bdaddr_t *)addr); env->ReleaseByteArrayElements(address, addr, 0); return ret; }
static int getConnectionStateNative(JNIEnv* env, jobject obj, jbyteArray address) { ALOGV("%s:",__FUNCTION__); if (!sBluetoothInterface) return JNI_FALSE; jbyte *addr = env->GetByteArrayElements(address, NULL); if (addr == NULL) { jniThrowIOException(env, EINVAL); return JNI_FALSE; } int ret = sBluetoothInterface->get_connection_state((bt_bdaddr_t *)addr); env->ReleaseByteArrayElements(address, addr, 0); return ret; }
C
Android
0
CVE-2017-5120
https://www.cvedetails.com/cve/CVE-2017-5120/
null
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
b7277af490d28ac7f802c015bb0ff31395768556
bindings: Support "attribute FrozenArray<T>?" Adds a quick hack to support a case of "attribute FrozenArray<T>?". Bug: 1028047 Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866 Reviewed-by: Hitoshi Yoshida <[email protected]> Commit-Queue: Yuki Shiino <[email protected]> Cr-Commit-Position: refs/heads/master@{#718676}
static void CallWithScriptStateVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* impl = V8TestObject::ToImpl(info.Holder()); ScriptState* script_state = ScriptState::ForRelevantRealm(info); impl->callWithScriptStateVoidMethod(script_state); }
static void CallWithScriptStateVoidMethodMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* impl = V8TestObject::ToImpl(info.Holder()); ScriptState* script_state = ScriptState::ForRelevantRealm(info); impl->callWithScriptStateVoidMethod(script_state); }
C
Chrome
0
CVE-2013-2548
https://www.cvedetails.com/cve/CVE-2013-2548/
CWE-310
https://github.com/torvalds/linux/commit/9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
9a5467bf7b6e9e02ec9c3da4e23747c05faeaac6
crypto: user - fix info leaks in report API Three errors resulting in kernel memory disclosure: 1/ The structures used for the netlink based crypto algorithm report API are located on the stack. As snprintf() does not fill the remainder of the buffer with null bytes, those stack bytes will be disclosed to users of the API. Switch to strncpy() to fix this. 2/ crypto_report_one() does not initialize all fields of struct crypto_user_alg. Fix this to fix the heap info leak. 3/ For the module name we should copy only as many bytes as module_name() returns -- not as much as the destination buffer could hold. But the current code does not and therefore copies random data from behind the end of the module name, as the module name is always shorter than CRYPTO_MAX_ALG_NAME. Also switch to use strncpy() to copy the algorithm's name and driver_name. They are strings, after all. Signed-off-by: Mathias Krause <[email protected]> Cc: Steffen Klassert <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
static inline int ablkcipher_next_fast(struct ablkcipher_request *req, struct ablkcipher_walk *walk) { walk->src.page = scatterwalk_page(&walk->in); walk->src.offset = offset_in_page(walk->in.offset); walk->dst.page = scatterwalk_page(&walk->out); walk->dst.offset = offset_in_page(walk->out.offset); return 0; }
static inline int ablkcipher_next_fast(struct ablkcipher_request *req, struct ablkcipher_walk *walk) { walk->src.page = scatterwalk_page(&walk->in); walk->src.offset = offset_in_page(walk->in.offset); walk->dst.page = scatterwalk_page(&walk->out); walk->dst.offset = offset_in_page(walk->out.offset); return 0; }
C
linux
0
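The three leaks listed in the commit message for the record above share one root cause: a report structure copied to user space with some of its bytes never written. A minimal sketch of the fix pattern follows -- zero the whole structure up front, then fill string fields with strncpy(). The struct layout and field sizes here are simplified stand-ins, not the real crypto_user_alg definition.

#include <string.h>

/* Hypothetical report structure; the real crypto_user_alg differs. */
struct alg_report {
	char name[64];
	char driver_name[64];
	char module_name[64];
	unsigned int flags;
};

static void fill_report(struct alg_report *r, const char *name,
			const char *driver, const char *module)
{
	/* Zero everything up front so padding and never-written fields
	 * cannot leak stale stack or heap bytes to user space. */
	memset(r, 0, sizeof(*r));

	/* strncpy() NUL-pads the rest of each buffer, unlike snprintf(),
	 * which leaves bytes past the terminator untouched. */
	strncpy(r->name, name, sizeof(r->name) - 1);
	strncpy(r->driver_name, driver, sizeof(r->driver_name) - 1);
	strncpy(r->module_name, module, sizeof(r->module_name) - 1);
}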
CVE-2018-7186
https://www.cvedetails.com/cve/CVE-2018-7186/
CWE-119
https://github.com/DanBloomberg/leptonica/commit/ee301cb2029db8a6289c5295daa42bba7715e99a
ee301cb2029db8a6289c5295daa42bba7715e99a
Security fixes: expect final changes for release 1.75.3. * Fixed a Debian security issue with fscanf() reading a string with possible buffer overflow. * There were also a few similar situations with sscanf().
selCopy(SEL *sel) { l_int32 sx, sy, cx, cy, i, j; SEL *csel; PROCNAME("selCopy"); if (!sel) return (SEL *)ERROR_PTR("sel not defined", procName, NULL); if ((csel = (SEL *)LEPT_CALLOC(1, sizeof(SEL))) == NULL) return (SEL *)ERROR_PTR("csel not made", procName, NULL); selGetParameters(sel, &sy, &sx, &cy, &cx); csel->sy = sy; csel->sx = sx; csel->cy = cy; csel->cx = cx; if ((csel->data = create2dIntArray(sy, sx)) == NULL) { LEPT_FREE(csel); return (SEL *)ERROR_PTR("sel data not made", procName, NULL); } for (i = 0; i < sy; i++) for (j = 0; j < sx; j++) csel->data[i][j] = sel->data[i][j]; if (sel->name) csel->name = stringNew(sel->name); return csel; }
selCopy(SEL *sel) { l_int32 sx, sy, cx, cy, i, j; SEL *csel; PROCNAME("selCopy"); if (!sel) return (SEL *)ERROR_PTR("sel not defined", procName, NULL); if ((csel = (SEL *)LEPT_CALLOC(1, sizeof(SEL))) == NULL) return (SEL *)ERROR_PTR("csel not made", procName, NULL); selGetParameters(sel, &sy, &sx, &cy, &cx); csel->sy = sy; csel->sx = sx; csel->cy = cy; csel->cx = cx; if ((csel->data = create2dIntArray(sy, sx)) == NULL) { LEPT_FREE(csel); return (SEL *)ERROR_PTR("sel data not made", procName, NULL); } for (i = 0; i < sy; i++) for (j = 0; j < sx; j++) csel->data[i][j] = sel->data[i][j]; if (sel->name) csel->name = stringNew(sel->name); return csel; }
C
leptonica
0
CVE-2018-16427
https://www.cvedetails.com/cve/CVE-2018-16427/
CWE-125
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
construct_mac_tlv(struct sc_card *card, unsigned char *apdu_buf, size_t data_tlv_len, size_t le_tlv_len, unsigned char *mac_tlv, size_t * mac_tlv_len, const unsigned char key_type) { size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8); unsigned char mac[4096] = { 0 }; size_t mac_len; unsigned char icv[16] = { 0 }; int i = (KEY_TYPE_AES == key_type ? 15 : 7); epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if (0 == data_tlv_len && 0 == le_tlv_len) { mac_len = block_size; } else { /* padding */ *(apdu_buf + block_size + data_tlv_len + le_tlv_len) = 0x80; if ((data_tlv_len + le_tlv_len + 1) % block_size) mac_len = (((data_tlv_len + le_tlv_len + 1) / block_size) + 1) * block_size + block_size; else mac_len = data_tlv_len + le_tlv_len + 1 + block_size; memset((apdu_buf + block_size + data_tlv_len + le_tlv_len + 1), 0, (mac_len - (data_tlv_len + le_tlv_len + 1))); } /* increase icv */ for (; i >= 0; i--) { if (exdata->icv_mac[i] == 0xff) { exdata->icv_mac[i] = 0; } else { exdata->icv_mac[i]++; break; } } /* calculate MAC */ memset(icv, 0, sizeof(icv)); memcpy(icv, exdata->icv_mac, 16); if (KEY_TYPE_AES == key_type) { aes128_encrypt_cbc(exdata->sk_mac, 16, icv, apdu_buf, mac_len, mac); memcpy(mac_tlv + 2, &mac[mac_len - 16], 8); } else { unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 }; unsigned char tmp[8] = { 0 }; des_encrypt_cbc(exdata->sk_mac, 8, icv, apdu_buf, mac_len, mac); des_decrypt_cbc(&exdata->sk_mac[8], 8, iv, &mac[mac_len - 8], 8, tmp); memset(iv, 0x00, sizeof iv); des_encrypt_cbc(exdata->sk_mac, 8, iv, tmp, 8, mac_tlv + 2); } *mac_tlv_len = 2 + 8; return 0; }
construct_mac_tlv(struct sc_card *card, unsigned char *apdu_buf, size_t data_tlv_len, size_t le_tlv_len, unsigned char *mac_tlv, size_t * mac_tlv_len, const unsigned char key_type) { size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8); unsigned char mac[4096] = { 0 }; size_t mac_len; unsigned char icv[16] = { 0 }; int i = (KEY_TYPE_AES == key_type ? 15 : 7); epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if (0 == data_tlv_len && 0 == le_tlv_len) { mac_len = block_size; } else { /* padding */ *(apdu_buf + block_size + data_tlv_len + le_tlv_len) = 0x80; if ((data_tlv_len + le_tlv_len + 1) % block_size) mac_len = (((data_tlv_len + le_tlv_len + 1) / block_size) + 1) * block_size + block_size; else mac_len = data_tlv_len + le_tlv_len + 1 + block_size; memset((apdu_buf + block_size + data_tlv_len + le_tlv_len + 1), 0, (mac_len - (data_tlv_len + le_tlv_len + 1))); } /* increase icv */ for (; i >= 0; i--) { if (exdata->icv_mac[i] == 0xff) { exdata->icv_mac[i] = 0; } else { exdata->icv_mac[i]++; break; } } /* calculate MAC */ memset(icv, 0, sizeof(icv)); memcpy(icv, exdata->icv_mac, 16); if (KEY_TYPE_AES == key_type) { aes128_encrypt_cbc(exdata->sk_mac, 16, icv, apdu_buf, mac_len, mac); memcpy(mac_tlv + 2, &mac[mac_len - 16], 8); } else { unsigned char iv[8] = { 0 }; unsigned char tmp[8] = { 0 }; des_encrypt_cbc(exdata->sk_mac, 8, icv, apdu_buf, mac_len, mac); des_decrypt_cbc(&exdata->sk_mac[8], 8, iv, &mac[mac_len - 8], 8, tmp); memset(iv, 0x00, 8); des_encrypt_cbc(exdata->sk_mac, 8, iv, tmp, 8, mac_tlv + 2); } *mac_tlv_len = 2 + 8; return 0; }
C
OpenSC
1
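The functional difference between func_after and func_before in the record above is easy to miss: the DES branch's iv scratch buffer grows from a hard-coded 8 bytes to EVP_MAX_IV_LENGTH, and the matching memset switches to sizeof. A hedged sketch of that defensive declaration style, not the OpenSC code itself:

#include <string.h>
#include <openssl/evp.h>

void iv_demo(void)
{
	/* Size IV scratch storage by the library's declared maximum
	 * (EVP_MAX_IV_LENGTH, 16 in OpenSSL) rather than by the 8 bytes
	 * one cipher needs, so a larger block size cannot read or write
	 * past the end of the buffer. */
	unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 };

	/* ... hand iv to the cipher routines ... */

	/* Clear with sizeof so the length always tracks the declaration. */
	memset(iv, 0x00, sizeof iv);
}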
CVE-2010-2520
https://www.cvedetails.com/cve/CVE-2010-2520/
CWE-119
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=888cd1843e935fe675cf2ac303116d4ed5b9d54b
888cd1843e935fe675cf2ac303116d4ed5b9d54b
null
Ins_ROLL( INS_ARG ) { FT_Long A, B, C; FT_UNUSED_EXEC; A = args[2]; B = args[1]; C = args[0]; args[2] = C; args[1] = A; args[0] = B; }
Ins_ROLL( INS_ARG ) { FT_Long A, B, C; FT_UNUSED_EXEC; A = args[2]; B = args[1]; C = args[0]; args[2] = C; args[1] = A; args[0] = B; }
C
savannah
0
CVE-2017-7533
https://www.cvedetails.com/cve/CVE-2017-7533/
CWE-362
https://github.com/torvalds/linux/commit/49d31c2f389acfe83417083e1208422b4091cd9e
49d31c2f389acfe83417083e1208422b4091cd9e
dentry name snapshots take_dentry_name_snapshot() takes a safe snapshot of dentry name; if the name is a short one, it gets copied into caller-supplied structure, otherwise an extra reference to external name is grabbed (those are never modified). In either case the pointer to stable string is stored into the same structure. dentry must be held by the caller of take_dentry_name_snapshot(), but may be freely dropped afterwards - the snapshot will stay until destroyed by release_dentry_name_snapshot(). Intended use: struct name_snapshot s; take_dentry_name_snapshot(&s, dentry); ... access s.name ... release_dentry_name_snapshot(&s); Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name to pass down with event. Signed-off-by: Al Viro <[email protected]>
static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file) { struct path path; int error = path_lookupat(nd, flags, &path); if (!error) { audit_inode(nd->name, path.dentry, 0); error = vfs_open(&path, file, current_cred()); path_put(&path); } return error; }
static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file) { struct path path; int error = path_lookupat(nd, flags, &path); if (!error) { audit_inode(nd->name, path.dentry, 0); error = vfs_open(&path, file, current_cred()); path_put(&path); } return error; }
C
linux
0
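The "Intended use" fragment embedded in this record's commit message reads more clearly laid out as code. This is the same usage pattern the message itself describes, only reformatted; name_snapshot and the two helpers are the APIs the commit introduces:

struct name_snapshot s;

take_dentry_name_snapshot(&s, dentry);   /* dentry must be held here */
/* ... access s.name; per the commit message, the dentry may be freely
 * dropped after the call above -- the snapshot stays valid until the
 * release below ... */
release_dentry_name_snapshot(&s);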
CVE-2018-12896
https://www.cvedetails.com/cve/CVE-2018-12896/
CWE-190
https://github.com/torvalds/linux/commit/78c9c4dfbf8c04883941445a195276bb4bb92c76
78c9c4dfbf8c04883941445a195276bb4bb92c76
posix-timers: Sanitize overrun handling The posix timer overrun handling is broken because the forwarding functions can return a huge number of overruns which does not fit in an int. As a consequence timer_getoverrun(2) and siginfo::si_overrun can turn into random number generators. The k_clock::timer_forward() callbacks return a 64 bit value now. Make k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal accounting is correct. Remove the temporary (int) casts. Add a helper function which clamps the overrun value returned to user space via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value between 0 and INT_MAX. INT_MAX is an indicator for user space that the overrun value has been clamped. Reported-by: Team OWL337 <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: John Stultz <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Michael Kerrisk <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) { *tp = ktime_to_timespec64(KTIME_LOW_RES); return 0; }
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) { *tp = ktime_to_timespec64(KTIME_LOW_RES); return 0; }
C
linux
0
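The helper described in this record's commit message clamps a now 64-bit overrun count into the int that timer_getoverrun(2) and siginfo::si_overrun expose. A sketch of that clamping; the name and exact signature are illustrative, not necessarily what was merged into the kernel:

#include <limits.h>

/* Clamp a 64-bit overrun count to [0, INT_MAX]; INT_MAX doubles as
 * the marker telling user space the value was clamped. */
static int overrun_to_int(long long overrun)
{
	if (overrun < 0)
		return 0;
	if (overrun > INT_MAX)
		return INT_MAX;
	return (int)overrun;
}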
CVE-2011-2495
https://www.cvedetails.com/cve/CVE-2011-2495/
CWE-264
https://github.com/torvalds/linux/commit/1d1221f375c94ef961ba8574ac4f85c8870ddd51
1d1221f375c94ef961ba8574ac4f85c8870ddd51
proc: restrict access to /proc/PID/io /proc/PID/io may be used for gathering private information. E.g. for openssh and vsftpd daemons wchars/rchars may be used to learn the precise password length. Restrict it to processes being able to ptrace the target process. ptrace_may_access() is needed to prevent keeping open file descriptor of "io" file, executing setuid binary and gathering io information of the setuid'ed process. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void unlock_trace(struct task_struct *task) { mutex_unlock(&task->signal->cred_guard_mutex); }
static void unlock_trace(struct task_struct *task) { mutex_unlock(&task->signal->cred_guard_mutex); }
C
linux
0
CVE-2017-5104
https://www.cvedetails.com/cve/CVE-2017-5104/
CWE-20
https://github.com/chromium/chromium/commit/adca986a53b31b6da4cb22f8e755f6856daea89a
adca986a53b31b6da4cb22f8e755f6856daea89a
Don't show current RenderWidgetHostView while interstitial is showing. Also moves interstitial page tracking from RenderFrameHostManager to WebContents, since interstitial pages are not frame-specific. This was necessary for subframes to detect if an interstitial page is showing. BUG=729105 TEST=See comment 13 of bug for repro steps CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation Review-Url: https://codereview.chromium.org/2938313002 Cr-Commit-Position: refs/heads/master@{#480117}
void InterstitialPageImpl::UnderlyingContentObserver::WebContentsDestroyed() { interstitial_->OnNavigatingAwayOrTabClosing(); }
void InterstitialPageImpl::UnderlyingContentObserver::WebContentsDestroyed() { interstitial_->OnNavigatingAwayOrTabClosing(); }
C
Chrome
0
CVE-2015-6763
https://www.cvedetails.com/cve/CVE-2015-6763/
null
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did a similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's synchronized with RenderWidgetHostViewMac, otherwise race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <[email protected]> Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Avi Drissman <[email protected]> Reviewed-by: Peter Kasting <[email protected]> Cr-Commit-Position: refs/heads/master@{#542517}
bool Textfield::OnMousePressed(const ui::MouseEvent& event) { const bool had_focus = HasFocus(); bool handled = controller_ && controller_->HandleMouseEvent(this, event); if (!handled && (event.IsOnlyLeftMouseButton() || event.IsOnlyRightMouseButton())) { if (!had_focus) RequestFocus(); ShowImeIfNeeded(); } #if defined(OS_LINUX) && !defined(OS_CHROMEOS) if (!handled && !had_focus && event.IsOnlyMiddleMouseButton()) RequestFocus(); #endif return selection_controller_.OnMousePressed( event, handled, had_focus ? SelectionController::FOCUSED : SelectionController::UNFOCUSED); }
bool Textfield::OnMousePressed(const ui::MouseEvent& event) { const bool had_focus = HasFocus(); bool handled = controller_ && controller_->HandleMouseEvent(this, event); if (!handled && (event.IsOnlyLeftMouseButton() || event.IsOnlyRightMouseButton())) { if (!had_focus) RequestFocus(); ShowImeIfNeeded(); } #if defined(OS_LINUX) && !defined(OS_CHROMEOS) if (!handled && !had_focus && event.IsOnlyMiddleMouseButton()) RequestFocus(); #endif return selection_controller_.OnMousePressed( event, handled, had_focus ? SelectionController::FOCUSED : SelectionController::UNFOCUSED); }
C
Chrome
0
CVE-2014-1743
https://www.cvedetails.com/cve/CVE-2014-1743/
CWE-399
https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9
6d9425ec7badda912555d46ea7abcfab81fdd9b9
sync compositor: pass simple gfx types by const ref See bug for reasoning BUG=159273 Review URL: https://codereview.chromium.org/1417893006 Cr-Commit-Position: refs/heads/master@{#356653}
bool SynchronousCompositorOutputSurface::CalledOnValidThread() const { return thread_checker_.CalledOnValidThread(); }
bool SynchronousCompositorOutputSurface::CalledOnValidThread() const { return thread_checker_.CalledOnValidThread(); }
C
Chrome
0
CVE-2012-2143
https://www.cvedetails.com/cve/CVE-2012-2143/
CWE-310
https://git.postgresql.org/gitweb/?p=postgresql.git&a=commit&h=932ded2ed51e8333852e370c7a6dad75d9f236f9
932ded2ed51e8333852e370c7a6dad75d9f236f9
null
px_crypt_des(const char *key, const char *setting) { int i; uint32 count, salt, l, r0, r1, keybuf[2]; char *p; uint8 *q; static char output[21]; if (!des_initialised) des_init(); /* * Copy the key, shifting each character up by one bit and padding with * zeros. */ q = (uint8 *) keybuf; while (q - (uint8 *) keybuf - 8) { *q++ = *key << 1; if (*key != '\0') key++; } if (des_setkey((char *) keybuf)) #ifndef DISABLE_XDES if (*setting == _PASSWORD_EFMT1) { /* * "new"-style: setting - underscore, 4 bytes of count, 4 bytes of * salt key - unlimited characters */ for (i = 1, count = 0L; i < 5; i++) count |= ascii_to_bin(setting[i]) << (i - 1) * 6; for (i = 5, salt = 0L; i < 9; i++) salt |= ascii_to_bin(setting[i]) << (i - 5) * 6; while (*key) { /* * Encrypt the key with itself. */ if (des_cipher((char *) keybuf, (char *) keybuf, 0L, 1)) return (NULL); /* * And XOR with the next 8 characters of the key. */ q = (uint8 *) keybuf; while (q - (uint8 *) keybuf - 8 && *key) *q++ ^= *key++ << 1; if (des_setkey((char *) keybuf)) return (NULL); } strncpy(output, setting, 9); /* * Double check that we weren't given a short setting. If we were, the * above code will probably have created weird values for count and * salt, but we don't really care. Just make sure the output string * doesn't have an extra NUL in it. */ output[9] = '\0'; p = output + strlen(output); } else #endif /* !DISABLE_XDES */ { /* * "old"-style: setting - 2 bytes of salt key - up to 8 characters */ count = 25; salt = (ascii_to_bin(setting[1]) << 6) | ascii_to_bin(setting[0]); output[0] = setting[0]; /* * If the encrypted password that the salt was extracted from is only * 1 character long, the salt will be corrupted. We need to ensure * that the output string doesn't have an extra NUL in it! */ output[1] = setting[1] ? setting[1] : output[0]; p = output + 2; } setup_salt(salt); /* * Do it. */ if (do_des(0L, 0L, &r0, &r1, count)) return (NULL); /* * Now encode the result... */ l = (r0 >> 8); *p++ = _crypt_a64[(l >> 18) & 0x3f]; *p++ = _crypt_a64[(l >> 12) & 0x3f]; *p++ = _crypt_a64[(l >> 6) & 0x3f]; *p++ = _crypt_a64[l & 0x3f]; l = (r0 << 16) | ((r1 >> 16) & 0xffff); *p++ = _crypt_a64[(l >> 18) & 0x3f]; *p++ = _crypt_a64[(l >> 12) & 0x3f]; *p++ = _crypt_a64[(l >> 6) & 0x3f]; *p++ = _crypt_a64[l & 0x3f]; l = r1 << 2; *p++ = _crypt_a64[(l >> 12) & 0x3f]; *p++ = _crypt_a64[(l >> 6) & 0x3f]; *p++ = _crypt_a64[l & 0x3f]; *p = 0; return (output); }
px_crypt_des(const char *key, const char *setting) { int i; uint32 count, salt, l, r0, r1, keybuf[2]; char *p; uint8 *q; static char output[21]; if (!des_initialised) des_init(); /* * Copy the key, shifting each character up by one bit and padding with * zeros. */ q = (uint8 *) keybuf; while (q - (uint8 *) keybuf - 8) { if ((*q++ = *key << 1)) key++; } if (des_setkey((char *) keybuf)) #ifndef DISABLE_XDES if (*setting == _PASSWORD_EFMT1) { /* * "new"-style: setting - underscore, 4 bytes of count, 4 bytes of * salt key - unlimited characters */ for (i = 1, count = 0L; i < 5; i++) count |= ascii_to_bin(setting[i]) << (i - 1) * 6; for (i = 5, salt = 0L; i < 9; i++) salt |= ascii_to_bin(setting[i]) << (i - 5) * 6; while (*key) { /* * Encrypt the key with itself. */ if (des_cipher((char *) keybuf, (char *) keybuf, 0L, 1)) return (NULL); /* * And XOR with the next 8 characters of the key. */ q = (uint8 *) keybuf; while (q - (uint8 *) keybuf - 8 && *key) *q++ ^= *key++ << 1; if (des_setkey((char *) keybuf)) return (NULL); } strncpy(output, setting, 9); /* * Double check that we weren't given a short setting. If we were, the * above code will probably have created weird values for count and * salt, but we don't really care. Just make sure the output string * doesn't have an extra NUL in it. */ output[9] = '\0'; p = output + strlen(output); } else #endif /* !DISABLE_XDES */ { /* * "old"-style: setting - 2 bytes of salt key - up to 8 characters */ count = 25; salt = (ascii_to_bin(setting[1]) << 6) | ascii_to_bin(setting[0]); output[0] = setting[0]; /* * If the encrypted password that the salt was extracted from is only * 1 character long, the salt will be corrupted. We need to ensure * that the output string doesn't have an extra NUL in it! */ output[1] = setting[1] ? setting[1] : output[0]; p = output + 2; } setup_salt(salt); /* * Do it. */ if (do_des(0L, 0L, &r0, &r1, count)) return (NULL); /* * Now encode the result... */ l = (r0 >> 8); *p++ = _crypt_a64[(l >> 18) & 0x3f]; *p++ = _crypt_a64[(l >> 12) & 0x3f]; *p++ = _crypt_a64[(l >> 6) & 0x3f]; *p++ = _crypt_a64[l & 0x3f]; l = (r0 << 16) | ((r1 >> 16) & 0xffff); *p++ = _crypt_a64[(l >> 18) & 0x3f]; *p++ = _crypt_a64[(l >> 12) & 0x3f]; *p++ = _crypt_a64[(l >> 6) & 0x3f]; *p++ = _crypt_a64[l & 0x3f]; l = r1 << 2; *p++ = _crypt_a64[(l >> 12) & 0x3f]; *p++ = _crypt_a64[(l >> 6) & 0x3f]; *p++ = _crypt_a64[l & 0x3f]; *p = 0; return (output); }
C
postgresql
1
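The behavioural difference between func_after and func_before in the record above sits entirely in the key-copy loop, and it is subtle: the old loop advanced the key pointer based on the shifted byte truncated to eight bits, so an input byte of 0x80 shifts to 0x00 and silently truncates the key, while the fix tests the unshifted byte and pads the remainder with zeros. A self-contained demo of the two loops (assuming the 8-byte buffer of the original):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned char key[] = { 0x80, 'a', 'b', 0 };
	uint8_t buf_old[8] = { 0 }, buf_new[8] = { 0 };
	const unsigned char *k;
	uint8_t *q;

	/* Old loop: 0x80 << 1 truncates to 0x00 in the uint8 store, the
	 * if() sees zero, k never advances, and 'a'/'b' are lost. */
	k = key;
	q = buf_old;
	while (q - buf_old < 8)
		if ((*q++ = (uint8_t)(*k << 1)))
			k++;

	/* Fixed loop: advance on the unshifted byte, pad with zeros. */
	k = key;
	q = buf_new;
	while (q - buf_new < 8) {
		*q++ = (uint8_t)(*k << 1);
		if (*k != '\0')
			k++;
	}

	/* Prints "old: 00 00 00  new: 00 c2 c4". */
	printf("old: %02x %02x %02x  new: %02x %02x %02x\n",
	       buf_old[0], buf_old[1], buf_old[2],
	       buf_new[0], buf_new[1], buf_new[2]);
	return 0;
}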
CVE-2017-0380
https://www.cvedetails.com/cve/CVE-2017-0380/
CWE-532
https://github.com/torproject/tor/commit/09ea89764a4d3a907808ed7d4fe42abfe64bd486
09ea89764a4d3a907808ed7d4fe42abfe64bd486
Fix log-uninitialized-stack bug in rend_service_intro_established. Fixes bug 23490; bugfix on 0.2.7.2-alpha. TROVE-2017-008 CVE-2017-0380
rend_service_requires_uptime(rend_service_t *service) { int i; rend_service_port_config_t *p; for (i=0; i < smartlist_len(service->ports); ++i) { p = smartlist_get(service->ports, i); if (smartlist_contains_int_as_string(get_options()->LongLivedPorts, p->virtual_port)) return 1; } return 0; }
rend_service_requires_uptime(rend_service_t *service) { int i; rend_service_port_config_t *p; for (i=0; i < smartlist_len(service->ports); ++i) { p = smartlist_get(service->ports, i); if (smartlist_contains_int_as_string(get_options()->LongLivedPorts, p->virtual_port)) return 1; } return 0; }
C
tor
0
CVE-2016-3861
https://www.cvedetails.com/cve/CVE-2016-3861/
CWE-119
https://android.googlesource.com/platform/system/core/+/ecf5fd58a8f50362ce9e8d4245a33d56f29f142b
ecf5fd58a8f50362ce9e8d4245a33d56f29f142b
libutils/Unicode.cpp: Correct length computation and add checks for utf16->utf8 Inconsistent behaviour between utf16_to_utf8 and utf16_to_utf8_length is causing a heap overflow. Correcting the length computation and adding bound checks to the conversion functions. Test: ran libutils_tests Bug: 29250543 Change-Id: I6115e3357141ed245c63c6eb25fc0fd0a9a7a2bb (cherry picked from commit c4966a363e46d2e1074d1a365e232af0dcedd6a1)
char* String8::lockBuffer(size_t size) { SharedBuffer* buf = SharedBuffer::bufferFromData(mString) ->editResize(size+1); if (buf) { char* str = (char*)buf->data(); mString = str; return str; } return NULL; }
char* String8::lockBuffer(size_t size) { SharedBuffer* buf = SharedBuffer::bufferFromData(mString) ->editResize(size+1); if (buf) { char* str = (char*)buf->data(); mString = str; return str; } return NULL; }
C
Android
0
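The defence described in this record's commit message has two halves: the length pass and the conversion pass must use the same per-unit arithmetic, and the converter must additionally honor an explicit output bound. A sketch of the length-pass arithmetic for UTF-16 to UTF-8 -- a generic illustration, not the libutils code:

#include <stddef.h>
#include <stdint.h>

static size_t utf16_to_utf8_len(const uint16_t *src, size_t len)
{
	size_t out = 0;

	for (size_t i = 0; i < len; i++) {
		uint16_t c = src[i];

		if (c >= 0xD800 && c <= 0xDBFF && i + 1 < len &&
		    src[i + 1] >= 0xDC00 && src[i + 1] <= 0xDFFF) {
			out += 4;  /* valid surrogate pair -> 4 bytes */
			i++;       /* consume the low surrogate too */
		} else if (c < 0x80) {
			out += 1;  /* ASCII */
		} else if (c < 0x800) {
			out += 2;
		} else {
			out += 3;  /* rest of the BMP, incl. unpaired
			            * surrogates in this lenient sketch */
		}
	}
	return out;
}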
CVE-2016-1665
https://www.cvedetails.com/cve/CVE-2016-1665/
CWE-20
https://github.com/chromium/chromium/commit/282f53ffdc3b1902da86f6a0791af736837efbf8
282f53ffdc3b1902da86f6a0791af736837efbf8
[signin] Add metrics to track the source for refresh token updated events This CL add a source for update and revoke credentials operations. It then surfaces the source in the chrome://signin-internals page. This CL also records the following histograms that track refresh token events: * Signin.RefreshTokenUpdated.ToValidToken.Source * Signin.RefreshTokenUpdated.ToInvalidToken.Source * Signin.RefreshTokenRevoked.Source These histograms are needed to validate the assumptions of how often tokens are revoked by the browser and the sources for the token revocations. Bug: 896182 Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90 Reviewed-on: https://chromium-review.googlesource.com/c/1286464 Reviewed-by: Jochen Eisinger <[email protected]> Reviewed-by: David Roger <[email protected]> Reviewed-by: Ilya Sherman <[email protected]> Commit-Queue: Mihai Sardarescu <[email protected]> Cr-Commit-Position: refs/heads/master@{#606181}
base::FilePath GetBlacklistPath() { base::FilePath blacklist_dir; base::PathService::Get(chrome::DIR_USER_DATA, &blacklist_dir); return blacklist_dir.AppendASCII(kBlacklistFilename); }
base::FilePath GetBlacklistPath() { base::FilePath blacklist_dir; base::PathService::Get(chrome::DIR_USER_DATA, &blacklist_dir); return blacklist_dir.AppendASCII(kBlacklistFilename); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/1161a49d663dd395bd639549c2dfe7324f847938
1161a49d663dd395bd639549c2dfe7324f847938
Don't populate URL data in WebDropData when dragging files. This is considered a potential security issue as well, since it leaks filesystem paths. BUG=332579 Review URL: https://codereview.chromium.org/135633002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244538 0039d316-1c4b-4281-b951-d872f2087c98
Tab* TabStrip::FindTabForEventFrom(const gfx::Point& point, int start, int delta) { if (start == tab_count()) start += delta; for (int i = start; i >= 0 && i < tab_count(); i += delta) { if (IsPointInTab(tab_at(i), point)) return tab_at(i); } return NULL; }
Tab* TabStrip::FindTabForEventFrom(const gfx::Point& point, int start, int delta) { if (start == tab_count()) start += delta; for (int i = start; i >= 0 && i < tab_count(); i += delta) { if (IsPointInTab(tab_at(i), point)) return tab_at(i); } return NULL; }
C
Chrome
0
CVE-2018-17206
https://www.cvedetails.com/cve/CVE-2018-17206/
null
https://github.com/openvswitch/ovs/commit/9237a63c47bd314b807cda0bd2216264e82edbe8
9237a63c47bd314b807cda0bd2216264e82edbe8
ofp-actions: Avoid buffer overread in BUNDLE action decoding. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9052 Signed-off-by: Ben Pfaff <[email protected]> Acked-by: Justin Pettit <[email protected]>
decode_NXAST_RAW_RESUBMIT_TABLE(const struct nx_action_resubmit *nar, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { struct ofpact_resubmit *resubmit; if (nar->pad[0] || nar->pad[1] || nar->pad[2]) { return OFPERR_OFPBAC_BAD_ARGUMENT; } resubmit = ofpact_put_RESUBMIT(out); resubmit->ofpact.raw = NXAST_RAW_RESUBMIT_TABLE; resubmit->in_port = u16_to_ofp(ntohs(nar->in_port)); resubmit->table_id = nar->table; return 0; }
decode_NXAST_RAW_RESUBMIT_TABLE(const struct nx_action_resubmit *nar, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { struct ofpact_resubmit *resubmit; if (nar->pad[0] || nar->pad[1] || nar->pad[2]) { return OFPERR_OFPBAC_BAD_ARGUMENT; } resubmit = ofpact_put_RESUBMIT(out); resubmit->ofpact.raw = NXAST_RAW_RESUBMIT_TABLE; resubmit->in_port = u16_to_ofp(ntohs(nar->in_port)); resubmit->table_id = nar->table; return 0; }
C
ovs
0
CVE-2015-1335
https://www.cvedetails.com/cve/CVE-2015-1335/
CWE-59
https://github.com/lxc/lxc/commit/592fd47a6245508b79fe6ac819fe6d3b2c1289be
592fd47a6245508b79fe6ac819fe6d3b2c1289be
CVE-2015-1335: Protect container mounts against symlinks When a container starts up, lxc sets up the container's initial fstree by doing a bunch of mounting, guided by the container configuration file. The container config is owned by the admin or user on the host, so we do not try to guard against bad entries. However, since the mount target is in the container, it's possible that the container admin could divert the mount with symbolic links. This could bypass proper container startup (i.e. confinement of a root-owned container by the restrictive apparmor policy, by diverting the required write to /proc/self/attr/current), or bypass the (path-based) apparmor policy by diverting, say, /proc to /mnt in the container. To prevent this, 1. do not allow mounts to paths containing symbolic links 2. do not allow bind mounts from relative paths containing symbolic links. Details: Define safe_mount which ensures that the container has not inserted any symbolic links into any mount targets for mounts to be done during container setup. The host's mount path may contain symbolic links. As it is under the control of the administrator, that's ok. So safe_mount begins the check for symbolic links after the rootfs->mount, by opening that directory. It opens each directory along the path using openat() relative to the parent directory using O_NOFOLLOW. When the target is reached, it mounts onto /proc/self/fd/<targetfd>. Use safe_mount() in mount_entry(), when mounting container proc, and when needed. In particular, safe_mount() need not be used in any case where: 1. the mount is done in the container's namespace 2. the mount is for the container's rootfs 3. the mount is relative to a tmpfs or proc/sysfs which we have just safe_mount()ed ourselves Since we were using proc/net as a temporary placeholder for /proc/sys/net during container startup, and proc/net is a symbolic link, use proc/tty instead. Update the lxc.container.conf manpage with details about the new restrictions. Finally, add a testcase to test some symbolic link possibilities. Reported-by: Roman Fiedler Signed-off-by: Serge Hallyn <[email protected]> Acked-by: Stéphane Graber <[email protected]>
static int lxc_mount_auto_mounts(struct lxc_conf *conf, int flags, struct lxc_handler *handler) { int r; size_t i; static struct { int match_mask; int match_flag; const char *source; const char *destination; const char *fstype; unsigned long flags; const char *options; } default_mounts[] = { /* Read-only bind-mounting... In older kernels, doing that required * to do one MS_BIND mount and then MS_REMOUNT|MS_RDONLY the same * one. According to mount(2) manpage, MS_BIND honors MS_RDONLY from * kernel 2.6.26 onwards. However, this apparently does not work on * kernel 3.8. Unfortunately, on that very same kernel, doing the * same trick as above doesn't seem to work either, there one needs * to ALSO specify MS_BIND for the remount, otherwise the entire * fs is remounted read-only or the mount fails because it's busy... * MS_REMOUNT|MS_BIND|MS_RDONLY seems to work for kernels as low as * 2.6.32... */ { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "proc", "%r/proc", "proc", MS_NODEV|MS_NOEXEC|MS_NOSUID, NULL }, /* proc/tty is used as a temporary placeholder for proc/sys/net which we'll move back in a few steps */ { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/sys/net", "%r/proc/tty", NULL, MS_BIND, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/sys", "%r/proc/sys", NULL, MS_BIND, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, NULL, "%r/proc/sys", NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/tty", "%r/proc/sys/net", NULL, MS_MOVE, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/sysrq-trigger", "%r/proc/sysrq-trigger", NULL, MS_BIND, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, NULL, "%r/proc/sysrq-trigger", NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_RW, "proc", "%r/proc", "proc", MS_NODEV|MS_NOEXEC|MS_NOSUID, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_RW, "sysfs", "%r/sys", "sysfs", 0, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_RO, "sysfs", "%r/sys", "sysfs", MS_RDONLY, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "sysfs", "%r/sys", "sysfs", MS_NODEV|MS_NOEXEC|MS_NOSUID, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "%r/sys", "%r/sys", NULL, MS_BIND, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, NULL, "%r/sys", NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "sysfs", "%r/sys/devices/virtual/net", "sysfs", 0, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "%r/sys/devices/virtual/net/devices/virtual/net", "%r/sys/devices/virtual/net", NULL, MS_BIND, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, NULL, "%r/sys/devices/virtual/net", NULL, MS_REMOUNT|MS_BIND|MS_NOSUID|MS_NODEV|MS_NOEXEC, NULL }, { 0, 0, NULL, NULL, NULL, 0, NULL } }; for (i = 0; default_mounts[i].match_mask; i++) { if ((flags & default_mounts[i].match_mask) == default_mounts[i].match_flag) { char *source = NULL; char *destination = NULL; int saved_errno; unsigned long mflags; if (default_mounts[i].source) { /* will act like strdup if %r is not present */ source = lxc_string_replace("%r", conf->rootfs.path ? conf->rootfs.mount : "", default_mounts[i].source); if (!source) { SYSERROR("memory allocation error"); return -1; } } if (default_mounts[i].destination) { /* will act like strdup if %r is not present */ destination = lxc_string_replace("%r", conf->rootfs.path ? 
conf->rootfs.mount : "", default_mounts[i].destination); if (!destination) { saved_errno = errno; SYSERROR("memory allocation error"); free(source); errno = saved_errno; return -1; } } mflags = add_required_remount_flags(source, destination, default_mounts[i].flags); r = safe_mount(source, destination, default_mounts[i].fstype, mflags, default_mounts[i].options, conf->rootfs.path ? conf->rootfs.mount : NULL); saved_errno = errno; if (r < 0 && errno == ENOENT) { INFO("Mount source or target for %s on %s doesn't exist. Skipping.", source, destination); r = 0; } else if (r < 0) SYSERROR("error mounting %s on %s flags %lu", source, destination, mflags); free(source); free(destination); if (r < 0) { errno = saved_errno; return -1; } } } if (flags & LXC_AUTO_CGROUP_MASK) { int cg_flags; cg_flags = flags & LXC_AUTO_CGROUP_MASK; /* If the type of cgroup mount was not specified, it depends on the * container's capabilities as to what makes sense: if we have * CAP_SYS_ADMIN, the read-only part can be remounted read-write * anyway, so we may as well default to read-write; then the admin * will not be given a false sense of security. (And if they really * want mixed r/o r/w, then they can explicitly specify :mixed.) * OTOH, if the container lacks CAP_SYS_ADMIN, do only default to * :mixed, because then the container can't remount it read-write. */ if (cg_flags == LXC_AUTO_CGROUP_NOSPEC || cg_flags == LXC_AUTO_CGROUP_FULL_NOSPEC) { int has_sys_admin = 0; if (!lxc_list_empty(&conf->keepcaps)) { has_sys_admin = in_caplist(CAP_SYS_ADMIN, &conf->keepcaps); } else { has_sys_admin = !in_caplist(CAP_SYS_ADMIN, &conf->caps); } if (cg_flags == LXC_AUTO_CGROUP_NOSPEC) { cg_flags = has_sys_admin ? LXC_AUTO_CGROUP_RW : LXC_AUTO_CGROUP_MIXED; } else { cg_flags = has_sys_admin ? LXC_AUTO_CGROUP_FULL_RW : LXC_AUTO_CGROUP_FULL_MIXED; } } if (!cgroup_mount(conf->rootfs.path ? conf->rootfs.mount : "", handler, cg_flags)) { SYSERROR("error mounting /sys/fs/cgroup"); return -1; } } return 0; }
static int lxc_mount_auto_mounts(struct lxc_conf *conf, int flags, struct lxc_handler *handler) { int r; size_t i; static struct { int match_mask; int match_flag; const char *source; const char *destination; const char *fstype; unsigned long flags; const char *options; } default_mounts[] = { /* Read-only bind-mounting... In older kernels, doing that required * to do one MS_BIND mount and then MS_REMOUNT|MS_RDONLY the same * one. According to mount(2) manpage, MS_BIND honors MS_RDONLY from * kernel 2.6.26 onwards. However, this apparently does not work on * kernel 3.8. Unfortunately, on that very same kernel, doing the * same trick as above doesn't seem to work either, there one needs * to ALSO specify MS_BIND for the remount, otherwise the entire * fs is remounted read-only or the mount fails because it's busy... * MS_REMOUNT|MS_BIND|MS_RDONLY seems to work for kernels as low as * 2.6.32... */ { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "proc", "%r/proc", "proc", MS_NODEV|MS_NOEXEC|MS_NOSUID, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/sys/net", "%r/proc/net", NULL, MS_BIND, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/sys", "%r/proc/sys", NULL, MS_BIND, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, NULL, "%r/proc/sys", NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/net", "%r/proc/sys/net", NULL, MS_MOVE, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, "%r/proc/sysrq-trigger", "%r/proc/sysrq-trigger", NULL, MS_BIND, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_MIXED, NULL, "%r/proc/sysrq-trigger", NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL }, { LXC_AUTO_PROC_MASK, LXC_AUTO_PROC_RW, "proc", "%r/proc", "proc", MS_NODEV|MS_NOEXEC|MS_NOSUID, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_RW, "sysfs", "%r/sys", "sysfs", 0, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_RO, "sysfs", "%r/sys", "sysfs", MS_RDONLY, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "sysfs", "%r/sys", "sysfs", MS_NODEV|MS_NOEXEC|MS_NOSUID, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "%r/sys", "%r/sys", NULL, MS_BIND, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, NULL, "%r/sys", NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "sysfs", "%r/sys/devices/virtual/net", "sysfs", 0, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, "%r/sys/devices/virtual/net/devices/virtual/net", "%r/sys/devices/virtual/net", NULL, MS_BIND, NULL }, { LXC_AUTO_SYS_MASK, LXC_AUTO_SYS_MIXED, NULL, "%r/sys/devices/virtual/net", NULL, MS_REMOUNT|MS_BIND|MS_NOSUID|MS_NODEV|MS_NOEXEC, NULL }, { 0, 0, NULL, NULL, NULL, 0, NULL } }; for (i = 0; default_mounts[i].match_mask; i++) { if ((flags & default_mounts[i].match_mask) == default_mounts[i].match_flag) { char *source = NULL; char *destination = NULL; int saved_errno; unsigned long mflags; if (default_mounts[i].source) { /* will act like strdup if %r is not present */ source = lxc_string_replace("%r", conf->rootfs.path ? conf->rootfs.mount : "", default_mounts[i].source); if (!source) { SYSERROR("memory allocation error"); return -1; } } if (default_mounts[i].destination) { /* will act like strdup if %r is not present */ destination = lxc_string_replace("%r", conf->rootfs.path ? 
conf->rootfs.mount : "", default_mounts[i].destination); if (!destination) { saved_errno = errno; SYSERROR("memory allocation error"); free(source); errno = saved_errno; return -1; } } mflags = add_required_remount_flags(source, destination, default_mounts[i].flags); r = mount(source, destination, default_mounts[i].fstype, mflags, default_mounts[i].options); saved_errno = errno; if (r < 0 && errno == ENOENT) { INFO("Mount source or target for %s on %s doesn't exist. Skipping.", source, destination); r = 0; } else if (r < 0) SYSERROR("error mounting %s on %s flags %lu", source, destination, mflags); free(source); free(destination); if (r < 0) { errno = saved_errno; return -1; } } } if (flags & LXC_AUTO_CGROUP_MASK) { int cg_flags; cg_flags = flags & LXC_AUTO_CGROUP_MASK; /* If the type of cgroup mount was not specified, it depends on the * container's capabilities as to what makes sense: if we have * CAP_SYS_ADMIN, the read-only part can be remounted read-write * anyway, so we may as well default to read-write; then the admin * will not be given a false sense of security. (And if they really * want mixed r/o r/w, then they can explicitly specify :mixed.) * OTOH, if the container lacks CAP_SYS_ADMIN, do only default to * :mixed, because then the container can't remount it read-write. */ if (cg_flags == LXC_AUTO_CGROUP_NOSPEC || cg_flags == LXC_AUTO_CGROUP_FULL_NOSPEC) { int has_sys_admin = 0; if (!lxc_list_empty(&conf->keepcaps)) { has_sys_admin = in_caplist(CAP_SYS_ADMIN, &conf->keepcaps); } else { has_sys_admin = !in_caplist(CAP_SYS_ADMIN, &conf->caps); } if (cg_flags == LXC_AUTO_CGROUP_NOSPEC) { cg_flags = has_sys_admin ? LXC_AUTO_CGROUP_RW : LXC_AUTO_CGROUP_MIXED; } else { cg_flags = has_sys_admin ? LXC_AUTO_CGROUP_FULL_RW : LXC_AUTO_CGROUP_FULL_MIXED; } } if (!cgroup_mount(conf->rootfs.path ? conf->rootfs.mount : "", handler, cg_flags)) { SYSERROR("error mounting /sys/fs/cgroup"); return -1; } } return 0; }
C
lxc
1
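safe_mount(), which func_after in the record above now calls in place of mount(), is described in the commit message as opening each directory along the path with openat() and O_NOFOLLOW and then mounting onto /proc/self/fd/<targetfd>. A stand-alone sketch of that traversal; the function name and error handling are illustrative, not the lxc implementation:

#include <fcntl.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>

/* Open each component of 'relpath' relative to its parent with
 * O_NOFOLLOW, so a symlink planted anywhere along the way fails the
 * walk (ELOOP) instead of being silently followed. */
static int open_without_symlinks(int rootfd, const char *relpath)
{
	char tmp[PATH_MAX];
	char *part, *save = NULL;
	int fd, next;

	if (strlen(relpath) >= sizeof(tmp))
		return -1;
	strcpy(tmp, relpath);

	fd = dup(rootfd);
	if (fd < 0)
		return -1;

	for (part = strtok_r(tmp, "/", &save); part != NULL;
	     part = strtok_r(NULL, "/", &save)) {
		next = openat(fd, part, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
		close(fd);
		if (next < 0)
			return -1;  /* missing component or a symlink */
		fd = next;
	}
	return fd;
}

/* The caller then mounts onto the vetted descriptor, e.g.:
 *   char target[64];
 *   snprintf(target, sizeof(target), "/proc/self/fd/%d", fd);
 *   mount(source, target, fstype, flags, data);
 * so the kernel resolves the already-checked fd, not a re-walked path. */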
CVE-2013-0281
https://www.cvedetails.com/cve/CVE-2013-0281/
CWE-399
https://github.com/ClusterLabs/pacemaker/commit/564f7cc2a51dcd2f28ab12a13394f31be5aa3c93
564f7cc2a51dcd2f28ab12a13394f31be5aa3c93
High: core: Internal tls api improvements for reuse with future LRMD tls backend.
queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { cib_local_notify_t *notify = calloc(1, sizeof(cib_local_notify_t)); notify->notify_src = notify_src; notify->client_id = strdup(client_id); notify->sync_reply = sync_reply; notify->from_peer = from_peer; if (!local_notify_queue) { local_notify_queue = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, local_notify_destroy_callback); } g_hash_table_insert(local_notify_queue, GINT_TO_POINTER(cib_local_bcast_num), notify); }
queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { cib_local_notify_t *notify = calloc(1, sizeof(cib_local_notify_t)); notify->notify_src = notify_src; notify->client_id = strdup(client_id); notify->sync_reply = sync_reply; notify->from_peer = from_peer; if (!local_notify_queue) { local_notify_queue = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, local_notify_destroy_callback); } g_hash_table_insert(local_notify_queue, GINT_TO_POINTER(cib_local_bcast_num), notify); }
C
pacemaker
0
CVE-2015-5307
https://www.cvedetails.com/cve/CVE-2015-5307/
CWE-399
https://github.com/torvalds/linux/commit/54a20552e1eae07aa240fa370a0293e006b5faed
54a20552e1eae07aa240fa370a0293e006b5faed
KVM: x86: work around infinite loop in microcode when #AC is delivered It was found that a guest can DoS a host by triggering an infinite stream of "alignment check" (#AC) exceptions. This causes the microcode to enter an infinite loop where the core never receives another interrupt. The host kernel panics pretty quickly due to the effects (CVE-2015-5307). Signed-off-by: Eric Northup <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer; u64 ignore_bits; guest_efer = vmx->vcpu.arch.efer; /* * NX is emulated; LMA and LME handled by hardware; SCE meaningless * outside long mode */ ignore_bits = EFER_NX | EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; clear_atomic_switch_msr(vmx, MSR_EFER); /* * On EPT, we can't emulate NX, so we must switch EFER atomically. * On CPUs that support "load IA32_EFER", always switch EFER * atomically, since it's faster than switching it manually. */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { guest_efer = vmx->vcpu.arch.efer; if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } return true; }
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer; u64 ignore_bits; guest_efer = vmx->vcpu.arch.efer; /* * NX is emulated; LMA and LME handled by hardware; SCE meaningless * outside long mode */ ignore_bits = EFER_NX | EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; clear_atomic_switch_msr(vmx, MSR_EFER); /* * On EPT, we can't emulate NX, so we must switch EFER atomically. * On CPUs that support "load IA32_EFER", always switch EFER * atomically, since it's faster than switching it manually. */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { guest_efer = vmx->vcpu.arch.efer; if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } return true; }
C
linux
0
CVE-2016-10030
https://www.cvedetails.com/cve/CVE-2016-10030/
CWE-284
https://github.com/SchedMD/slurm/commit/92362a92fffe60187df61f99ab11c249d44120ee
92362a92fffe60187df61f99ab11c249d44120ee
Fix security issue in _prolog_error(). Fix security issue caused by insecure file path handling triggered by the failure of a Prolog script. To exploit this a user needs to anticipate or cause the Prolog to fail for their job. (This commit is slightly different from the fix to the 15.08 branch.) CVE-2016-10030.
static int _step_limits_match(void *x, void *key) { job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x; step_loc_t *step_ptr = (step_loc_t *) key; if ((job_limits_ptr->job_id == step_ptr->jobid) && (job_limits_ptr->step_id == step_ptr->stepid)) return 1; return 0; }
static int _step_limits_match(void *x, void *key) { job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x; step_loc_t *step_ptr = (step_loc_t *) key; if ((job_limits_ptr->job_id == step_ptr->jobid) && (job_limits_ptr->step_id == step_ptr->stepid)) return 1; return 0; }
C
slurm
0
CVE-2013-2015
https://www.cvedetails.com/cve/CVE-2013-2015/
CWE-399
https://github.com/torvalds/linux/commit/0e9a9a1ad619e7e987815d20262d36a2f95717ca
0e9a9a1ad619e7e987815d20262d36a2f95717ca
ext4: avoid hang when mounting non-journal filesystems with orphan list When trying to mount a file system which does not contain a journal, but which does have an orphan list containing an inode which needs to be truncated, the mount call will hang forever in ext4_orphan_cleanup() because ext4_orphan_del() will return immediately without removing the inode from the orphan list, leading to an uninterruptible loop in kernel code which will busy out one of the CPUs on the system. This can be trivially reproduced by trying to mount the file system found in tests/f_orphan_extents_inode/image.gz from the e2fsprogs source tree. If a malicious user were to put this on a USB stick, and mount it on a Linux desktop which has automatic mounts enabled, this could be considered a potential denial of service attack. (Not a big deal in practice, but professional paranoids worry about such things, and have even been known to allocate CVE numbers for such problems.) Signed-off-by: "Theodore Ts'o" <[email protected]> Reviewed-by: Zheng Liu <[email protected]> Cc: [email protected]
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned int blocksize = dir->i_sb->s_blocksize; unsigned short reclen; int csum_size = 0; int err; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); reclen = EXT4_DIR_REC_LEN(namelen); if (!de) { err = ext4_find_dest_de(dir, inode, bh, bh->b_data, blocksize - csum_size, name, namelen, &de); if (err) return err; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) { ext4_std_error(dir->i_sb, err); return err; } /* By now the buffer is marked for journaling */ ext4_insert_dentry(inode, de, blocksize, name, namelen); /* * XXX shouldn't update any times until successful * completion of syscall, but too many callers depend * on this. * * XXX similarly, too many callers depend on * ext4_new_inode() setting the times, but error * recovery deletes the inode, so the worst that can * happen is that the times are slightly out of date * and/or different from the directory change time. */ dir->i_mtime = dir->i_ctime = ext4_current_time(dir); ext4_update_dx_flag(dir); dir->i_version++; ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return 0; }
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, struct inode *inode, struct ext4_dir_entry_2 *de, struct buffer_head *bh) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned int blocksize = dir->i_sb->s_blocksize; unsigned short reclen; int csum_size = 0; int err; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) csum_size = sizeof(struct ext4_dir_entry_tail); reclen = EXT4_DIR_REC_LEN(namelen); if (!de) { err = ext4_find_dest_de(dir, inode, bh, bh->b_data, blocksize - csum_size, name, namelen, &de); if (err) return err; } BUFFER_TRACE(bh, "get_write_access"); err = ext4_journal_get_write_access(handle, bh); if (err) { ext4_std_error(dir->i_sb, err); return err; } /* By now the buffer is marked for journaling */ ext4_insert_dentry(inode, de, blocksize, name, namelen); /* * XXX shouldn't update any times until successful * completion of syscall, but too many callers depend * on this. * * XXX similarly, too many callers depend on * ext4_new_inode() setting the times, but error * recovery deletes the inode, so the worst that can * happen is that the times are slightly out of date * and/or different from the directory change time. */ dir->i_mtime = dir->i_ctime = ext4_current_time(dir); ext4_update_dx_flag(dir); dir->i_version++; ext4_mark_inode_dirty(handle, dir); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_dirent_node(handle, dir, bh); if (err) ext4_std_error(dir->i_sb, err); return 0; }
C
linux
0
CVE-2016-1621
https://www.cvedetails.com/cve/CVE-2016-1621/
CWE-119
https://android.googlesource.com/platform/external/libvpx/+/5a9753fca56f0eeb9f61e342b2fccffc364f9426
5a9753fca56f0eeb9f61e342b2fccffc364f9426
Merge Conflict Fix CL to lmp-mr1-release for ag/849478 DO NOT MERGE - libvpx: Pull from upstream Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06 BUG=23452792 Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
virtual void SetUp() { UUT_ = GET_PARAM(2); #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ != 0) mask_ = (1 << UUT_->use_highbd_) - 1; else mask_ = 255; #endif /* Set up guard blocks for an inner block centered in the outer block */ for (int i = 0; i < kOutputBufferSize; ++i) { if (IsIndexInBorder(i)) output_[i] = 255; else output_[i] = 0; } ::libvpx_test::ACMRandom prng; for (int i = 0; i < kInputBufferSize; ++i) { if (i & 1) { input_[i] = 255; #if CONFIG_VP9_HIGHBITDEPTH input16_[i] = mask_; #endif } else { input_[i] = prng.Rand8Extremes(); #if CONFIG_VP9_HIGHBITDEPTH input16_[i] = prng.Rand16() & mask_; #endif } } }
virtual void SetUp() { UUT_ = GET_PARAM(2); /* Set up guard blocks for an inner block centered in the outer block */ for (int i = 0; i < kOutputBufferSize; ++i) { if (IsIndexInBorder(i)) output_[i] = 255; else output_[i] = 0; } ::libvpx_test::ACMRandom prng; for (int i = 0; i < kInputBufferSize; ++i) input_[i] = prng.Rand8Extremes(); }
C
Android
1
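The only substantive addition in func_after for the record above is the sample mask: for a bit depth d, (1 << d) - 1 is the all-ones value of d bits, with 255 as the 8-bit fallback. A trivial check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* (1 << d) - 1 is the maximum d-bit sample value. */
	assert(((1 << 10) - 1) == 1023);  /* 10-bit: 0x3FF */
	assert(((1 << 12) - 1) == 4095);  /* 12-bit: 0xFFF */
	assert(((1 << 8) - 1) == 255);    /* 8-bit fallback */
	return 0;
}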
CVE-2017-5546
https://www.cvedetails.com/cve/CVE-2017-5546/
null
https://github.com/torvalds/linux/commit/c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
c4e490cf148e85ead0d1b1c2caaba833f1d5b29f
mm/slab.c: fix SLAB freelist randomization duplicate entries This patch fixes a bug in the freelist randomization code. When a high random number is used, the freelist will contain duplicate entries. This results in different allocations sharing the same chunk, leading to odd behaviours and crashes. It should be uncommon but it depends on the machines. We saw it happening more often on some machines (every few hours of running tests). Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: John Sperbeck <[email protected]> Signed-off-by: Thomas Garnier <[email protected]> Cc: Christoph Lameter <[email protected]> Cc: Pekka Enberg <[email protected]> Cc: David Rientjes <[email protected]> Cc: Joonsoo Kim <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void do_drain(void *arg) { struct kmem_cache *cachep = arg; struct array_cache *ac; int node = numa_mem_id(); struct kmem_cache_node *n; LIST_HEAD(list); check_irq_off(); ac = cpu_cache_get(cachep); n = get_node(cachep, node); spin_lock(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); ac->avail = 0; }
static void do_drain(void *arg) { struct kmem_cache *cachep = arg; struct array_cache *ac; int node = numa_mem_id(); struct kmem_cache_node *n; LIST_HEAD(list); check_irq_off(); ac = cpu_cache_get(cachep); n = get_node(cachep, node); spin_lock(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); ac->avail = 0; }
C
linux
0
CVE-2012-2880
https://www.cvedetails.com/cve/CVE-2012-2880/
CWE-362
https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
[Sync] Cleanup all tab sync enabling logic now that it's on by default. BUG=none TEST= Review URL: https://chromiumcodereview.appspot.com/10443046 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98
void ProfileSyncService::OnClearServerDataSucceeded() { clear_server_data_timer_.Stop(); if (clear_server_data_state_ != CLEAR_SUCCEEDED) { clear_server_data_state_ = CLEAR_SUCCEEDED; NotifyObservers(); } }
void ProfileSyncService::OnClearServerDataSucceeded() { clear_server_data_timer_.Stop(); if (clear_server_data_state_ != CLEAR_SUCCEEDED) { clear_server_data_state_ = CLEAR_SUCCEEDED; NotifyObservers(); } }
C
Chrome
0
CVE-2011-4131
https://www.cvedetails.com/cve/CVE-2011-4131/
CWE-189
https://github.com/torvalds/linux/commit/bf118a342f10dafe44b14451a1392c3254629a1f
bf118a342f10dafe44b14451a1392c3254629a1f
NFSv4: include bitmap in nfsv4 get acl data The NFSv4 bitmap size is unbounded: a server can return an arbitrary sized bitmap in an FATTR4_WORD0_ACL request. Replace using the nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data xdr length to the (cached) acl page data. This is a general solution to commit e5012d1f "NFSv4.1: update nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead when getting ACLs. Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved. Cc: [email protected] Signed-off-by: Andy Adamson <[email protected]> Signed-off-by: Trond Myklebust <[email protected]>
static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE); }
static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE); }
C
linux
0
CVE-2018-16253
https://www.cvedetails.com/cve/CVE-2018-16253/
CWE-347
https://github.com/igrr/axtls-8266/commit/5efe2947ab45e81d84b5f707c51d1c64be52f36c
5efe2947ab45e81d84b5f707c51d1c64be52f36c
Apply CVE fixes for X509 parsing Apply patches developed by Sze Yiu which correct a vulnerability in X509 parsing. See CVE-2018-16150 and CVE-2018-16149 for more info.
static int x509_v3_basic_constraints(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { int ret = X509_OK; int lenSeq = 0; if ((offset = asn1_is_basic_constraints(cert, offset)) == 0) goto end_contraints; x509_ctx->basic_constraint_present = true; x509_ctx->basic_constraint_is_critical = asn1_is_critical_ext(cert, &offset); /* Assign Defaults in case not specified basic_constraint_cA will already by zero by virtue of the calloc */ x509_ctx->basic_constraint_cA = 0; /* basic_constraint_pathLenConstraint is unlimited by default. 10000 is just a large number (limits.h is not already included) */ x509_ctx->basic_constraint_pathLenConstraint = 10000; if ((asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) < 0) || ((lenSeq = asn1_next_obj(cert, &offset, ASN1_SEQUENCE)) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than zero, continue with the basic_constraint_cA */ if ((lenSeq>0)&&(asn1_get_bool(cert, &offset, &x509_ctx->basic_constraint_cA) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than 3, it has more content than the basic_constraint_cA bool, so grab the pathLenConstraint */ if ((lenSeq>3) && (asn1_get_int(cert, &offset, &x509_ctx->basic_constraint_pathLenConstraint) < 0)) { ret = X509_NOT_OK; } end_contraints: return ret; }
static int x509_v3_basic_constraints(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { int ret = X509_OK; int lenSeq = 0; if ((offset = asn1_is_basic_constraints(cert, offset)) == 0) goto end_contraints; x509_ctx->basic_constraint_present = true; x509_ctx->basic_constraint_is_critical = asn1_is_critical_ext(cert, &offset); /* Assign Defaults in case not specified basic_constraint_cA will already by zero by virtue of the calloc */ x509_ctx->basic_constraint_cA = 0; /* basic_constraint_pathLenConstraint is unlimited by default. 10000 is just a large number (limits.h is not already included) */ x509_ctx->basic_constraint_pathLenConstraint = 10000; if ((asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) < 0) || ((lenSeq = asn1_next_obj(cert, &offset, ASN1_SEQUENCE)) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than zero, continue with the basic_constraint_cA */ if ((lenSeq>0)&&(asn1_get_bool(cert, &offset, &x509_ctx->basic_constraint_cA) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than 3, it has more content than the basic_constraint_cA bool, so grab the pathLenConstraint */ if ((lenSeq>3) && (asn1_get_int(cert, &offset, &x509_ctx->basic_constraint_pathLenConstraint) < 0)) { ret = X509_NOT_OK; } end_contraints: return ret; }
C
axtls-8266
0
CVE-2013-6636
https://www.cvedetails.com/cve/CVE-2013-6636/
CWE-20
https://github.com/chromium/chromium/commit/5cfe3023574666663d970ce48cdbc8ed15ce61d9
5cfe3023574666663d970ce48cdbc8ed15ce61d9
Clear out some minor TODOs. BUG=none Review URL: https://codereview.chromium.org/1047063002 Cr-Commit-Position: refs/heads/master@{#322959}
gfx::Size AutofillDialogViews::NotificationArea::GetPreferredSize() const { gfx::Size size = views::View::GetPreferredSize(); size.set_width(1); return size; }
gfx::Size AutofillDialogViews::NotificationArea::GetPreferredSize() const { gfx::Size size = views::View::GetPreferredSize(); size.set_width(1); return size; }
C
Chrome
0
CVE-2012-5131
https://www.cvedetails.com/cve/CVE-2012-5131/
null
https://github.com/chromium/chromium/commit/d0c31f0342cefc46a3b3d80359a9779d044d4c0d
d0c31f0342cefc46a3b3d80359a9779d044d4c0d
Remove BlobRegistry indirection since there is only one implementation. BUG= Review URL: https://chromiumcodereview.appspot.com/15851008 git-svn-id: svn://svn.chromium.org/blink/trunk@152746 bbb929c8-8fbe-4397-9dbb-9b2b20218538
bool FileReaderLoader::isCompleted() const { return m_bytesLoaded == m_totalBytes; }
bool FileReaderLoader::isCompleted() const { return m_bytesLoaded == m_totalBytes; }
C
Chrome
0
CVE-2011-2918
https://www.cvedetails.com/cve/CVE-2011-2918/
CWE-399
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <[email protected]> Cc: Michael Cree <[email protected]> Cc: Will Deacon <[email protected]> Cc: Deng-Cheng Zhu <[email protected]> Cc: Anton Blanchard <[email protected]> Cc: Eric B Munson <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Paul Mundt <[email protected]> Cc: David S. Miller <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jason Wessel <[email protected]> Cc: Don Zickus <[email protected]> Link: http://lkml.kernel.org/n/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); }
void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); }
C
linux
0
CVE-2012-5148
https://www.cvedetails.com/cve/CVE-2012-5148/
CWE-20
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
e89cfcb9090e8c98129ae9160c513f504db74599
Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
void BrowserTabStripController::StartHighlightTabsForCommand( TabStripModel::ContextMenuCommand command_id, BaseTab* tab) { if (command_id == TabStripModel::CommandCloseOtherTabs || command_id == TabStripModel::CommandCloseTabsToRight) { int model_index = tabstrip_->GetModelIndexOfBaseTab(tab); if (IsValidIndex(model_index)) { std::vector<int> indices = model_->GetIndicesClosedByCommand(model_index, command_id); for (std::vector<int>::const_iterator i(indices.begin()); i != indices.end(); ++i) { tabstrip_->StartHighlight(*i); } } } }
void BrowserTabStripController::StartHighlightTabsForCommand( TabStripModel::ContextMenuCommand command_id, BaseTab* tab) { if (command_id == TabStripModel::CommandCloseOtherTabs || command_id == TabStripModel::CommandCloseTabsToRight) { int model_index = tabstrip_->GetModelIndexOfBaseTab(tab); if (IsValidIndex(model_index)) { std::vector<int> indices = model_->GetIndicesClosedByCommand(model_index, command_id); for (std::vector<int>::const_iterator i(indices.begin()); i != indices.end(); ++i) { tabstrip_->StartHighlight(*i); } } } }
C
Chrome
0
CVE-2019-5797
null
null
https://github.com/chromium/chromium/commit/ba169c14aa9cc2efd708a878ae21ff34f3898fe0
ba169c14aa9cc2efd708a878ae21ff34f3898fe0
Fixing BadMessageCallback usage by SessionStorage TBR: [email protected] Bug: 916523 Change-Id: I027cc818cfba917906844ad2ec0edd7fa4761bd1 Reviewed-on: https://chromium-review.googlesource.com/c/1401604 Commit-Queue: Daniel Murphy <[email protected]> Reviewed-by: Marijn Kruisselbrink <[email protected]> Reviewed-by: Ken Rockot <[email protected]> Cr-Commit-Position: refs/heads/master@{#621772}
StoragePartitionImpl::StoragePartitionImpl( BrowserContext* browser_context, const base::FilePath& partition_path, storage::SpecialStoragePolicy* special_storage_policy) : partition_path_(partition_path), special_storage_policy_(special_storage_policy), network_context_client_binding_(this), browser_context_(browser_context), deletion_helpers_running_(0), weak_factory_(this) {}
StoragePartitionImpl::StoragePartitionImpl( BrowserContext* browser_context, const base::FilePath& partition_path, storage::SpecialStoragePolicy* special_storage_policy) : partition_path_(partition_path), special_storage_policy_(special_storage_policy), network_context_client_binding_(this), browser_context_(browser_context), deletion_helpers_running_(0), weak_factory_(this) {}
C
Chrome
0
CVE-2015-1191
https://www.cvedetails.com/cve/CVE-2015-1191/
CWE-22
https://github.com/madler/pigz/commit/fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
fdad1406b3ec809f4954ff7cdf9e99eb18c2458f
When decompressing with -N or -NT, strip any path from header name. This uses the path of the compressed file combined with the name from the header as the name of the decompressed output file. Any path information in the header name is stripped. This avoids a possible vulnerability where absolute or descending paths are put in the gzip header.
local void free_track(struct mem_track_s *mem, void *ptr) { size_t size; if (ptr != NULL) { size = MALLOC_SIZE(ptr); mem_track_grab(mem); mem->num--; mem->size -= size; mem_track_drop(mem); free(ptr); } }
local void free_track(struct mem_track_s *mem, void *ptr) { size_t size; if (ptr != NULL) { size = MALLOC_SIZE(ptr); mem_track_grab(mem); mem->num--; mem->size -= size; mem_track_drop(mem); free(ptr); } }
C
pigz
0
CVE-2017-12187
https://www.cvedetails.com/cve/CVE-2017-12187/
CWE-20
https://cgit.freedesktop.org/xorg/xserver/commit/?id=cad5a1050b7184d828aef9c1dd151c3ab649d37e
cad5a1050b7184d828aef9c1dd151c3ab649d37e
null
PictOpValid(CARD8 op) { if ( /*PictOpMinimum <= op && */ op <= PictOpMaximum) return TRUE; if (PictOpDisjointMinimum <= op && op <= PictOpDisjointMaximum) return TRUE; if (PictOpConjointMinimum <= op && op <= PictOpConjointMaximum) return TRUE; if (PictOpBlendMinimum <= op && op <= PictOpBlendMaximum) return TRUE; return FALSE; }
PictOpValid(CARD8 op) { if ( /*PictOpMinimum <= op && */ op <= PictOpMaximum) return TRUE; if (PictOpDisjointMinimum <= op && op <= PictOpDisjointMaximum) return TRUE; if (PictOpConjointMinimum <= op && op <= PictOpConjointMaximum) return TRUE; if (PictOpBlendMinimum <= op && op <= PictOpBlendMaximum) return TRUE; return FALSE; }
C
xserver
0
CVE-2012-0044
https://www.cvedetails.com/cve/CVE-2012-0044/
CWE-189
https://github.com/torvalds/linux/commit/a5cd335165e31db9dbab636fd29895d41da55dd2
a5cd335165e31db9dbab636fd29895d41da55dd2
drm: integer overflow in drm_mode_dirtyfb_ioctl() There is a potential integer overflow in drm_mode_dirtyfb_ioctl() if userspace passes in a large num_clips. The call to kmalloc would allocate a small buffer, and the call to fb->funcs->dirty may result in a memory corruption. Reported-by: Haogang Chen <[email protected]> Signed-off-by: Xi Wang <[email protected]> Cc: [email protected] Signed-off-by: Dave Airlie <[email protected]>
int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_encoder *enc_resp = data; struct drm_mode_object *obj; struct drm_encoder *encoder; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); if (!obj) { ret = -EINVAL; goto out; } encoder = obj_to_encoder(obj); if (encoder->crtc) enc_resp->crtc_id = encoder->crtc->base.id; else enc_resp->crtc_id = 0; enc_resp->encoder_type = encoder->encoder_type; enc_resp->encoder_id = encoder->base.id; enc_resp->possible_crtcs = encoder->possible_crtcs; enc_resp->possible_clones = encoder->possible_clones; out: mutex_unlock(&dev->mode_config.mutex); return ret; }
int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_encoder *enc_resp = data; struct drm_mode_object *obj; struct drm_encoder *encoder; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); if (!obj) { ret = -EINVAL; goto out; } encoder = obj_to_encoder(obj); if (encoder->crtc) enc_resp->crtc_id = encoder->crtc->base.id; else enc_resp->crtc_id = 0; enc_resp->encoder_type = encoder->encoder_type; enc_resp->encoder_id = encoder->base.id; enc_resp->possible_crtcs = encoder->possible_crtcs; enc_resp->possible_clones = encoder->possible_clones; out: mutex_unlock(&dev->mode_config.mutex); return ret; }
C
linux
0
CVE-2018-16427
https://www.cvedetails.com/cve/CVE-2018-16427/
CWE-125
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
fixed out of bounds reads Thanks to Eric Sesterhenn from X41 D-SEC GmbH for reporting and suggesting security fixes.
static int list_readers(void) { unsigned int i, rcount = sc_ctx_get_reader_count(ctx); if (rcount == 0) { printf("No smart card readers found.\n"); return 0; } printf("# Detected readers (%s)\n", ctx->reader_driver->short_name); printf("Nr. Card Features Name\n"); for (i = 0; i < rcount; i++) { sc_reader_t *reader = sc_ctx_get_reader(ctx, i); int state = sc_detect_card_presence(reader); printf("%-5d%-6s%-10s%s\n", i, state & SC_READER_CARD_PRESENT ? "Yes":"No", reader->capabilities & SC_READER_CAP_PIN_PAD ? "PIN pad":"", reader->name); if (state & SC_READER_CARD_PRESENT && verbose) { struct sc_card *card; int r; char tmp[SC_MAX_ATR_SIZE*3]; sc_bin_to_hex(reader->atr.value, reader->atr.len, tmp, sizeof(tmp) - 1, ':'); if (state & SC_READER_CARD_EXCLUSIVE) printf(" %s [EXCLUSIVE]\n", tmp); else { if ((r = sc_connect_card(reader, &card)) != SC_SUCCESS) { fprintf(stderr, " failed: %s\n", sc_strerror(r)); } else { printf(" %s %s %s\n", tmp, card->name ? card->name : "", state & SC_READER_CARD_INUSE ? "[IN USE]" : ""); sc_disconnect_card(card); } } } } return 0; }
static int list_readers(void) { unsigned int i, rcount = sc_ctx_get_reader_count(ctx); if (rcount == 0) { printf("No smart card readers found.\n"); return 0; } printf("# Detected readers (%s)\n", ctx->reader_driver->short_name); printf("Nr. Card Features Name\n"); for (i = 0; i < rcount; i++) { sc_reader_t *reader = sc_ctx_get_reader(ctx, i); int state = sc_detect_card_presence(reader); printf("%-5d%-6s%-10s%s\n", i, state & SC_READER_CARD_PRESENT ? "Yes":"No", reader->capabilities & SC_READER_CAP_PIN_PAD ? "PIN pad":"", reader->name); if (state & SC_READER_CARD_PRESENT && verbose) { struct sc_card *card; int r; char tmp[SC_MAX_ATR_SIZE*3]; sc_bin_to_hex(reader->atr.value, reader->atr.len, tmp, sizeof(tmp) - 1, ':'); if (state & SC_READER_CARD_EXCLUSIVE) printf(" %s [EXCLUSIVE]\n", tmp); else { if ((r = sc_connect_card(reader, &card)) != SC_SUCCESS) { fprintf(stderr, " failed: %s\n", sc_strerror(r)); } else { printf(" %s %s %s\n", tmp, card->name ? card->name : "", state & SC_READER_CARD_INUSE ? "[IN USE]" : ""); sc_disconnect_card(card); } } } } return 0; }
C
OpenSC
0
CVE-2013-2884
https://www.cvedetails.com/cve/CVE-2013-2884/
CWE-399
https://github.com/chromium/chromium/commit/4ac8bc08e3306f38a5ab3e551aef6ad43753579c
4ac8bc08e3306f38a5ab3e551aef6ad43753579c
Set Attr.ownerDocument in Element#setAttributeNode() Attr objects can move across documents by setAttributeNode(). So it needs to reset ownerDocument through TreeScopeAdoptr::adoptIfNeeded(). BUG=248950 TEST=set-attribute-node-from-iframe.html Review URL: https://chromiumcodereview.appspot.com/17583003 git-svn-id: svn://svn.chromium.org/blink/trunk@152938 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void Element::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const { MemoryClassInfo info(memoryObjectInfo, this, WebCoreMemoryTypes::DOM); ContainerNode::reportMemoryUsage(memoryObjectInfo); info.addMember(m_tagName, "tagName"); info.addMember(m_elementData, "elementData"); }
void Element::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const { MemoryClassInfo info(memoryObjectInfo, this, WebCoreMemoryTypes::DOM); ContainerNode::reportMemoryUsage(memoryObjectInfo); info.addMember(m_tagName, "tagName"); info.addMember(m_elementData, "elementData"); }
C
Chrome
0
CVE-2016-7534
https://www.cvedetails.com/cve/CVE-2016-7534/
CWE-125
https://github.com/ImageMagick/ImageMagick/commit/430403b0029b37decf216d57f810899cab2317dd
430403b0029b37decf216d57f810899cab2317dd
https://github.com/ImageMagick/ImageMagick/issues/126
MagickExport void SetQuantumMinIsWhite(QuantumInfo *quantum_info, const MagickBooleanType min_is_white) { assert(quantum_info != (QuantumInfo *) NULL); assert(quantum_info->signature == MagickCoreSignature); quantum_info->min_is_white=min_is_white; }
MagickExport void SetQuantumMinIsWhite(QuantumInfo *quantum_info, const MagickBooleanType min_is_white) { assert(quantum_info != (QuantumInfo *) NULL); assert(quantum_info->signature == MagickCoreSignature); quantum_info->min_is_white=min_is_white; }
C
ImageMagick
0
null
null
null
https://github.com/chromium/chromium/commit/aa0e1ed74972a619072341b6409dc5cacd2418aa
aa0e1ed74972a619072341b6409dc5cacd2418aa
[BlackBerry] willComposite() and didComposite() are now in InspectorController https://bugs.webkit.org/show_bug.cgi?id=110343 Patch by Alberto Garcia <[email protected]> on 2013-02-21 Reviewed by Carlos Garcia Campos. This was changed in r142879. * Api/WebPage.cpp: (BlackBerry::WebKit::WebPagePrivate::willComposite): (BlackBerry::WebKit::WebPagePrivate::didComposite): git-svn-id: svn://svn.chromium.org/blink/trunk@143584 bbb929c8-8fbe-4397-9dbb-9b2b20218538
bool WebPagePrivate::compositorDrawsRootLayer() const { if (!m_mainFrame) return false; #if USE(ACCELERATED_COMPOSITING) if (Platform::userInterfaceThreadMessageClient()->isCurrentThread()) return m_compositor && m_compositor->drawsRootLayer(); RenderView* renderView = m_mainFrame->contentRenderer(); if (!renderView || !renderView->layer() || !renderView->layer()->backing()) return false; return !renderView->layer()->backing()->paintingGoesToWindow(); #else return false; #endif }
bool WebPagePrivate::compositorDrawsRootLayer() const { if (!m_mainFrame) return false; #if USE(ACCELERATED_COMPOSITING) if (Platform::userInterfaceThreadMessageClient()->isCurrentThread()) return m_compositor && m_compositor->drawsRootLayer(); RenderView* renderView = m_mainFrame->contentRenderer(); if (!renderView || !renderView->layer() || !renderView->layer()->backing()) return false; return !renderView->layer()->backing()->paintingGoesToWindow(); #else return false; #endif }
C
Chrome
0
CVE-2011-2854
https://www.cvedetails.com/cve/CVE-2011-2854/
CWE-399
https://github.com/chromium/chromium/commit/108a923ac784e62ca84b45cb6241f77a942e233a
108a923ac784e62ca84b45cb6241f77a942e233a
Workaround for bad driver issue with NVIDIA GeForce 7300 GT on Mac 10.5. BUG=87283 TEST=Run on a machine with NVIDIA GeForce 7300 GT on Mac 10.5 immediately after booting. Review URL: http://codereview.chromium.org/7373018 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@92651 0039d316-1c4b-4281-b951-d872f2087c98
int bytes_per_row() const { return bytes_per_row_; }
int bytes_per_row() const { return bytes_per_row_; }
C
Chrome
0
CVE-2017-6991
https://www.cvedetails.com/cve/CVE-2017-6991/
CWE-119
https://github.com/chromium/chromium/commit/3bfe67c9c4b45eb713326aae7a67c8f7390dae08
3bfe67c9c4b45eb713326aae7a67c8f7390dae08
sqlite: safely move pointer values through SQL. This lands https://www.sqlite.org/src/timeline?c=d6a44b35 in third_party/sqlite/src/ and third_party/sqlite/patches/0013-Add-new-interfaces-sqlite3_bind_pointer-sqlite3_resu.patch and re-generates third_party/sqlite/amalgamation/* using the script at third_party/sqlite/google_generate_amalgamation.sh. The CL also adds a layout test that verifies the patch works as intended. BUG=742407 Change-Id: I2e1a457459cd2e975e6241b630e7b79c82545981 Reviewed-on: https://chromium-review.googlesource.com/572976 Reviewed-by: Chris Mumford <[email protected]> Commit-Queue: Victor Costan <[email protected]> Cr-Commit-Position: refs/heads/master@{#487275}
static SQLITE_NOINLINE void autoIncrementEnd(Parse *pParse){ AutoincInfo *p; Vdbe *v = pParse->pVdbe; sqlite3 *db = pParse->db; assert( v ); for(p = pParse->pAinc; p; p = p->pNext){ static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList autoIncEnd[] = { /* 0 */ {OP_NotNull, 0, 2, 0}, /* 1 */ {OP_NewRowid, 0, 0, 0}, /* 2 */ {OP_MakeRecord, 0, 2, 0}, /* 3 */ {OP_Insert, 0, 0, 0}, /* 4 */ {OP_Close, 0, 0, 0} }; VdbeOp *aOp; Db *pDb = &db->aDb[p->iDb]; int iRec; int memId = p->regCtr; iRec = sqlite3GetTempReg(pParse); assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); aOp = sqlite3VdbeAddOpList(v, ArraySize(autoIncEnd), autoIncEnd, iLn); if( aOp==0 ) break; aOp[0].p1 = memId+1; aOp[1].p2 = memId+1; aOp[2].p1 = memId-1; aOp[2].p3 = iRec; aOp[3].p2 = iRec; aOp[3].p3 = memId+1; aOp[3].p5 = OPFLAG_APPEND; sqlite3ReleaseTempReg(pParse, iRec); } }
static SQLITE_NOINLINE void autoIncrementEnd(Parse *pParse){ AutoincInfo *p; Vdbe *v = pParse->pVdbe; sqlite3 *db = pParse->db; assert( v ); for(p = pParse->pAinc; p; p = p->pNext){ static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList autoIncEnd[] = { /* 0 */ {OP_NotNull, 0, 2, 0}, /* 1 */ {OP_NewRowid, 0, 0, 0}, /* 2 */ {OP_MakeRecord, 0, 2, 0}, /* 3 */ {OP_Insert, 0, 0, 0}, /* 4 */ {OP_Close, 0, 0, 0} }; VdbeOp *aOp; Db *pDb = &db->aDb[p->iDb]; int iRec; int memId = p->regCtr; iRec = sqlite3GetTempReg(pParse); assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); aOp = sqlite3VdbeAddOpList(v, ArraySize(autoIncEnd), autoIncEnd, iLn); if( aOp==0 ) break; aOp[0].p1 = memId+1; aOp[1].p2 = memId+1; aOp[2].p1 = memId-1; aOp[2].p3 = iRec; aOp[3].p2 = iRec; aOp[3].p3 = memId+1; aOp[3].p5 = OPFLAG_APPEND; sqlite3ReleaseTempReg(pParse, iRec); } }
C
Chrome
0
CVE-2011-1767
https://www.cvedetails.com/cve/CVE-2011-1767/
null
https://github.com/torvalds/linux/commit/c2892f02712e9516d72841d5c019ed6916329794
c2892f02712e9516d72841d5c019ed6916329794
gre: fix netns vs proto registration ordering GRE protocol receive hook can be called right after protocol addition is done. If netns stuff is not yet initialized, we're going to oops in net_generic(). This is remotely oopsable if ip_gre is compiled as module and packet comes at unfortunate moment of module loading. Signed-off-by: Alexey Dobriyan <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int __init ipgre_init(void) { int err; printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); err = register_pernet_device(&ipgre_net_ops); if (err < 0) return err; err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE); if (err < 0) { printk(KERN_INFO "ipgre init: can't add protocol\n"); goto add_proto_failed; } err = rtnl_link_register(&ipgre_link_ops); if (err < 0) goto rtnl_link_failed; err = rtnl_link_register(&ipgre_tap_ops); if (err < 0) goto tap_ops_failed; out: return err; tap_ops_failed: rtnl_link_unregister(&ipgre_link_ops); rtnl_link_failed: inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); add_proto_failed: unregister_pernet_device(&ipgre_net_ops); goto out; }
static int __init ipgre_init(void) { int err; printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) { printk(KERN_INFO "ipgre init: can't add protocol\n"); return -EAGAIN; } err = register_pernet_device(&ipgre_net_ops); if (err < 0) goto gen_device_failed; err = rtnl_link_register(&ipgre_link_ops); if (err < 0) goto rtnl_link_failed; err = rtnl_link_register(&ipgre_tap_ops); if (err < 0) goto tap_ops_failed; out: return err; tap_ops_failed: rtnl_link_unregister(&ipgre_link_ops); rtnl_link_failed: unregister_pernet_device(&ipgre_net_ops); gen_device_failed: inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); goto out; }
C
linux
1
CVE-2017-5120
https://www.cvedetails.com/cve/CVE-2017-5120/
null
https://github.com/chromium/chromium/commit/b7277af490d28ac7f802c015bb0ff31395768556
b7277af490d28ac7f802c015bb0ff31395768556
bindings: Support "attribute FrozenArray<T>?" Adds a quick hack to support a case of "attribute FrozenArray<T>?". Bug: 1028047 Change-Id: Ib3cecc4beb6bcc0fb0dbc667aca595454cc90c86 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1933866 Reviewed-by: Hitoshi Yoshida <[email protected]> Commit-Queue: Yuki Shiino <[email protected]> Cr-Commit-Position: refs/heads/master@{#718676}
static void StringOrNullAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) { v8::Local<v8::Object> holder = info.Holder(); TestObject* impl = V8TestObject::ToImpl(holder); V8SetReturnValueStringOrNull(info, impl->stringOrNullAttribute(), info.GetIsolate()); }
static void StringOrNullAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) { v8::Local<v8::Object> holder = info.Holder(); TestObject* impl = V8TestObject::ToImpl(holder); V8SetReturnValueStringOrNull(info, impl->stringOrNullAttribute(), info.GetIsolate()); }
C
Chrome
0
CVE-2018-17204
https://www.cvedetails.com/cve/CVE-2018-17204/
CWE-617
https://github.com/openvswitch/ovs/commit/4af6da3b275b764b1afe194df6499b33d2bf4cde
4af6da3b275b764b1afe194df6499b33d2bf4cde
ofp-group: Don't assert-fail decoding bad OF1.5 group mod type or command. When decoding a group mod, the current code validates the group type and command after the whole group mod has been decoded. The OF1.5 decoder, however, tries to use the type and command earlier, when it might still be invalid. This caused an assertion failure (via OVS_NOT_REACHED). This commit fixes the problem. ovs-vswitchd does not enable support for OpenFlow 1.5 by default. Reported-at: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9249 Signed-off-by: Ben Pfaff <[email protected]> Reviewed-by: Yifeng Sun <[email protected]>
parse_intel_port_stats_rfc2819_property(const struct ofpbuf *payload, struct ofputil_port_stats *ops) { const struct intel_port_stats_rfc2819 *rfc2819 = payload->data; if (payload->size != sizeof *rfc2819) { return OFPERR_OFPBPC_BAD_LEN; } ops->stats.rx_1_to_64_packets = ntohll(rfc2819->rx_1_to_64_packets); ops->stats.rx_65_to_127_packets = ntohll(rfc2819->rx_65_to_127_packets); ops->stats.rx_128_to_255_packets = ntohll(rfc2819->rx_128_to_255_packets); ops->stats.rx_256_to_511_packets = ntohll(rfc2819->rx_256_to_511_packets); ops->stats.rx_512_to_1023_packets = ntohll(rfc2819->rx_512_to_1023_packets); ops->stats.rx_1024_to_1522_packets = ntohll(rfc2819->rx_1024_to_1522_packets); ops->stats.rx_1523_to_max_packets = ntohll(rfc2819->rx_1523_to_max_packets); ops->stats.tx_1_to_64_packets = ntohll(rfc2819->tx_1_to_64_packets); ops->stats.tx_65_to_127_packets = ntohll(rfc2819->tx_65_to_127_packets); ops->stats.tx_128_to_255_packets = ntohll(rfc2819->tx_128_to_255_packets); ops->stats.tx_256_to_511_packets = ntohll(rfc2819->tx_256_to_511_packets); ops->stats.tx_512_to_1023_packets = ntohll(rfc2819->tx_512_to_1023_packets); ops->stats.tx_1024_to_1522_packets = ntohll(rfc2819->tx_1024_to_1522_packets); ops->stats.tx_1523_to_max_packets = ntohll(rfc2819->tx_1523_to_max_packets); ops->stats.tx_multicast_packets = ntohll(rfc2819->tx_multicast_packets); ops->stats.rx_broadcast_packets = ntohll(rfc2819->rx_broadcast_packets); ops->stats.tx_broadcast_packets = ntohll(rfc2819->tx_broadcast_packets); ops->stats.rx_undersized_errors = ntohll(rfc2819->rx_undersized_errors); ops->stats.rx_oversize_errors = ntohll(rfc2819->rx_oversize_errors); ops->stats.rx_fragmented_errors = ntohll(rfc2819->rx_fragmented_errors); ops->stats.rx_jabber_errors = ntohll(rfc2819->rx_jabber_errors); return 0; }
parse_intel_port_stats_rfc2819_property(const struct ofpbuf *payload, struct ofputil_port_stats *ops) { const struct intel_port_stats_rfc2819 *rfc2819 = payload->data; if (payload->size != sizeof *rfc2819) { return OFPERR_OFPBPC_BAD_LEN; } ops->stats.rx_1_to_64_packets = ntohll(rfc2819->rx_1_to_64_packets); ops->stats.rx_65_to_127_packets = ntohll(rfc2819->rx_65_to_127_packets); ops->stats.rx_128_to_255_packets = ntohll(rfc2819->rx_128_to_255_packets); ops->stats.rx_256_to_511_packets = ntohll(rfc2819->rx_256_to_511_packets); ops->stats.rx_512_to_1023_packets = ntohll(rfc2819->rx_512_to_1023_packets); ops->stats.rx_1024_to_1522_packets = ntohll(rfc2819->rx_1024_to_1522_packets); ops->stats.rx_1523_to_max_packets = ntohll(rfc2819->rx_1523_to_max_packets); ops->stats.tx_1_to_64_packets = ntohll(rfc2819->tx_1_to_64_packets); ops->stats.tx_65_to_127_packets = ntohll(rfc2819->tx_65_to_127_packets); ops->stats.tx_128_to_255_packets = ntohll(rfc2819->tx_128_to_255_packets); ops->stats.tx_256_to_511_packets = ntohll(rfc2819->tx_256_to_511_packets); ops->stats.tx_512_to_1023_packets = ntohll(rfc2819->tx_512_to_1023_packets); ops->stats.tx_1024_to_1522_packets = ntohll(rfc2819->tx_1024_to_1522_packets); ops->stats.tx_1523_to_max_packets = ntohll(rfc2819->tx_1523_to_max_packets); ops->stats.tx_multicast_packets = ntohll(rfc2819->tx_multicast_packets); ops->stats.rx_broadcast_packets = ntohll(rfc2819->rx_broadcast_packets); ops->stats.tx_broadcast_packets = ntohll(rfc2819->tx_broadcast_packets); ops->stats.rx_undersized_errors = ntohll(rfc2819->rx_undersized_errors); ops->stats.rx_oversize_errors = ntohll(rfc2819->rx_oversize_errors); ops->stats.rx_fragmented_errors = ntohll(rfc2819->rx_fragmented_errors); ops->stats.rx_jabber_errors = ntohll(rfc2819->rx_jabber_errors); return 0; }
C
ovs
0
null
null
null
https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d
d1a59e4e845a01d7d7b80ef184b672752a9eae4d
Fixing cross-process postMessage replies on more than two iterations. When two frames are replying to each other using event.source across processes, after the first two replies, things break down. The root cause is that in RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now properly searching for the remote frame id and returning the local one. BUG=153445 Review URL: https://chromiumcodereview.appspot.com/11040015 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98
void RenderViewImpl::didCreateScriptContext(WebFrame* frame, v8::Handle<v8::Context> context, int extension_group, int world_id) { content::GetContentClient()->renderer()->DidCreateScriptContext( frame, context, extension_group, world_id); }
void RenderViewImpl::didCreateScriptContext(WebFrame* frame, v8::Handle<v8::Context> context, int extension_group, int world_id) { content::GetContentClient()->renderer()->DidCreateScriptContext( frame, context, extension_group, world_id); }
C
Chrome
0
CVE-2013-2015
https://www.cvedetails.com/cve/CVE-2013-2015/
CWE-399
https://github.com/torvalds/linux/commit/0e9a9a1ad619e7e987815d20262d36a2f95717ca
0e9a9a1ad619e7e987815d20262d36a2f95717ca
ext4: avoid hang when mounting non-journal filesystems with orphan list When trying to mount a file system which does not contain a journal, but which does have an orphan list containing an inode which needs to be truncated, the mount call will hang forever in ext4_orphan_cleanup() because ext4_orphan_del() will return immediately without removing the inode from the orphan list, leading to an uninterruptible loop in kernel code which will busy out one of the CPU's on the system. This can be trivially reproduced by trying to mount the file system found in tests/f_orphan_extents_inode/image.gz from the e2fsprogs source tree. If a malicious user were to put this on a USB stick, and mount it on a Linux desktop which has automatic mounts enabled, this could be considered a potential denial of service attack. (Not a big deal in practice, but professional paranoids worry about such things, and have even been known to allocate CVE numbers for such problems.) Signed-off-by: "Theodore Ts'o" <[email protected]> Reviewed-by: Zheng Liu <[email protected]> Cc: [email protected]
static struct buffer_head * ext4_find_entry (struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *inlined) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; ext4_lblk_t start, block, b; const u8 *name = d_name->name; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead buffer */ int num = 0; ext4_lblk_t nblocks; int i, err; int namelen; *res_dir = NULL; sb = dir->i_sb; namelen = d_name->len; if (namelen > EXT4_NAME_LEN) return NULL; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; ret = ext4_find_inline_entry(dir, d_name, res_dir, &has_inline_data); if (has_inline_data) { if (inlined) *inlined = 1; return ret; } } if ((namelen <= 2) && (name[0] == '.') && (name[1] == '.' || name[1] == '\0')) { /* * "." or ".." will only be in the first block * NFS may look up ".."; "." should be handled by the VFS */ block = start = 0; nblocks = 1; goto restart; } if (is_dx(dir)) { bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ if (bh || (err != ERR_BAD_DX_DIR)) return bh; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; b = block; for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { /* * Terminate if we reach the end of the * directory and must wrap, or if our * search has finished at this block. */ if (b >= nblocks || (num && block == start)) { bh_use[ra_max] = NULL; break; } num++; bh = ext4_getblk(NULL, dir, b++, 0, &err); bh_use[ra_max] = bh; if (bh) ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { /* read error, skip block & hope for the best */ EXT4_ERROR_INODE(dir, "reading directory lblock %lu", (unsigned long) block); brelse(bh); goto next; } if (!buffer_verified(bh) && !is_dx_internal_node(dir, block, (struct ext4_dir_entry *)bh->b_data) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); goto next; } set_buffer_verified(bh); i = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. */ block = nblocks; nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); return ret; }
static struct buffer_head * ext4_find_entry (struct inode *dir, const struct qstr *d_name, struct ext4_dir_entry_2 **res_dir, int *inlined) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; ext4_lblk_t start, block, b; const u8 *name = d_name->name; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead buffer */ int num = 0; ext4_lblk_t nblocks; int i, err; int namelen; *res_dir = NULL; sb = dir->i_sb; namelen = d_name->len; if (namelen > EXT4_NAME_LEN) return NULL; if (ext4_has_inline_data(dir)) { int has_inline_data = 1; ret = ext4_find_inline_entry(dir, d_name, res_dir, &has_inline_data); if (has_inline_data) { if (inlined) *inlined = 1; return ret; } } if ((namelen <= 2) && (name[0] == '.') && (name[1] == '.' || name[1] == '\0')) { /* * "." or ".." will only be in the first block * NFS may look up ".."; "." should be handled by the VFS */ block = start = 0; nblocks = 1; goto restart; } if (is_dx(dir)) { bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); /* * On success, or if the error was file not found, * return. Otherwise, fall back to doing a search the * old fashioned way. */ if (bh || (err != ERR_BAD_DX_DIR)) return bh; dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " "falling back\n")); } nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); start = EXT4_I(dir)->i_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; b = block; for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { /* * Terminate if we reach the end of the * directory and must wrap, or if our * search has finished at this block. */ if (b >= nblocks || (num && block == start)) { bh_use[ra_max] = NULL; break; } num++; bh = ext4_getblk(NULL, dir, b++, 0, &err); bh_use[ra_max] = bh; if (bh) ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; wait_on_buffer(bh); if (!buffer_uptodate(bh)) { /* read error, skip block & hope for the best */ EXT4_ERROR_INODE(dir, "reading directory lblock %lu", (unsigned long) block); brelse(bh); goto next; } if (!buffer_verified(bh) && !is_dx_internal_node(dir, block, (struct ext4_dir_entry *)bh->b_data) && !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) { EXT4_ERROR_INODE(dir, "checksumming directory " "block %lu", (unsigned long)block); brelse(bh); goto next; } set_buffer_verified(bh); i = search_dirblock(bh, dir, d_name, block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); if (i == 1) { EXT4_I(dir)->i_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. */ block = nblocks; nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); return ret; }
C
linux
0
CVE-2018-12249
https://www.cvedetails.com/cve/CVE-2018-12249/
CWE-476
https://github.com/mruby/mruby/commit/faa4eaf6803bd11669bc324b4c34e7162286bfa3
faa4eaf6803bd11669bc324b4c34e7162286bfa3
`mrb_class_real()` did not work for `BasicObject`; fix #4037
mrb_mod_append_features(mrb_state *mrb, mrb_value mod) { mrb_value klass; mrb_check_type(mrb, mod, MRB_TT_MODULE); mrb_get_args(mrb, "C", &klass); mrb_include_module(mrb, mrb_class_ptr(klass), mrb_class_ptr(mod)); return mod; }
mrb_mod_append_features(mrb_state *mrb, mrb_value mod) { mrb_value klass; mrb_check_type(mrb, mod, MRB_TT_MODULE); mrb_get_args(mrb, "C", &klass); mrb_include_module(mrb, mrb_class_ptr(klass), mrb_class_ptr(mod)); return mod; }
C
mruby
0
CVE-2013-2017
https://www.cvedetails.com/cve/CVE-2013-2017/
CWE-399
https://github.com/torvalds/linux/commit/6ec82562ffc6f297d0de36d65776cff8e5704867
6ec82562ffc6f297d0de36d65776cff8e5704867
veth: Dont kfree_skb() after dev_forward_skb() In case of congestion, netif_rx() frees the skb, so we must assume dev_forward_skb() also consumes the skb. Bug introduced by commit 445409602c092 (veth: move loopback logic to common location) We must change dev_forward_skb() to always consume skb, and veth to not double free it. Bug report : http://marc.info/?l=linux-netdev&m=127310770900442&w=3 Reported-by: Martín Ferrari <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void dev_addr_flush(struct net_device *dev) { /* rtnl_mutex must be held here */ __hw_addr_flush(&dev->dev_addrs); dev->dev_addr = NULL; }
static void dev_addr_flush(struct net_device *dev) { /* rtnl_mutex must be held here */ __hw_addr_flush(&dev->dev_addrs); dev->dev_addr = NULL; }
C
linux
0
CVE-2012-5670
https://www.cvedetails.com/cve/CVE-2012-5670/
CWE-119
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=7f2e4f4f553f6836be7683f66226afac3fa979b8
7f2e4f4f553f6836be7683f66226afac3fa979b8
null
hash_init( hashtable* ht, FT_Memory memory ) { int sz = INITIAL_HT_SIZE; FT_Error error = BDF_Err_Ok; ht->size = sz; ht->limit = sz / 3; ht->used = 0; if ( FT_NEW_ARRAY( ht->table, sz ) ) goto Exit; Exit: return error; }
hash_init( hashtable* ht, FT_Memory memory ) { int sz = INITIAL_HT_SIZE; FT_Error error = BDF_Err_Ok; ht->size = sz; ht->limit = sz / 3; ht->used = 0; if ( FT_NEW_ARRAY( ht->table, sz ) ) goto Exit; Exit: return error; }
C
savannah
0
CVE-2013-0882
https://www.cvedetails.com/cve/CVE-2013-0882/
CWE-119
https://github.com/chromium/chromium/commit/25f9415f43d607d3d01f542f067e3cc471983e6b
25f9415f43d607d3d01f542f067e3cc471983e6b
Add HTMLFormControlElement::supportsAutofocus to fix a FIXME comment. This virtual function should return true if the form control can handle the 'autofocus' attribute when it is specified. Note: HTMLInputElement::supportsAutofocus reuses InputType::isInteractiveContent because interactiveness is required for autofocus capability. BUG=none TEST=none; no behavior changes. Review URL: https://codereview.chromium.org/143343003 git-svn-id: svn://svn.chromium.org/blink/trunk@165432 bbb929c8-8fbe-4397-9dbb-9b2b20218538
bool HTMLFormControlElement::isAutofocusable() const bool HTMLFormControlElement::supportsAutofocus() const { return false; }
bool HTMLFormControlElement::isAutofocusable() const { if (!fastHasAttribute(autofocusAttr)) return false; if (hasTagName(inputTag)) return !toHTMLInputElement(this)->isInputTypeHidden(); if (hasTagName(selectTag)) return true; if (hasTagName(keygenTag)) return true; if (hasTagName(buttonTag)) return true; if (hasTagName(textareaTag)) return true; return false; }
C
Chrome
1
CVE-2011-4930
https://www.cvedetails.com/cve/CVE-2011-4930/
CWE-134
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
5e5571d1a431eb3c61977b6dd6ec90186ef79867
null
int GahpClient::ec2_vm_start( const char * service_url, const char * publickeyfile, const char * privatekeyfile, const char * ami_id, const char * keypair, const char * user_data, const char * user_data_file, const char * instance_type, const char * availability_zone, const char * vpc_subnet, const char * vpc_ip, StringList & groupnames, char * &instance_id, char * &error_code) { static const char* command = "EC2_VM_START"; if (server->m_commands_supported->contains_anycase(command)==FALSE) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } if ( (service_url == NULL) || (publickeyfile == NULL) || (privatekeyfile == NULL) || (ami_id == NULL) ) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } if ( !keypair ) keypair = NULLSTRING; if ( !user_data ) user_data = NULLSTRING; if ( !user_data_file ) user_data_file = NULLSTRING; if ( !instance_type ) instance_type = NULLSTRING; if ( !availability_zone || 0==strlen(availability_zone) ) availability_zone = NULLSTRING; if ( !vpc_subnet || 0==strlen(vpc_subnet) ) vpc_subnet = NULLSTRING; if ( !vpc_ip || 0==strlen(vpc_ip) ) vpc_ip = NULLSTRING; std::string reqline; char* esc1 = strdup( escapeGahpString(service_url) ); char* esc2 = strdup( escapeGahpString(publickeyfile) ); char* esc3 = strdup( escapeGahpString(privatekeyfile) ); char* esc4 = strdup( escapeGahpString(ami_id) ); char* esc5 = strdup( escapeGahpString(keypair) ); char* esc6 = strdup( escapeGahpString(user_data) ); char* esc7 = strdup( escapeGahpString(user_data_file) ); char* esc8 = strdup( escapeGahpString(instance_type) ); char* esc9 = strdup( escapeGahpString(availability_zone) ); char* esc10 = strdup( escapeGahpString(vpc_subnet) ); char* esc11 = strdup( escapeGahpString(vpc_ip) ); int x = sprintf(reqline, "%s %s %s %s %s %s %s %s %s %s %s", esc1, esc2, esc3, esc4, esc5, esc6, esc7, esc8, esc9, esc10, esc11 ); free( esc1 ); free( esc2 ); free( esc3 ); free( esc4 ); free( esc5 ); free( esc6 ); free( esc7 ); free( esc8 ); free( esc9 ); free( esc10 ); free( esc11 ); ASSERT( x > 0 ); const char * group_name; int cnt = 0; char * esc_groupname; groupnames.rewind(); if ( groupnames.number() > 0 ) { while ( (group_name = groupnames.next()) ) { esc_groupname = strdup( escapeGahpString(group_name) ); sprintf_cat(reqline, " %s", esc_groupname); cnt++; free( esc_groupname ); } } ASSERT( cnt == groupnames.number() ); const char *buf = reqline.c_str(); if ( !is_pending(command,buf) ) { if ( m_mode == results_only ) { return GAHPCLIENT_COMMAND_NOT_SUBMITTED; } now_pending(command, buf, deleg_proxy); } Gahp_Args* result = get_pending_result(command, buf); if ( result ) { int rc = 0; if ( result->argc == 2 ) { rc = atoi(result->argv[1]); if ( rc == 0 ) { EXCEPT( "Bad %s result", command ); rc = 1; } else { error_string = ""; } } else if ( result->argc == 3 ) { rc = atoi(result->argv[1]); instance_id = strdup(result->argv[2]); } else if ( result->argc == 4 ) { rc = atoi( result->argv[1] ); error_code = strdup(result->argv[2]); error_string = result->argv[3]; } else { EXCEPT( "Bad %s result", command ); } delete result; return rc; } if ( check_pending_timeout(command, buf) ) { sprintf( error_string, "%s timed out", command ); return GAHPCLIENT_COMMAND_TIMED_OUT; } return GAHPCLIENT_COMMAND_PENDING; }
int GahpClient::ec2_vm_start( const char * service_url, const char * publickeyfile, const char * privatekeyfile, const char * ami_id, const char * keypair, const char * user_data, const char * user_data_file, const char * instance_type, const char * availability_zone, const char * vpc_subnet, const char * vpc_ip, StringList & groupnames, char * &instance_id, char * &error_code) { static const char* command = "EC2_VM_START"; if (server->m_commands_supported->contains_anycase(command)==FALSE) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } if ( (service_url == NULL) || (publickeyfile == NULL) || (privatekeyfile == NULL) || (ami_id == NULL) ) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } if ( !keypair ) keypair = NULLSTRING; if ( !user_data ) user_data = NULLSTRING; if ( !user_data_file ) user_data_file = NULLSTRING; if ( !instance_type ) instance_type = NULLSTRING; if ( !availability_zone || 0==strlen(availability_zone) ) availability_zone = NULLSTRING; if ( !vpc_subnet || 0==strlen(vpc_subnet) ) vpc_subnet = NULLSTRING; if ( !vpc_ip || 0==strlen(vpc_ip) ) vpc_ip = NULLSTRING; std::string reqline; char* esc1 = strdup( escapeGahpString(service_url) ); char* esc2 = strdup( escapeGahpString(publickeyfile) ); char* esc3 = strdup( escapeGahpString(privatekeyfile) ); char* esc4 = strdup( escapeGahpString(ami_id) ); char* esc5 = strdup( escapeGahpString(keypair) ); char* esc6 = strdup( escapeGahpString(user_data) ); char* esc7 = strdup( escapeGahpString(user_data_file) ); char* esc8 = strdup( escapeGahpString(instance_type) ); char* esc9 = strdup( escapeGahpString(availability_zone) ); char* esc10 = strdup( escapeGahpString(vpc_subnet) ); char* esc11 = strdup( escapeGahpString(vpc_ip) ); int x = sprintf(reqline, "%s %s %s %s %s %s %s %s %s %s %s", esc1, esc2, esc3, esc4, esc5, esc6, esc7, esc8, esc9, esc10, esc11 ); free( esc1 ); free( esc2 ); free( esc3 ); free( esc4 ); free( esc5 ); free( esc6 ); free( esc7 ); free( esc8 ); free( esc9 ); free( esc10 ); free( esc11 ); ASSERT( x > 0 ); const char * group_name; int cnt = 0; char * esc_groupname; groupnames.rewind(); if ( groupnames.number() > 0 ) { while ( (group_name = groupnames.next()) ) { esc_groupname = strdup( escapeGahpString(group_name) ); sprintf_cat(reqline, " %s", esc_groupname); cnt++; free( esc_groupname ); } } ASSERT( cnt == groupnames.number() ); const char *buf = reqline.c_str(); if ( !is_pending(command,buf) ) { if ( m_mode == results_only ) { return GAHPCLIENT_COMMAND_NOT_SUBMITTED; } now_pending(command, buf, deleg_proxy); } Gahp_Args* result = get_pending_result(command, buf); if ( result ) { int rc = 0; if ( result->argc == 2 ) { rc = atoi(result->argv[1]); if ( rc == 0 ) { EXCEPT( "Bad %s result", command ); rc = 1; } else { error_string = ""; } } else if ( result->argc == 3 ) { rc = atoi(result->argv[1]); instance_id = strdup(result->argv[2]); } else if ( result->argc == 4 ) { rc = atoi( result->argv[1] ); error_code = strdup(result->argv[2]); error_string = result->argv[3]; } else { EXCEPT( "Bad %s result", command ); } delete result; return rc; } if ( check_pending_timeout(command, buf) ) { sprintf( error_string, "%s timed out", command ); return GAHPCLIENT_COMMAND_TIMED_OUT; } return GAHPCLIENT_COMMAND_PENDING; }
CPP
htcondor
0
CVE-2017-5009
https://www.cvedetails.com/cve/CVE-2017-5009/
CWE-119
https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
1c40f9042ae2d6ee7483d72998aabb5e73b2ff60
DevTools: send proper resource type in Network.RequestWillBeSent This patch plumbs resource type into the DispatchWillSendRequest instrumentation. This allows us to report accurate type in Network.RequestWillBeSent event, instead of "Other", which we report today. BUG=765501 R=dgozman Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c Reviewed-on: https://chromium-review.googlesource.com/667504 Reviewed-by: Pavel Feldman <[email protected]> Reviewed-by: Dmitry Gozman <[email protected]> Commit-Queue: Andrey Lushnikov <[email protected]> Cr-Commit-Position: refs/heads/master@{#507936}
std::unique_ptr<TracedValue> InspectorRecalculateStylesEvent::Data( LocalFrame* frame) { std::unique_ptr<TracedValue> value = TracedValue::Create(); value->SetString("frame", ToHexString(frame)); SetCallStack(value.get()); return value; }
std::unique_ptr<TracedValue> InspectorRecalculateStylesEvent::Data( LocalFrame* frame) { std::unique_ptr<TracedValue> value = TracedValue::Create(); value->SetString("frame", ToHexString(frame)); SetCallStack(value.get()); return value; }
C
Chrome
0
CVE-2011-1800
https://www.cvedetails.com/cve/CVE-2011-1800/
CWE-189
https://github.com/chromium/chromium/commit/1777aa6484af15014b8691082a8c3075418786f5
1777aa6484af15014b8691082a8c3075418786f5
[Qt][WK2] Allow transparent WebViews https://bugs.webkit.org/show_bug.cgi?id=80608 Reviewed by Tor Arne Vestbø. Added support for transparentBackground in QQuickWebViewExperimental. This uses the existing drawsTransparentBackground property in WebKit2. Also, changed LayerTreeHostQt to set the contentsOpaque flag when the root layer changes, otherwise the change doesn't take effect. A new API test was added. * UIProcess/API/qt/qquickwebview.cpp: (QQuickWebViewPrivate::setTransparentBackground): (QQuickWebViewPrivate::transparentBackground): (QQuickWebViewExperimental::transparentBackground): (QQuickWebViewExperimental::setTransparentBackground): * UIProcess/API/qt/qquickwebview_p.h: * UIProcess/API/qt/qquickwebview_p_p.h: (QQuickWebViewPrivate): * UIProcess/API/qt/tests/qquickwebview/tst_qquickwebview.cpp: (tst_QQuickWebView): (tst_QQuickWebView::transparentWebViews): * WebProcess/WebPage/qt/LayerTreeHostQt.cpp: (WebKit::LayerTreeHostQt::LayerTreeHostQt): (WebKit::LayerTreeHostQt::setRootCompositingLayer): git-svn-id: svn://svn.chromium.org/blink/trunk@110254 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void QQuickWebViewExperimental::setAlertDialog(QDeclarativeComponent* alertDialog) { Q_D(QQuickWebView); if (d->alertDialog == alertDialog) return; d->alertDialog = alertDialog; emit alertDialogChanged(); }
void QQuickWebViewExperimental::setAlertDialog(QDeclarativeComponent* alertDialog) { Q_D(QQuickWebView); if (d->alertDialog == alertDialog) return; d->alertDialog = alertDialog; emit alertDialogChanged(); }
C
Chrome
0
CVE-2013-6636
https://www.cvedetails.com/cve/CVE-2013-6636/
CWE-20
https://github.com/chromium/chromium/commit/5cfe3023574666663d970ce48cdbc8ed15ce61d9
5cfe3023574666663d970ce48cdbc8ed15ce61d9
Clear out some minor TODOs. BUG=none Review URL: https://codereview.chromium.org/1047063002 Cr-Commit-Position: refs/heads/master@{#322959}
void AutofillDialogViews::StyledLabelLinkClicked(const gfx::Range& range, int event_flags) { delegate_->LegalDocumentLinkClicked(range); }
void AutofillDialogViews::StyledLabelLinkClicked(const gfx::Range& range, int event_flags) { delegate_->LegalDocumentLinkClicked(range); }
C
Chrome
0
CVE-2012-2895
https://www.cvedetails.com/cve/CVE-2012-2895/
CWE-119
https://github.com/chromium/chromium/commit/16dcd30c215801941d9890859fd79a234128fc3e
16dcd30c215801941d9890859fd79a234128fc3e
Refactors to simplify rename pathway in DownloadFileManager. This is https://chromiumcodereview.appspot.com/10668004 / r144817 (reverted due to CrOS failure) with the completion logic moved to after the auto-opening. The tests that test the auto-opening (for web store install) were waiting for download completion to check install, and hence were failing when completion was moved earlier. Doing this right would probably require another state (OPENED). BUG=123998 BUG=134930 [email protected] Review URL: https://chromiumcodereview.appspot.com/10701040 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145157 0039d316-1c4b-4281-b951-d872f2087c98
bool ChromeDownloadManagerDelegate::ShouldStartDownload(int32 download_id) { DownloadItem* download = download_manager_->GetActiveDownloadItem(download_id); if (!download) return false; #if defined(ENABLE_SAFE_BROWSING) DownloadProtectionService* service = GetDownloadProtectionService(); if (service) { VLOG(2) << __FUNCTION__ << "() Start SB URL check for download = " << download->DebugString(false); service->CheckDownloadUrl( DownloadProtectionService::DownloadInfo::FromDownloadItem(*download), base::Bind( &ChromeDownloadManagerDelegate::CheckDownloadUrlDone, this, download->GetId())); return false; } #endif CheckDownloadUrlDone(download_id, DownloadProtectionService::SAFE); return false; }
bool ChromeDownloadManagerDelegate::ShouldStartDownload(int32 download_id) { DownloadItem* download = download_manager_->GetActiveDownloadItem(download_id); if (!download) return false; #if defined(ENABLE_SAFE_BROWSING) DownloadProtectionService* service = GetDownloadProtectionService(); if (service) { VLOG(2) << __FUNCTION__ << "() Start SB URL check for download = " << download->DebugString(false); service->CheckDownloadUrl( DownloadProtectionService::DownloadInfo::FromDownloadItem(*download), base::Bind( &ChromeDownloadManagerDelegate::CheckDownloadUrlDone, this, download->GetId())); return false; } #endif CheckDownloadUrlDone(download_id, DownloadProtectionService::SAFE); return false; }
C
Chrome
0
CVE-2011-4611
https://www.cvedetails.com/cve/CVE-2011-4611/
CWE-189
https://github.com/torvalds/linux/commit/0837e3242c73566fc1c0196b4ec61779c25ffc93
0837e3242c73566fc1c0196b4ec61779c25ffc93
perf, powerpc: Handle events that raise an exception without overflowing Events on POWER7 can roll back if a speculative event doesn't eventually complete. Unfortunately in some rare cases they will raise a performance monitor exception. We need to catch this to ensure we reset the PMC. In all cases the PMC will be 256 or less cycles from overflow. Signed-off-by: Anton Blanchard <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]> Cc: <[email protected]> # as far back as it applies cleanly LKML-Reference: <20110309143842.6c22845e@kryten> Signed-off-by: Ingo Molnar <[email protected]>
static int is_limited_pmc(int pmcnum) { return (ppmu->flags & PPMU_LIMITED_PMC5_6) && (pmcnum == 5 || pmcnum == 6); }
static int is_limited_pmc(int pmcnum) { return (ppmu->flags & PPMU_LIMITED_PMC5_6) && (pmcnum == 5 || pmcnum == 6); }
C
linux
0
CVE-2019-5827
https://www.cvedetails.com/cve/CVE-2019-5827/
CWE-190
https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4
517ac71c9ee27f856f9becde8abea7d1604af9d4
sqlite: backport bugfixes for dbfuzz2 Bug: 952406 Change-Id: Icbec429742048d6674828726c96d8e265c41b595 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152 Reviewed-by: Chris Mumford <[email protected]> Commit-Queue: Darwin Huang <[email protected]> Cr-Commit-Position: refs/heads/master@{#651030}
static void analyzeOneTable( Parse *pParse, /* Parser context */ Table *pTab, /* Table whose indices are to be analyzed */ Index *pOnlyIdx, /* If not NULL, only analyze this one index */ int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */ int iMem, /* Available memory locations begin here */ int iTab /* Next available cursor */ ){ sqlite3 *db = pParse->db; /* Database handle */ Index *pIdx; /* An index to being analyzed */ int iIdxCur; /* Cursor open on index being analyzed */ int iTabCur; /* Table cursor */ Vdbe *v; /* The virtual machine being built up */ int i; /* Loop counter */ int jZeroRows = -1; /* Jump from here if number of rows is zero */ int iDb; /* Index of database containing pTab */ u8 needTableCnt = 1; /* True to count the table */ int regNewRowid = iMem++; /* Rowid for the inserted record */ int regStat4 = iMem++; /* Register to hold Stat4Accum object */ int regChng = iMem++; /* Index of changed index field */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int regRowid = iMem++; /* Rowid argument passed to stat_push() */ #endif int regTemp = iMem++; /* Temporary use register */ int regTabname = iMem++; /* Register containing table name */ int regIdxname = iMem++; /* Register containing index name */ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ int regPrev = iMem; /* MUST BE LAST (see below) */ #ifdef SQLITE_ENABLE_PREUPDATE_HOOK Table *pStat1 = 0; #endif pParse->nMem = MAX(pParse->nMem, iMem); v = sqlite3GetVdbe(pParse); if( v==0 || NEVER(pTab==0) ){ return; } if( pTab->tnum==0 ){ /* Do not gather statistics on views or virtual tables */ return; } if( sqlite3_strlike("sqlite\\_%", pTab->zName, '\\')==0 ){ /* Do not gather statistics on system tables */ return; } assert( sqlite3BtreeHoldsAllMutexes(db) ); iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 ); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0, db->aDb[iDb].zDbSName ) ){ return; } #endif #ifdef SQLITE_ENABLE_PREUPDATE_HOOK if( db->xPreUpdateCallback ){ pStat1 = (Table*)sqlite3DbMallocZero(db, sizeof(Table) + 13); if( pStat1==0 ) return; pStat1->zName = (char*)&pStat1[1]; memcpy(pStat1->zName, "sqlite_stat1", 13); pStat1->nCol = 3; pStat1->iPKey = -1; sqlite3VdbeAddOp4(pParse->pVdbe, OP_Noop, 0, 0, 0,(char*)pStat1,P4_DYNBLOB); } #endif /* Establish a read-lock on the table at the shared-cache level. ** Open a read-only cursor on the table. Also allocate a cursor number ** to use for scanning indexes (iIdxCur). No index cursor is opened at ** this time though. */ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); iTabCur = iTab++; iIdxCur = iTab++; pParse->nTab = MAX(pParse->nTab, iTab); sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regTabname, pTab->zName); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int nCol; /* Number of columns in pIdx. "N" */ int addrRewind; /* Address of "OP_Rewind iIdxCur" */ int addrNextRow; /* Address of "next_row:" */ const char *zIdxName; /* Name of the index */ int nColTest; /* Number of columns to test for changes */ if( pOnlyIdx && pOnlyIdx!=pIdx ) continue; if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0; if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){ nCol = pIdx->nKeyCol; zIdxName = pTab->zName; nColTest = nCol - 1; }else{ nCol = pIdx->nColumn; zIdxName = pIdx->zName; nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1; } /* Populate the register containing the index name. 
*/ sqlite3VdbeLoadString(v, regIdxname, zIdxName); VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName)); /* ** Pseudo-code for loop that calls stat_push(): ** ** Rewind csr ** if eof(csr) goto end_of_scan; ** regChng = 0 ** goto chng_addr_0; ** ** next_row: ** regChng = 0 ** if( idx(0) != regPrev(0) ) goto chng_addr_0 ** regChng = 1 ** if( idx(1) != regPrev(1) ) goto chng_addr_1 ** ... ** regChng = N ** goto chng_addr_N ** ** chng_addr_0: ** regPrev(0) = idx(0) ** chng_addr_1: ** regPrev(1) = idx(1) ** ... ** ** endDistinctTest: ** regRowid = idx(rowid) ** stat_push(P, regChng, regRowid) ** Next csr ** if !eof(csr) goto next_row; ** ** end_of_scan: */ /* Make sure there are enough memory cells allocated to accommodate ** the regPrev array and a trailing rowid (the rowid slot is required ** when building a record to insert into the sample column of ** the sqlite_stat4 table. */ pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); /* Open a read-only cursor on the index being analyzed. */ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); /* Invoke the stat_init() function. The arguments are: ** ** (1) the number of columns in the index including the rowid ** (or for a WITHOUT ROWID table, the number of PK columns), ** (2) the number of columns in the key without the rowid/pk ** (3) the number of rows in the index, ** ** ** The third argument is only used for STAT3 and STAT4 */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+3); #endif sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat4+1); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regStat4+2); sqlite3VdbeAddOp4(v, OP_Function0, 0, regStat4+1, regStat4, (char*)&statInitFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 2+IsStat34); /* Implementation of the following: ** ** Rewind csr ** if eof(csr) goto end_of_scan; ** regChng = 0 ** goto next_push_0; ** */ addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); addrNextRow = sqlite3VdbeCurrentAddr(v); if( nColTest>0 ){ int endDistinctTest = sqlite3VdbeMakeLabel(pParse); int *aGotoChng; /* Array of jump instruction addresses */ aGotoChng = sqlite3DbMallocRawNN(db, sizeof(int)*nColTest); if( aGotoChng==0 ) continue; /* ** next_row: ** regChng = 0 ** if( idx(0) != regPrev(0) ) goto chng_addr_0 ** regChng = 1 ** if( idx(1) != regPrev(1) ) goto chng_addr_1 ** ... ** regChng = N ** goto endDistinctTest */ sqlite3VdbeAddOp0(v, OP_Goto); addrNextRow = sqlite3VdbeCurrentAddr(v); if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){ /* For a single-column UNIQUE index, once we have found a non-NULL ** row, we know that all the rest will be distinct, so skip ** subsequent distinctness tests. */ sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest); VdbeCoverage(v); } for(i=0; i<nColTest; i++){ char *pColl = (char*)sqlite3LocateCollSeq(pParse, pIdx->azColl[i]); sqlite3VdbeAddOp2(v, OP_Integer, i, regChng); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp); aGotoChng[i] = sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ); sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); VdbeCoverage(v); } sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng); sqlite3VdbeGoto(v, endDistinctTest); /* ** chng_addr_0: ** regPrev(0) = idx(0) ** chng_addr_1: ** regPrev(1) = idx(1) ** ... 
*/ sqlite3VdbeJumpHere(v, addrNextRow-1); for(i=0; i<nColTest; i++){ sqlite3VdbeJumpHere(v, aGotoChng[i]); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regPrev+i); } sqlite3VdbeResolveLabel(v, endDistinctTest); sqlite3DbFree(db, aGotoChng); } /* ** chng_addr_N: ** regRowid = idx(rowid) // STAT34 only ** stat_push(P, regChng, regRowid) // 3rd parameter STAT34 only ** Next csr ** if !eof(csr) goto next_row; */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 assert( regRowid==(regStat4+2) ); if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, regRowid); }else{ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable); int j, k, regKey; regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol); for(j=0; j<pPk->nKeyCol; j++){ k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); assert( k>=0 && k<pIdx->nColumn ); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j); VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName)); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid); sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol); } #endif assert( regChng==(regStat4+1) ); sqlite3VdbeAddOp4(v, OP_Function0, 1, regStat4, regTemp, (char*)&statPushFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 2+IsStat34); sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v); /* Add the entry to the stat1 table. */ callStatGet(v, regStat4, STAT_GET_STAT1, regStat1); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); #ifdef SQLITE_ENABLE_PREUPDATE_HOOK sqlite3VdbeChangeP4(v, -1, (char*)pStat1, P4_TABLE); #endif sqlite3VdbeChangeP5(v, OPFLAG_APPEND); /* Add the entries to the stat3 or stat4 table. */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 { int regEq = regStat1; int regLt = regStat1+1; int regDLt = regStat1+2; int regSample = regStat1+3; int regCol = regStat1+4; int regSampleRowid = regCol + nCol; int addrNext; int addrIsNull; u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; pParse->nMem = MAX(pParse->nMem, regCol+nCol); addrNext = sqlite3VdbeCurrentAddr(v); callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid); addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid); VdbeCoverage(v); callStatGet(v, regStat4, STAT_GET_NEQ, regEq); callStatGet(v, regStat4, STAT_GET_NLT, regLt); callStatGet(v, regStat4, STAT_GET_NDLT, regDLt); sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0); VdbeCoverage(v); #ifdef SQLITE_ENABLE_STAT3 sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, 0, regSample); #else for(i=0; i<nCol; i++){ sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, i, regCol+i); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol, regSample); #endif sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid); sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */ sqlite3VdbeJumpHere(v, addrIsNull); } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* End of analysis */ sqlite3VdbeJumpHere(v, addrRewind); } /* Create a single sqlite_stat1 entry containing NULL as the index ** name and the row count as the content. 
*/ if( pOnlyIdx==0 && needTableCnt ){ VdbeComment((v, "%s", pTab->zName)); sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1); jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); #ifdef SQLITE_ENABLE_PREUPDATE_HOOK sqlite3VdbeChangeP4(v, -1, (char*)pStat1, P4_TABLE); #endif sqlite3VdbeJumpHere(v, jZeroRows); } }
static void analyzeOneTable( Parse *pParse, /* Parser context */ Table *pTab, /* Table whose indices are to be analyzed */ Index *pOnlyIdx, /* If not NULL, only analyze this one index */ int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */ int iMem, /* Available memory locations begin here */ int iTab /* Next available cursor */ ){ sqlite3 *db = pParse->db; /* Database handle */ Index *pIdx; /* An index to being analyzed */ int iIdxCur; /* Cursor open on index being analyzed */ int iTabCur; /* Table cursor */ Vdbe *v; /* The virtual machine being built up */ int i; /* Loop counter */ int jZeroRows = -1; /* Jump from here if number of rows is zero */ int iDb; /* Index of database containing pTab */ u8 needTableCnt = 1; /* True to count the table */ int regNewRowid = iMem++; /* Rowid for the inserted record */ int regStat4 = iMem++; /* Register to hold Stat4Accum object */ int regChng = iMem++; /* Index of changed index field */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int regRowid = iMem++; /* Rowid argument passed to stat_push() */ #endif int regTemp = iMem++; /* Temporary use register */ int regTabname = iMem++; /* Register containing table name */ int regIdxname = iMem++; /* Register containing index name */ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ int regPrev = iMem; /* MUST BE LAST (see below) */ #ifdef SQLITE_ENABLE_PREUPDATE_HOOK Table *pStat1 = 0; #endif pParse->nMem = MAX(pParse->nMem, iMem); v = sqlite3GetVdbe(pParse); if( v==0 || NEVER(pTab==0) ){ return; } if( pTab->tnum==0 ){ /* Do not gather statistics on views or virtual tables */ return; } if( sqlite3_strlike("sqlite\\_%", pTab->zName, '\\')==0 ){ /* Do not gather statistics on system tables */ return; } assert( sqlite3BtreeHoldsAllMutexes(db) ); iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 ); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0, db->aDb[iDb].zDbSName ) ){ return; } #endif #ifdef SQLITE_ENABLE_PREUPDATE_HOOK if( db->xPreUpdateCallback ){ pStat1 = (Table*)sqlite3DbMallocZero(db, sizeof(Table) + 13); if( pStat1==0 ) return; pStat1->zName = (char*)&pStat1[1]; memcpy(pStat1->zName, "sqlite_stat1", 13); pStat1->nCol = 3; pStat1->iPKey = -1; sqlite3VdbeAddOp4(pParse->pVdbe, OP_Noop, 0, 0, 0,(char*)pStat1,P4_DYNBLOB); } #endif /* Establish a read-lock on the table at the shared-cache level. ** Open a read-only cursor on the table. Also allocate a cursor number ** to use for scanning indexes (iIdxCur). No index cursor is opened at ** this time though. */ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); iTabCur = iTab++; iIdxCur = iTab++; pParse->nTab = MAX(pParse->nTab, iTab); sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regTabname, pTab->zName); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int nCol; /* Number of columns in pIdx. "N" */ int addrRewind; /* Address of "OP_Rewind iIdxCur" */ int addrNextRow; /* Address of "next_row:" */ const char *zIdxName; /* Name of the index */ int nColTest; /* Number of columns to test for changes */ if( pOnlyIdx && pOnlyIdx!=pIdx ) continue; if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0; if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){ nCol = pIdx->nKeyCol; zIdxName = pTab->zName; nColTest = nCol - 1; }else{ nCol = pIdx->nColumn; zIdxName = pIdx->zName; nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1; } /* Populate the register containing the index name. 
*/ sqlite3VdbeLoadString(v, regIdxname, zIdxName); VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName)); /* ** Pseudo-code for loop that calls stat_push(): ** ** Rewind csr ** if eof(csr) goto end_of_scan; ** regChng = 0 ** goto chng_addr_0; ** ** next_row: ** regChng = 0 ** if( idx(0) != regPrev(0) ) goto chng_addr_0 ** regChng = 1 ** if( idx(1) != regPrev(1) ) goto chng_addr_1 ** ... ** regChng = N ** goto chng_addr_N ** ** chng_addr_0: ** regPrev(0) = idx(0) ** chng_addr_1: ** regPrev(1) = idx(1) ** ... ** ** endDistinctTest: ** regRowid = idx(rowid) ** stat_push(P, regChng, regRowid) ** Next csr ** if !eof(csr) goto next_row; ** ** end_of_scan: */ /* Make sure there are enough memory cells allocated to accommodate ** the regPrev array and a trailing rowid (the rowid slot is required ** when building a record to insert into the sample column of ** the sqlite_stat4 table. */ pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); /* Open a read-only cursor on the index being analyzed. */ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); /* Invoke the stat_init() function. The arguments are: ** ** (1) the number of columns in the index including the rowid ** (or for a WITHOUT ROWID table, the number of PK columns), ** (2) the number of columns in the key without the rowid/pk ** (3) the number of rows in the index, ** ** ** The third argument is only used for STAT3 and STAT4 */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+3); #endif sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat4+1); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regStat4+2); sqlite3VdbeAddOp4(v, OP_Function0, 0, regStat4+1, regStat4, (char*)&statInitFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 2+IsStat34); /* Implementation of the following: ** ** Rewind csr ** if eof(csr) goto end_of_scan; ** regChng = 0 ** goto next_push_0; ** */ addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); addrNextRow = sqlite3VdbeCurrentAddr(v); if( nColTest>0 ){ int endDistinctTest = sqlite3VdbeMakeLabel(pParse); int *aGotoChng; /* Array of jump instruction addresses */ aGotoChng = sqlite3DbMallocRawNN(db, sizeof(int)*nColTest); if( aGotoChng==0 ) continue; /* ** next_row: ** regChng = 0 ** if( idx(0) != regPrev(0) ) goto chng_addr_0 ** regChng = 1 ** if( idx(1) != regPrev(1) ) goto chng_addr_1 ** ... ** regChng = N ** goto endDistinctTest */ sqlite3VdbeAddOp0(v, OP_Goto); addrNextRow = sqlite3VdbeCurrentAddr(v); if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){ /* For a single-column UNIQUE index, once we have found a non-NULL ** row, we know that all the rest will be distinct, so skip ** subsequent distinctness tests. */ sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest); VdbeCoverage(v); } for(i=0; i<nColTest; i++){ char *pColl = (char*)sqlite3LocateCollSeq(pParse, pIdx->azColl[i]); sqlite3VdbeAddOp2(v, OP_Integer, i, regChng); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp); aGotoChng[i] = sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ); sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); VdbeCoverage(v); } sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng); sqlite3VdbeGoto(v, endDistinctTest); /* ** chng_addr_0: ** regPrev(0) = idx(0) ** chng_addr_1: ** regPrev(1) = idx(1) ** ... 
*/ sqlite3VdbeJumpHere(v, addrNextRow-1); for(i=0; i<nColTest; i++){ sqlite3VdbeJumpHere(v, aGotoChng[i]); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regPrev+i); } sqlite3VdbeResolveLabel(v, endDistinctTest); sqlite3DbFree(db, aGotoChng); } /* ** chng_addr_N: ** regRowid = idx(rowid) // STAT34 only ** stat_push(P, regChng, regRowid) // 3rd parameter STAT34 only ** Next csr ** if !eof(csr) goto next_row; */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 assert( regRowid==(regStat4+2) ); if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, regRowid); }else{ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable); int j, k, regKey; regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol); for(j=0; j<pPk->nKeyCol; j++){ k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); assert( k>=0 && k<pIdx->nColumn ); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j); VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName)); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid); sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol); } #endif assert( regChng==(regStat4+1) ); sqlite3VdbeAddOp4(v, OP_Function0, 1, regStat4, regTemp, (char*)&statPushFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 2+IsStat34); sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v); /* Add the entry to the stat1 table. */ callStatGet(v, regStat4, STAT_GET_STAT1, regStat1); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); #ifdef SQLITE_ENABLE_PREUPDATE_HOOK sqlite3VdbeChangeP4(v, -1, (char*)pStat1, P4_TABLE); #endif sqlite3VdbeChangeP5(v, OPFLAG_APPEND); /* Add the entries to the stat3 or stat4 table. */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 { int regEq = regStat1; int regLt = regStat1+1; int regDLt = regStat1+2; int regSample = regStat1+3; int regCol = regStat1+4; int regSampleRowid = regCol + nCol; int addrNext; int addrIsNull; u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; pParse->nMem = MAX(pParse->nMem, regCol+nCol); addrNext = sqlite3VdbeCurrentAddr(v); callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid); addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid); VdbeCoverage(v); callStatGet(v, regStat4, STAT_GET_NEQ, regEq); callStatGet(v, regStat4, STAT_GET_NLT, regLt); callStatGet(v, regStat4, STAT_GET_NDLT, regDLt); sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0); VdbeCoverage(v); #ifdef SQLITE_ENABLE_STAT3 sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, 0, regSample); #else for(i=0; i<nCol; i++){ sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, i, regCol+i); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol, regSample); #endif sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid); sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */ sqlite3VdbeJumpHere(v, addrIsNull); } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* End of analysis */ sqlite3VdbeJumpHere(v, addrRewind); } /* Create a single sqlite_stat1 entry containing NULL as the index ** name and the row count as the content. 
*/ if( pOnlyIdx==0 && needTableCnt ){ VdbeComment((v, "%s", pTab->zName)); sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1); jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); #ifdef SQLITE_ENABLE_PREUPDATE_HOOK sqlite3VdbeChangeP4(v, -1, (char*)pStat1, P4_TABLE); #endif sqlite3VdbeJumpHere(v, jZeroRows); } }
C
Chrome
0
CVE-2019-5754
https://www.cvedetails.com/cve/CVE-2019-5754/
CWE-310
https://github.com/chromium/chromium/commit/fd2335678e96c34d14f4b20f0d9613dfbd1ccdb4
fd2335678e96c34d14f4b20f0d9613dfbd1ccdb4
Fix a bug in network_session_configurator.cc in which support for HTTPS URLs in QUIC proxies was always set to false. BUG=914497 Change-Id: I56ad16088168302598bb448553ba32795eee3756 Reviewed-on: https://chromium-review.googlesource.com/c/1417356 Auto-Submit: Ryan Hamilton <[email protected]> Commit-Queue: Zhongyi Shi <[email protected]> Reviewed-by: Zhongyi Shi <[email protected]> Cr-Commit-Position: refs/heads/master@{#623763}
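The bug fixed here is an ignored-configuration flaw: a QUIC-proxy capability was hard-coded to false instead of being derived from the field-trial parameters. Below is a minimal C sketch of the before/after shape; the parameter name, the stub get_variation_param(), and both helper functions are hypothetical stand-ins for Chromium's variations code, not the actual patch.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the variations framework: one hard-wired trial parameter.
 * The parameter name is hypothetical. */
static const char *get_variation_param(const char *name) {
    if (strcmp(name, "enable_quic_proxies_for_https_urls") == 0)
        return "true";
    return "";
}

/* Buggy shape: the capability is pinned at compile time, so setting the
 * trial parameter has no effect. */
static bool proxies_for_https_buggy(void) {
    return false;
}

/* Fixed shape: derive the capability from the parameter, mirroring the
 * "allow_server_migration" reader shown in this record. */
static bool proxies_for_https_fixed(void) {
    return strcmp(get_variation_param("enable_quic_proxies_for_https_urls"),
                  "true") == 0;
}

int main(void) {
    printf("buggy: %d  fixed: %d\n",
           (int)proxies_for_https_buggy(), (int)proxies_for_https_fixed());
    return 0;
}

The fixed reader computes the answer from the parameter value rather than pinning it, which is exactly the pattern the record's ShouldQuicAllowServerMigration() function already follows.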
bool ShouldQuicAllowServerMigration( const VariationParameters& quic_trial_params) { return base::LowerCaseEqualsASCII( GetVariationParam(quic_trial_params, "allow_server_migration"), "true"); }
bool ShouldQuicAllowServerMigration( const VariationParameters& quic_trial_params) { return base::LowerCaseEqualsASCII( GetVariationParam(quic_trial_params, "allow_server_migration"), "true"); }
C
Chrome
0
CVE-2011-3083
https://www.cvedetails.com/cve/CVE-2011-3083/
CWE-119
https://github.com/chromium/chromium/commit/d6b061bf189e0661a3d94d89dbcb2e6f70b433da
d6b061bf189e0661a3d94d89dbcb2e6f70b433da
Give the media context an ftp job factory; prevent a browser crash. BUG=112983 TEST=none Review URL: http://codereview.chromium.org/9372002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@121378 0039d316-1c4b-4281-b951-d872f2087c98
void ClearNetworkingHistorySinceOnIOThread( ProfileImplIOData* io_data, base::Time time) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); io_data->transport_security_state()->DeleteSince(time); io_data->http_server_properties()->Clear(); }
void ClearNetworkingHistorySinceOnIOThread( ProfileImplIOData* io_data, base::Time time) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); io_data->transport_security_state()->DeleteSince(time); io_data->http_server_properties()->Clear(); }
C
Chrome
0
CVE-2015-1216
https://www.cvedetails.com/cve/CVE-2015-1216/
null
https://github.com/chromium/chromium/commit/82eeef54780833a29e88c5677a7cfa11205a9878
82eeef54780833a29e88c5677a7cfa11205a9878
Reload frame in V8Window::namedPropertyGetterCustom after js call [email protected] BUG=454954 Review URL: https://codereview.chromium.org/901053006 git-svn-id: svn://svn.chromium.org/blink/trunk@189574 bbb929c8-8fbe-4397-9dbb-9b2b20218538
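The commit re-reads a frame pointer after a JavaScript call, because script run during the call can detach or navigate the frame and leave a previously cached pointer stale. A minimal C sketch of that reload-after-callback discipline, with all types and names hypothetical:

#include <stddef.h>
#include <stdio.h>

struct frame  { int id; };
struct window { struct frame *frame; };   /* frame becomes NULL on detach */

typedef void (*js_callback)(struct window *w);

/* Unsafe shape: the frame pointer is cached before the script call and
 * used afterwards, even though the script may have detached the frame. */
static void lookup_unsafe(struct window *w, js_callback cb) {
    struct frame *f = w->frame;
    cb(w);                                /* may detach the frame */
    if (f != NULL)
        printf("unsafe: frame id %d (possibly stale)\n", f->id);
}

/* Safe shape, as in the fix: re-read the frame after the call into
 * script and bail out if it is gone. */
static void lookup_safe(struct window *w, js_callback cb) {
    cb(w);
    struct frame *f = w->frame;           /* reloaded after the call */
    if (f == NULL)
        return;                           /* frame was detached */
    printf("safe: frame id %d\n", f->id);
}

static void detach(struct window *w) { w->frame = NULL; }

int main(void) {
    struct frame fr = { 7 };
    struct window w = { &fr };
    lookup_unsafe(&w, detach);            /* reads through a stale pointer */
    w.frame = &fr;
    lookup_safe(&w, detach);              /* prints nothing: detach detected */
    return 0;
}

The design point: any raw pointer cached across a call into script must be treated as invalidated; re-fetching from the owning object (and bailing out on NULL) is the cheap alternative to a use-after-detach.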
bool V8Window::indexedSecurityCheckCustom(v8::Local<v8::Object> host, uint32_t index, v8::AccessType type, v8::Local<v8::Value>) { v8::Isolate* isolate = v8::Isolate::GetCurrent(); v8::Handle<v8::Object> window = V8Window::findInstanceInPrototypeChain(host, isolate); if (window.IsEmpty()) return false; DOMWindow* targetWindow = V8Window::toImpl(window); ASSERT(targetWindow); if (!targetWindow->isLocalDOMWindow()) return false; LocalFrame* target = toLocalDOMWindow(targetWindow)->frame(); if (!target) return false; if (target->loader().stateMachine()->isDisplayingInitialEmptyDocument()) target->loader().didAccessInitialDocument(); Frame* childFrame = target->tree().scopedChild(index); if (type == v8::ACCESS_HAS && childFrame) return true; if (type == v8::ACCESS_GET && childFrame && !host->HasRealIndexedProperty(index) && !window->HasRealIndexedProperty(index)) return true; return BindingSecurity::shouldAllowAccessToFrame(isolate, target, DoNotReportSecurityError); }
bool V8Window::indexedSecurityCheckCustom(v8::Local<v8::Object> host, uint32_t index, v8::AccessType type, v8::Local<v8::Value>) { v8::Isolate* isolate = v8::Isolate::GetCurrent(); v8::Handle<v8::Object> window = V8Window::findInstanceInPrototypeChain(host, isolate); if (window.IsEmpty()) return false; DOMWindow* targetWindow = V8Window::toImpl(window); ASSERT(targetWindow); if (!targetWindow->isLocalDOMWindow()) return false; LocalFrame* target = toLocalDOMWindow(targetWindow)->frame(); if (!target) return false; if (target->loader().stateMachine()->isDisplayingInitialEmptyDocument()) target->loader().didAccessInitialDocument(); Frame* childFrame = target->tree().scopedChild(index); if (type == v8::ACCESS_HAS && childFrame) return true; if (type == v8::ACCESS_GET && childFrame && !host->HasRealIndexedProperty(index) && !window->HasRealIndexedProperty(index)) return true; return BindingSecurity::shouldAllowAccessToFrame(isolate, target, DoNotReportSecurityError); }
C
Chrome
0
CVE-2015-2304
https://www.cvedetails.com/cve/CVE-2015-2304/
CWE-22
https://github.com/libarchive/libarchive/commit/59357157706d47c365b2227739e17daba3607526
59357157706d47c365b2227739e17daba3607526
Add ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS option This fixes a directory traversal in the cpio tool.
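The new option refuses entries whose destination path is absolute, closing the cpio traversal. A minimal C sketch of such a policy check; the flag value and the drive-letter case are assumptions, only the option's intent comes from the commit:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Assumed flag value for illustration. */
#define EXTRACT_SECURE_NOABSOLUTEPATHS (1 << 0)

/* Returns true if the entry path may be written. With the flag set,
 * absolute POSIX paths and drive-letter paths are refused before any
 * file is created. */
static bool path_allowed(const char *path, int flags) {
    if ((flags & EXTRACT_SECURE_NOABSOLUTEPATHS) == 0)
        return true;
    if (path[0] == '/')
        return false;                           /* absolute POSIX path */
    if (strlen(path) >= 3 && path[1] == ':' &&
        (path[2] == '/' || path[2] == '\\'))
        return false;                           /* e.g. C:\ or C:/ */
    return true;
}

int main(void) {
    printf("%d\n", path_allowed("/etc/passwd", EXTRACT_SECURE_NOABSOLUTEPATHS));
    printf("%d\n", path_allowed("docs/readme", EXTRACT_SECURE_NOABSOLUTEPATHS));
    return 0;
}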
set_xattrs(struct archive_write_disk *a) { struct archive_entry *entry = a->entry; static int warning_done = 0; int ret = ARCHIVE_OK; int i = archive_entry_xattr_reset(entry); while (i--) { const char *name; const void *value; size_t size; archive_entry_xattr_next(entry, &name, &value, &size); if (name != NULL && strncmp(name, "xfsroot.", 8) != 0 && strncmp(name, "system.", 7) != 0) { int e; #if HAVE_FSETXATTR if (a->fd >= 0) e = fsetxattr(a->fd, name, value, size, 0); else #elif HAVE_FSETEA if (a->fd >= 0) e = fsetea(a->fd, name, value, size, 0); else #endif { #if HAVE_LSETXATTR e = lsetxattr(archive_entry_pathname(entry), name, value, size, 0); #elif HAVE_LSETEA e = lsetea(archive_entry_pathname(entry), name, value, size, 0); #endif } if (e == -1) { if (errno == ENOTSUP || errno == ENOSYS) { if (!warning_done) { warning_done = 1; archive_set_error(&a->archive, errno, "Cannot restore extended " "attributes on this file " "system"); } } else archive_set_error(&a->archive, errno, "Failed to set extended attribute"); ret = ARCHIVE_WARN; } } else { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid extended attribute encountered"); ret = ARCHIVE_WARN; } } return (ret); }
set_xattrs(struct archive_write_disk *a) { struct archive_entry *entry = a->entry; static int warning_done = 0; int ret = ARCHIVE_OK; int i = archive_entry_xattr_reset(entry); while (i--) { const char *name; const void *value; size_t size; archive_entry_xattr_next(entry, &name, &value, &size); if (name != NULL && strncmp(name, "xfsroot.", 8) != 0 && strncmp(name, "system.", 7) != 0) { int e; #if HAVE_FSETXATTR if (a->fd >= 0) e = fsetxattr(a->fd, name, value, size, 0); else #elif HAVE_FSETEA if (a->fd >= 0) e = fsetea(a->fd, name, value, size, 0); else #endif { #if HAVE_LSETXATTR e = lsetxattr(archive_entry_pathname(entry), name, value, size, 0); #elif HAVE_LSETEA e = lsetea(archive_entry_pathname(entry), name, value, size, 0); #endif } if (e == -1) { if (errno == ENOTSUP || errno == ENOSYS) { if (!warning_done) { warning_done = 1; archive_set_error(&a->archive, errno, "Cannot restore extended " "attributes on this file " "system"); } } else archive_set_error(&a->archive, errno, "Failed to set extended attribute"); ret = ARCHIVE_WARN; } } else { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Invalid extended attribute encountered"); ret = ARCHIVE_WARN; } } return (ret); }
C
libarchive
0
CVE-2015-3412
https://www.cvedetails.com/cve/CVE-2015-3412/
CWE-254
https://git.php.net/?p=php-src.git;a=commit;h=4435b9142ff9813845d5c97ab29a5d637bedb257
4435b9142ff9813845d5c97ab29a5d637bedb257
null
PHP_FUNCTION(dom_document_import_node) { zval *id, *node; xmlDocPtr docp; xmlNodePtr nodep, retnodep; dom_object *intern, *nodeobj; int ret; long recursive = 0; if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "OO|l", &id, dom_document_class_entry, &node, dom_node_class_entry, &recursive) == FAILURE) { return; } DOM_GET_OBJ(docp, id, xmlDocPtr, intern); DOM_GET_OBJ(nodep, node, xmlNodePtr, nodeobj); if (nodep->type == XML_HTML_DOCUMENT_NODE || nodep->type == XML_DOCUMENT_NODE || nodep->type == XML_DOCUMENT_TYPE_NODE) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot import: Node Type Not Supported"); RETURN_FALSE; } if (nodep->doc == docp) { retnodep = nodep; } else { if ((recursive == 0) && (nodep->type == XML_ELEMENT_NODE)) { recursive = 2; } retnodep = xmlDocCopyNode(nodep, docp, recursive); if (!retnodep) { RETURN_FALSE; } if ((retnodep->type == XML_ATTRIBUTE_NODE) && (nodep->ns != NULL)) { xmlNsPtr nsptr = NULL; xmlNodePtr root = xmlDocGetRootElement(docp); nsptr = xmlSearchNsByHref (nodep->doc, root, nodep->ns->href); if (nsptr == NULL) { int errorcode; nsptr = dom_get_ns(root, (char *) nodep->ns->href, &errorcode, (char *) nodep->ns->prefix); } xmlSetNs(retnodep, nsptr); } } DOM_RET_OBJ((xmlNodePtr) retnodep, &ret, intern); }
PHP_FUNCTION(dom_document_import_node) { zval *id, *node; xmlDocPtr docp; xmlNodePtr nodep, retnodep; dom_object *intern, *nodeobj; int ret; long recursive = 0; if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "OO|l", &id, dom_document_class_entry, &node, dom_node_class_entry, &recursive) == FAILURE) { return; } DOM_GET_OBJ(docp, id, xmlDocPtr, intern); DOM_GET_OBJ(nodep, node, xmlNodePtr, nodeobj); if (nodep->type == XML_HTML_DOCUMENT_NODE || nodep->type == XML_DOCUMENT_NODE || nodep->type == XML_DOCUMENT_TYPE_NODE) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot import: Node Type Not Supported"); RETURN_FALSE; } if (nodep->doc == docp) { retnodep = nodep; } else { if ((recursive == 0) && (nodep->type == XML_ELEMENT_NODE)) { recursive = 2; } retnodep = xmlDocCopyNode(nodep, docp, recursive); if (!retnodep) { RETURN_FALSE; } if ((retnodep->type == XML_ATTRIBUTE_NODE) && (nodep->ns != NULL)) { xmlNsPtr nsptr = NULL; xmlNodePtr root = xmlDocGetRootElement(docp); nsptr = xmlSearchNsByHref (nodep->doc, root, nodep->ns->href); if (nsptr == NULL) { int errorcode; nsptr = dom_get_ns(root, (char *) nodep->ns->href, &errorcode, (char *) nodep->ns->prefix); } xmlSetNs(retnodep, nsptr); } } DOM_RET_OBJ((xmlNodePtr) retnodep, &ret, intern); }
C
php
0
CVE-2016-1620
https://www.cvedetails.com/cve/CVE-2016-1620/
null
https://github.com/chromium/chromium/commit/b90c7c8c335a2e2a4abdd7bde17a44f92c8b3a54
b90c7c8c335a2e2a4abdd7bde17a44f92c8b3a54
Fix GPU process fallback logic. 1. In GpuProcessHost::OnProcessCrashed() record the process crash first. This means the GPU mode fallback will happen before a new GPU process is started. 2. Don't call FallBackToNextGpuMode() if GPU process initialization fails for an unsandboxed GPU process. The unsandboxed GPU process is only used to collect information, and its failure doesn't indicate a need to change GPU modes. Bug: 869419 Change-Id: I8bd0a03268f0ea8809f3df8458d4e6a92db9391f Reviewed-on: https://chromium-review.googlesource.com/1157164 Reviewed-by: Zhenyao Mo <[email protected]> Commit-Queue: kylechar <[email protected]> Cr-Commit-Position: refs/heads/master@{#579625}
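Point 1 of the message is purely an ordering fix: the crash must be recorded (and the GPU mode possibly downgraded) before the replacement process launches, or the new process starts in the mode that just crashed. A toy C sketch of the two orderings; the mode enum and function names are hypothetical:

#include <stdio.h>

enum gpu_mode { HW_ACCELERATED, SW_COMPOSITING, DISABLED_MODE };
static enum gpu_mode g_mode = HW_ACCELERATED;

/* Recording the crash steps the mode down one level. */
static void record_crash(void) {
    g_mode = (g_mode == HW_ACCELERATED) ? SW_COMPOSITING : DISABLED_MODE;
}

static void launch_gpu_process(void) {
    printf("launching GPU process in mode %d\n", (int)g_mode);
}

/* Buggy ordering: relaunch first, record second - the replacement
 * process starts in the mode that just crashed. */
static void on_crash_buggy(void) { launch_gpu_process(); record_crash(); }

/* Fixed ordering, per point 1 of the commit: record first so the
 * fallback takes effect before the next launch. */
static void on_crash_fixed(void) { record_crash(); launch_gpu_process(); }

int main(void) {
    on_crash_buggy();     /* launches in HW_ACCELERATED again */
    g_mode = HW_ACCELERATED;
    on_crash_fixed();     /* launches in SW_COMPOSITING */
    return 0;
}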
bool GpuProcessHost::Init() { init_start_time_ = base::TimeTicks::Now(); TRACE_EVENT_INSTANT0("gpu", "LaunchGpuProcess", TRACE_EVENT_SCOPE_THREAD); if (ServiceManagerConnection::GetForProcess()) { ServiceManagerConnection::GetForProcess()->AddConnectionFilter( std::make_unique<ConnectionFilterImpl>(process_->GetData().id)); } process_->GetHost()->CreateChannelMojo(); mode_ = GpuDataManagerImpl::GetInstance()->GetGpuMode(); DCHECK_NE(mode_, gpu::GpuMode::DISABLED); if (in_process_) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DCHECK(GetGpuMainThreadFactory()); gpu::GpuPreferences gpu_preferences = GetGpuPreferencesFromCommandLine(); GpuDataManagerImpl::GetInstance()->UpdateGpuPreferences(&gpu_preferences); in_process_gpu_thread_.reset(GetGpuMainThreadFactory()( InProcessChildThreadParams( base::ThreadTaskRunnerHandle::Get(), process_->GetInProcessMojoInvitation(), process_->child_connection()->service_token()), gpu_preferences)); base::Thread::Options options; #if defined(OS_WIN) || defined(OS_MACOSX) options.message_loop_type = base::MessageLoop::TYPE_UI; #endif #if defined(OS_ANDROID) || defined(OS_CHROMEOS) options.priority = base::ThreadPriority::DISPLAY; #endif in_process_gpu_thread_->StartWithOptions(options); OnProcessLaunched(); // Fake a callback that the process is ready. } else if (!LaunchGpuProcess()) { return false; } process_->child_channel() ->GetAssociatedInterfaceSupport() ->GetRemoteAssociatedInterface(&gpu_main_ptr_); viz::mojom::GpuHostPtr host_proxy; gpu_host_binding_.Bind(mojo::MakeRequest(&host_proxy)); discardable_memory::mojom::DiscardableSharedMemoryManagerPtr discardable_manager_ptr; auto discardable_request = mojo::MakeRequest(&discardable_manager_ptr); BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, base::BindOnce(&BindDiscardableMemoryRequestOnUI, std::move(discardable_request))); DCHECK(GetFontRenderParamsOnIO().initialized); gpu_main_ptr_->CreateGpuService( mojo::MakeRequest(&gpu_service_ptr_), std::move(host_proxy), std::move(discardable_manager_ptr), activity_flags_.CloneHandle(), GetFontRenderParamsOnIO().params.subpixel_rendering); #if defined(USE_OZONE) InitOzone(); #endif // defined(USE_OZONE) return true; }
bool GpuProcessHost::Init() { init_start_time_ = base::TimeTicks::Now(); TRACE_EVENT_INSTANT0("gpu", "LaunchGpuProcess", TRACE_EVENT_SCOPE_THREAD); if (ServiceManagerConnection::GetForProcess()) { ServiceManagerConnection::GetForProcess()->AddConnectionFilter( std::make_unique<ConnectionFilterImpl>(process_->GetData().id)); } process_->GetHost()->CreateChannelMojo(); mode_ = GpuDataManagerImpl::GetInstance()->GetGpuMode(); DCHECK_NE(mode_, gpu::GpuMode::DISABLED); if (in_process_) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DCHECK(GetGpuMainThreadFactory()); gpu::GpuPreferences gpu_preferences = GetGpuPreferencesFromCommandLine(); GpuDataManagerImpl::GetInstance()->UpdateGpuPreferences(&gpu_preferences); in_process_gpu_thread_.reset(GetGpuMainThreadFactory()( InProcessChildThreadParams( base::ThreadTaskRunnerHandle::Get(), process_->GetInProcessMojoInvitation(), process_->child_connection()->service_token()), gpu_preferences)); base::Thread::Options options; #if defined(OS_WIN) || defined(OS_MACOSX) options.message_loop_type = base::MessageLoop::TYPE_UI; #endif #if defined(OS_ANDROID) || defined(OS_CHROMEOS) options.priority = base::ThreadPriority::DISPLAY; #endif in_process_gpu_thread_->StartWithOptions(options); OnProcessLaunched(); // Fake a callback that the process is ready. } else if (!LaunchGpuProcess()) { return false; } process_->child_channel() ->GetAssociatedInterfaceSupport() ->GetRemoteAssociatedInterface(&gpu_main_ptr_); viz::mojom::GpuHostPtr host_proxy; gpu_host_binding_.Bind(mojo::MakeRequest(&host_proxy)); discardable_memory::mojom::DiscardableSharedMemoryManagerPtr discardable_manager_ptr; auto discardable_request = mojo::MakeRequest(&discardable_manager_ptr); BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, base::BindOnce(&BindDiscardableMemoryRequestOnUI, std::move(discardable_request))); DCHECK(GetFontRenderParamsOnIO().initialized); gpu_main_ptr_->CreateGpuService( mojo::MakeRequest(&gpu_service_ptr_), std::move(host_proxy), std::move(discardable_manager_ptr), activity_flags_.CloneHandle(), GetFontRenderParamsOnIO().params.subpixel_rendering); #if defined(USE_OZONE) InitOzone(); #endif // defined(USE_OZONE) return true; }
C
Chrome
0
CVE-2012-2121
https://www.cvedetails.com/cve/CVE-2012-2121/
CWE-264
https://github.com/torvalds/linux/commit/09ca8e1173bcb12e2a449698c9ae3b86a8a10195
09ca8e1173bcb12e2a449698c9ae3b86a8a10195
KVM: unmap pages from the iommu when slots are removed commit 32f6daad4651a748a58a3ab6da0611862175722f upstream. We've been adding new mappings, but not destroying old mappings. This can lead to a page leak as pages are pinned using get_user_pages, but only unpinned with put_page if they still exist in the memslots list on vm shutdown. A memslot that is destroyed while an iommu domain is enabled for the guest will therefore result in an elevated page reference count that is never cleared. Additionally, without this fix, the iommu is only programmed with the first translation for a gpa. This can result in peer-to-peer errors if a mapping is destroyed and replaced by a new mapping at the same gpa as the iommu will still be pointing to the original, pinned memory address. Signed-off-by: Alex Williamson <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
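The leak described here is an unbalanced pin: mapping a memslot into the iommu pins its pages via get_user_pages, and before the fix nothing unpinned them when the slot was destroyed. A toy C sketch of the pairing the commit restores; the structures are illustrative, not the kernel's:

#include <stdio.h>

/* Toy page with a reference count, standing in for struct page. */
struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }   /* pin */
static void put_page(struct page *p) { p->refcount--; }   /* unpin */

struct memslot {
    struct page *page;
    int mapped;                       /* mapped into the (toy) iommu */
};

static void iommu_map_slot(struct memslot *s) {
    get_page(s->page);                /* mapping pins the page */
    s->mapped = 1;
}

/* The missing half before the fix: removal must drop the pin, or the
 * page's count never returns to its base value and the page leaks. */
static void iommu_unmap_slot(struct memslot *s) {
    if (s->mapped) {
        put_page(s->page);
        s->mapped = 0;
    }
}

static void remove_slot(struct memslot *s) {
    iommu_unmap_slot(s);              /* in effect, the call the commit adds */
}

int main(void) {
    struct page pg = { 1 };
    struct memslot slot = { &pg, 0 };
    iommu_map_slot(&slot);
    remove_slot(&slot);
    printf("refcount after removal: %d\n", pg.refcount);   /* back to 1 */
    return 0;
}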
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) { int r; struct kvm_vcpu *vcpu, *v; vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) return PTR_ERR(vcpu); preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); if (r) goto vcpu_destroy; mutex_lock(&kvm->lock); if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { r = -EINVAL; goto unlock_vcpu_destroy; } kvm_for_each_vcpu(r, v, kvm) if (v->vcpu_id == id) { r = -EEXIST; goto unlock_vcpu_destroy; } BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { kvm_put_kvm(kvm); goto unlock_vcpu_destroy; } kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; smp_wmb(); atomic_inc(&kvm->online_vcpus); mutex_unlock(&kvm->lock); return r; unlock_vcpu_destroy: mutex_unlock(&kvm->lock); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); return r; }
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) { int r; struct kvm_vcpu *vcpu, *v; vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) return PTR_ERR(vcpu); preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); if (r) goto vcpu_destroy; mutex_lock(&kvm->lock); if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { r = -EINVAL; goto unlock_vcpu_destroy; } kvm_for_each_vcpu(r, v, kvm) if (v->vcpu_id == id) { r = -EEXIST; goto unlock_vcpu_destroy; } BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); /* Now it's all set up, let userspace reach it */ kvm_get_kvm(kvm); r = create_vcpu_fd(vcpu); if (r < 0) { kvm_put_kvm(kvm); goto unlock_vcpu_destroy; } kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; smp_wmb(); atomic_inc(&kvm->online_vcpus); mutex_unlock(&kvm->lock); return r; unlock_vcpu_destroy: mutex_unlock(&kvm->lock); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); return r; }
C
linux
0
CVE-2018-6154
https://www.cvedetails.com/cve/CVE-2018-6154/
CWE-119
https://github.com/chromium/chromium/commit/98095c718d7580b5d6715e5bfd8698234ecb4470
98095c718d7580b5d6715e5bfd8698234ecb4470
Validate all incoming WebGLObjects. A few entry points were missing the correct validation. Tested with improved conformance tests in https://github.com/KhronosGroup/WebGL/pull/2654 . Bug: 848914 Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel Change-Id: Ib98a61cc5bf378d1b3338b04acd7e1bc4c2fe008 Reviewed-on: https://chromium-review.googlesource.com/1086718 Reviewed-by: Kai Ninomiya <[email protected]> Reviewed-by: Antoine Labour <[email protected]> Commit-Queue: Kenneth Russell <[email protected]> Cr-Commit-Position: refs/heads/master@{#565016}
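The fix adds the standard WebGL object validation (is the object live, and does it belong to this context?) to entry points that had skipped it. A minimal C sketch of that check; the structures are hypothetical and much simpler than Blink's:

#include <stdbool.h>
#include <stdio.h>

struct gl_context { int id; };

struct gl_object {
    const struct gl_context *owner;   /* context that created the object */
    bool deleted;                     /* true once destroyed */
};

/* The check the commit extends to the remaining entry points, in spirit:
 * NULL is often legal (an unbind), but a deleted object or one owned by
 * a different context must be rejected. */
static bool validate_object(const struct gl_context *ctx,
                            const struct gl_object *obj) {
    if (obj == NULL)
        return true;
    if (obj->deleted)
        return false;
    return obj->owner == ctx;
}

int main(void) {
    struct gl_context ctx = { 1 }, other = { 2 };
    struct gl_object ok      = { &ctx,   false };
    struct gl_object foreign = { &other, false };
    printf("%d %d\n", (int)validate_object(&ctx, &ok),
                      (int)validate_object(&ctx, &foreign));
    return 0;
}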
WebGL2RenderingContextBase::WebGL2RenderingContextBase( CanvasRenderingContextHost* host, std::unique_ptr<WebGraphicsContext3DProvider> context_provider, bool using_gpu_compositing, const CanvasContextCreationAttributesCore& requested_attributes) : WebGLRenderingContextBase(host, std::move(context_provider), using_gpu_compositing, requested_attributes, 2) { supported_internal_formats_storage_.insert( kSupportedInternalFormatsStorage, kSupportedInternalFormatsStorage + arraysize(kSupportedInternalFormatsStorage)); }
WebGL2RenderingContextBase::WebGL2RenderingContextBase( CanvasRenderingContextHost* host, std::unique_ptr<WebGraphicsContext3DProvider> context_provider, bool using_gpu_compositing, const CanvasContextCreationAttributesCore& requested_attributes) : WebGLRenderingContextBase(host, std::move(context_provider), using_gpu_compositing, requested_attributes, 2) { supported_internal_formats_storage_.insert( kSupportedInternalFormatsStorage, kSupportedInternalFormatsStorage + arraysize(kSupportedInternalFormatsStorage)); }
C
Chrome
0
CVE-2013-2206
https://www.cvedetails.com/cve/CVE-2013-2206/
null
https://github.com/torvalds/linux/commit/f2815633504b442ca0b0605c16bf3d88a3a0fcea
f2815633504b442ca0b0605c16bf3d88a3a0fcea
sctp: Use correct side-effect command in duplicate cookie handling When SCTP is done processing a duplicate cookie chunk, it tries to delete a newly created association. For that, it has to set the right association for the side-effect processing to work. However, when it uses the SCTP_CMD_NEW_ASOC command, that performs more work than really needed (like hashing the association and assigning it an id) and there is no point to do that only to delete the association as a next step. In fact, it also creates an impossible condition where an association may be found by the getsockopt() call, and that association is empty. This causes a crash in some sctp getsockopts. The solution is rather simple. We simply use the SCTP_CMD_SET_ASOC command that doesn't have all the overhead and does exactly what we need. Reported-by: Karl Heiss <[email protected]> Tested-by: Karl Heiss <[email protected]> CC: Neil Horman <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Acked-by: Neil Horman <[email protected]> Signed-off-by: David S. Miller <[email protected]>
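The distinction the commit draws is between a side-effect command that fully registers an association (hashing it and assigning an id, making it findable by getsockopt()) and one that merely sets the current association so it can be deleted. A toy C sketch of the two commands; the types are illustrative, not the kernel's:

#include <stdio.h>

struct assoc { int id; int hashed; };

static int next_id = 1;

/* Heavyweight command: hashes the association and assigns an id, making
 * it visible to lookups such as getsockopt() - wasted, and briefly
 * dangerous, when the very next step deletes it. */
static void cmd_new_asoc(struct assoc **cur, struct assoc *a) {
    a->id = next_id++;
    a->hashed = 1;
    *cur = a;
}

/* Lightweight command: just point the side-effect machinery at the
 * association so the delete can find it; nothing is published. */
static void cmd_set_asoc(struct assoc **cur, struct assoc *a) {
    *cur = a;
}

int main(void) {
    struct assoc dup = { 0, 0 };
    struct assoc *cur = NULL;

    cmd_new_asoc(&cur, &dup);   /* old path: publishes, then deletes */
    printf("new_asoc: id=%d hashed=%d\n", cur->id, cur->hashed);

    struct assoc dup2 = { 0, 0 };
    cmd_set_asoc(&cur, &dup2);  /* fixed path: set, then delete */
    printf("set_asoc: id=%d hashed=%d\n", cur->id, cur->hashed);
    return 0;
}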
sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Call helper to do the real work for both simultaneous and * duplicate INIT chunk handling. */ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands); }
sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { /* Call helper to do the real work for both simultaneous and * duplicate INIT chunk handling. */ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands); }
C
linux
0
CVE-2018-14351
https://www.cvedetails.com/cve/CVE-2018-14351/
CWE-20
https://github.com/neomutt/neomutt/commit/3c49c44be9b459d9c616bcaef6eb5d51298c1741
3c49c44be9b459d9c616bcaef6eb5d51298c1741
Ensure litlen isn't larger than our mailbox
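Per the message, the fix bounds a literal's announced length against the destination mailbox buffer before using it. A minimal C sketch of that guard; the buffer size and function shape are assumptions:

#include <stdio.h>
#include <string.h>

#define MAILBOX_MAX 64            /* assumed buffer size */

/* Sketch of the added guard: the length announced on the wire is checked
 * against the destination buffer instead of being trusted. */
static int read_literal(char *dst, size_t dstsize,
                        const char *src, size_t litlen) {
    if (litlen >= dstsize)        /* leave room for the terminator */
        return -1;                /* refuse an oversized literal */
    memcpy(dst, src, litlen);
    dst[litlen] = '\0';
    return 0;
}

int main(void) {
    char mailbox[MAILBOX_MAX];
    const char *wire = "INBOX";
    if (read_literal(mailbox, sizeof(mailbox), wire, strlen(wire)) == 0)
        printf("mailbox: %s\n", mailbox);
    return 0;
}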
static void cmd_parse_lsub(struct ImapData *idata, char *s) { char buf[STRING]; char errstr[STRING]; struct Buffer err, token; struct Url url; struct ImapList list; if (idata->cmddata && idata->cmdtype == IMAP_CT_LIST) { /* caller will handle response itself */ cmd_parse_list(idata, s); return; } if (!ImapCheckSubscribed) return; idata->cmdtype = IMAP_CT_LIST; idata->cmddata = &list; cmd_parse_list(idata, s); idata->cmddata = NULL; /* noselect is for a gmail quirk (#3445) */ if (!list.name || list.noselect) return; mutt_debug(3, "Subscribing to %s\n", list.name); mutt_str_strfcpy(buf, "mailboxes \"", sizeof(buf)); mutt_account_tourl(&idata->conn->account, &url); /* escape \ and " */ imap_quote_string(errstr, sizeof(errstr), list.name, true); url.path = errstr + 1; url.path[strlen(url.path) - 1] = '\0'; if (mutt_str_strcmp(url.user, ImapUser) == 0) url.user = NULL; url_tostring(&url, buf + 11, sizeof(buf) - 11, 0); mutt_str_strcat(buf, sizeof(buf), "\""); mutt_buffer_init(&token); mutt_buffer_init(&err); err.data = errstr; err.dsize = sizeof(errstr); if (mutt_parse_rc_line(buf, &token, &err)) mutt_debug(1, "Error adding subscribed mailbox: %s\n", errstr); FREE(&token.data); }
static void cmd_parse_lsub(struct ImapData *idata, char *s) { char buf[STRING]; char errstr[STRING]; struct Buffer err, token; struct Url url; struct ImapList list; if (idata->cmddata && idata->cmdtype == IMAP_CT_LIST) { /* caller will handle response itself */ cmd_parse_list(idata, s); return; } if (!ImapCheckSubscribed) return; idata->cmdtype = IMAP_CT_LIST; idata->cmddata = &list; cmd_parse_list(idata, s); idata->cmddata = NULL; /* noselect is for a gmail quirk (#3445) */ if (!list.name || list.noselect) return; mutt_debug(3, "Subscribing to %s\n", list.name); mutt_str_strfcpy(buf, "mailboxes \"", sizeof(buf)); mutt_account_tourl(&idata->conn->account, &url); /* escape \ and " */ imap_quote_string(errstr, sizeof(errstr), list.name, true); url.path = errstr + 1; url.path[strlen(url.path) - 1] = '\0'; if (mutt_str_strcmp(url.user, ImapUser) == 0) url.user = NULL; url_tostring(&url, buf + 11, sizeof(buf) - 11, 0); mutt_str_strcat(buf, sizeof(buf), "\""); mutt_buffer_init(&token); mutt_buffer_init(&err); err.data = errstr; err.dsize = sizeof(errstr); if (mutt_parse_rc_line(buf, &token, &err)) mutt_debug(1, "Error adding subscribed mailbox: %s\n", errstr); FREE(&token.data); }
C
neomutt
0
CVE-2014-2669
https://www.cvedetails.com/cve/CVE-2014-2669/
CWE-189
https://github.com/postgres/postgres/commit/31400a673325147e1205326008e32135a78b4d8a
31400a673325147e1205326008e32135a78b4d8a
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
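"Predicting" the overflow means checking, before computing header + n * sizeof(elem), that the multiplication cannot wrap size_t; otherwise a huge input yields a tiny allocation that the subsequent writes overrun. A minimal C sketch of the guard; the struct stands in for types like PATH and is not PostgreSQL's code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Header plus flexible array, standing in for types like PATH. */
struct path { size_t npts; double pts[]; };

/* Verify that header + n * sizeof(elem) fits in size_t before computing
 * it, instead of letting it wrap to a small value. */
static struct path *alloc_path(size_t npts) {
    size_t max_pts = (SIZE_MAX - sizeof(struct path)) / sizeof(double);
    if (npts > max_pts)
        return NULL;              /* report "too many elements" instead */
    struct path *p = malloc(sizeof(struct path) + npts * sizeof(double));
    if (p != NULL)
        p->npts = npts;
    return p;
}

int main(void) {
    struct path *p = alloc_path(4);
    printf("%s\n", p != NULL ? "allocated" : "refused");
    free(p);
    return 0;
}

The division-based bound is the standard idiom: it predicts whether the multiply would overflow without ever performing the overflowing multiply.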
circle_area(PG_FUNCTION_ARGS) { CIRCLE *circle = PG_GETARG_CIRCLE_P(0); PG_RETURN_FLOAT8(circle_ar(circle)); }
circle_area(PG_FUNCTION_ARGS) { CIRCLE *circle = PG_GETARG_CIRCLE_P(0); PG_RETURN_FLOAT8(circle_ar(circle)); }
C
postgres
0
CVE-2013-7421
https://www.cvedetails.com/cve/CVE-2013-7421/
CWE-264
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
crypto: prefix module autoloading with "crypto-" This prefixes all crypto module loading with "crypto-" so we never run the risk of exposing module auto-loading to userspace via a crypto API, as demonstrated by Mathias Krause: https://lkml.org/lkml/2013/3/4/70 Signed-off-by: Kees Cook <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
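The hardening is a namespace: by requesting "crypto-<name>" instead of the raw algorithm name, only modules that explicitly declare a crypto alias can be auto-loaded from the crypto API, so userspace-supplied names can no longer reach arbitrary module aliases. A minimal userspace sketch of the before/after request; request_module() here is a logging stand-in for the kernel function:

#include <stdio.h>

/* Logging stand-in for the kernel's request_module(). */
static void request_module(const char *fmt, const char *name) {
    printf("would load module: ");
    printf(fmt, name);
    printf("\n");
}

int main(void) {
    const char *alg = "aes";          /* may originate in userspace */

    /* Before: the raw name reaches the module loader, so the crypto API
     * can trigger auto-loading of any module alias on the system. */
    request_module("%s", alg);

    /* After: only modules that declare a "crypto-" alias are reachable,
     * mirroring the kernel's request_module("crypto-%s", name). */
    request_module("crypto-%s", alg);
    return 0;
}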
static int lz4_init(struct crypto_tfm *tfm) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS); if (!ctx->lz4_comp_mem) return -ENOMEM; return 0; }
static int lz4_init(struct crypto_tfm *tfm) { struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS); if (!ctx->lz4_comp_mem) return -ENOMEM; return 0; }
C
linux
0
CVE-2011-4127
https://www.cvedetails.com/cve/CVE-2011-4127/
CWE-264
https://github.com/torvalds/linux/commit/0bfc96cb77224736dfa35c3c555d37b3646ef35e
0bfc96cb77224736dfa35c3c555d37b3646ef35e
block: fail SCSI passthrough ioctls on partition devices Linux allows executing the SG_IO ioctl on a partition or LVM volume, and will pass the command to the underlying block device. This is well-known, but it is also a large security problem when (via Unix permissions, ACLs, SELinux or a combination thereof) a program or user needs to be granted access only to part of the disk. This patch lets partitions forward a small set of harmless ioctls; others are logged with printk so that we can see which ioctls are actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred. Of course it was being sent to a (partition on a) hard disk, so it would have failed with ENOTTY and the patch isn't changing anything in practice. Still, I'm treating it specially to avoid spamming the logs. In principle, this restriction should include programs running with CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and /dev/sdb, it still should not be able to read/write outside the boundaries of /dev/sda2 independent of the capabilities. However, for now programs with CAP_SYS_RAWIO will still be allowed to send the ioctls. Their actions will still be logged. This patch does not affect the non-libata IDE driver. That driver however already tests for bd != bd->bd_contains before issuing some ioctl; it could be restricted further to forbid these ioctls even for programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO. Cc: [email protected] Cc: Jens Axboe <[email protected]> Cc: James Bottomley <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> [ Make it also print the command name when warning - Linus ] Signed-off-by: Linus Torvalds <[email protected]>
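The policy in the message, sketched in C: partitions forward only a short whitelist of harmless ioctls, and everything else (notably SG_IO passthrough) is logged and refused, so access to one partition cannot be parlayed into raw commands against the whole disk. The command numbers and error code are illustrative (the kernel uses its internal ENOIOCTLCMD):

#include <errno.h>
#include <stdio.h>

/* Illustrative command numbers; the real ones come from the kernel. */
enum { CDROM_GET_CAPABILITY_CMD = 1, SG_IO_CMD = 2 };

/* Partition-level filter: a short whitelist of harmless commands is
 * forwarded, everything else is logged and refused. */
static int verify_partition_ioctl(int cmd) {
    switch (cmd) {
    case CDROM_GET_CAPABILITY_CMD:
        return 0;                     /* harmless; forward it */
    default:
        fprintf(stderr, "refusing ioctl %d on a partition\n", cmd);
        return -ENOTTY;               /* stand-in for ENOIOCTLCMD */
    }
}

int main(void) {
    printf("%d\n", verify_partition_ioctl(CDROM_GET_CAPABILITY_CMD));
    printf("%d\n", verify_partition_ioctl(SG_IO_CMD));
    return 0;
}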
static void scsi_disk_put(struct scsi_disk *sdkp) { struct scsi_device *sdev = sdkp->device; mutex_lock(&sd_ref_mutex); put_device(&sdkp->dev); scsi_device_put(sdev); mutex_unlock(&sd_ref_mutex); }
static void scsi_disk_put(struct scsi_disk *sdkp) { struct scsi_device *sdev = sdkp->device; mutex_lock(&sd_ref_mutex); put_device(&sdkp->dev); scsi_device_put(sdev); mutex_unlock(&sd_ref_mutex); }
C
linux
0
CVE-2018-13785
https://www.cvedetails.com/cve/CVE-2018-13785/
CWE-190
https://github.com/glennrp/libpng/commit/8a05766cb74af05c04c53e6c9d60c13fc4d59bf2
8a05766cb74af05c04c53e6c9d60c13fc4d59bf2
[libpng16] Fix the calculation of row_factor in png_check_chunk_length (Bug report by Thuan Pham, SourceForge issue #278)
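The row_factor fix widens the arithmetic: with 32-bit math, a width near 0x7fffffff times the per-pixel byte count can wrap, even to 0, and a later division by row_factor faults. A minimal sketch of the calculation done in 64 bits; the formula follows the shape of png_check_chunk_length, but the exact terms are assumptions:

#include <stdint.h>
#include <stdio.h>

/* Per-row size arithmetic done in 64 bits so that a width near
 * 0x7fffffff cannot wrap to 0. */
static uint64_t row_factor(uint32_t width, unsigned channels,
                           unsigned bit_depth, int interlaced) {
    return (uint64_t)width
         * channels
         * (bit_depth > 8 ? 2 : 1)
         + 1
         + (interlaced ? 6 : 0);      /* never 0: later division is safe */
}

int main(void) {
    /* The problematic input class: this product wraps in 32-bit
     * arithmetic but not in 64-bit. */
    printf("%llu\n",
           (unsigned long long)row_factor(0x7fffffffu, 2, 16, 0));
    return 0;
}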
png_handle_eXIf(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length) { unsigned int i; png_debug(1, "in png_handle_eXIf"); if ((png_ptr->mode & PNG_HAVE_IHDR) == 0) png_chunk_error(png_ptr, "missing IHDR"); if (length < 2) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "too short"); return; } else if (info_ptr == NULL || (info_ptr->valid & PNG_INFO_eXIf) != 0) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "duplicate"); return; } info_ptr->free_me |= PNG_FREE_EXIF; info_ptr->eXIf_buf = png_voidcast(png_bytep, png_malloc_warn(png_ptr, length)); if (info_ptr->eXIf_buf == NULL) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "out of memory"); return; } for (i = 0; i < length; i++) { png_byte buf[1]; png_crc_read(png_ptr, buf, 1); info_ptr->eXIf_buf[i] = buf[0]; if (i == 1 && buf[0] != 'M' && buf[0] != 'I' && info_ptr->eXIf_buf[0] != buf[0]) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "incorrect byte-order specifier"); png_free(png_ptr, info_ptr->eXIf_buf); info_ptr->eXIf_buf = NULL; return; } } if (png_crc_finish(png_ptr, 0) != 0) return; png_set_eXIf_1(png_ptr, info_ptr, length, info_ptr->eXIf_buf); png_free(png_ptr, info_ptr->eXIf_buf); info_ptr->eXIf_buf = NULL; }
png_handle_eXIf(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length) { unsigned int i; png_debug(1, "in png_handle_eXIf"); if ((png_ptr->mode & PNG_HAVE_IHDR) == 0) png_chunk_error(png_ptr, "missing IHDR"); if (length < 2) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "too short"); return; } else if (info_ptr == NULL || (info_ptr->valid & PNG_INFO_eXIf) != 0) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "duplicate"); return; } info_ptr->free_me |= PNG_FREE_EXIF; info_ptr->eXIf_buf = png_voidcast(png_bytep, png_malloc_warn(png_ptr, length)); if (info_ptr->eXIf_buf == NULL) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "out of memory"); return; } for (i = 0; i < length; i++) { png_byte buf[1]; png_crc_read(png_ptr, buf, 1); info_ptr->eXIf_buf[i] = buf[0]; if (i == 1 && buf[0] != 'M' && buf[0] != 'I' && info_ptr->eXIf_buf[0] != buf[0]) { png_crc_finish(png_ptr, length); png_chunk_benign_error(png_ptr, "incorrect byte-order specifier"); png_free(png_ptr, info_ptr->eXIf_buf); info_ptr->eXIf_buf = NULL; return; } } if (png_crc_finish(png_ptr, 0) != 0) return; png_set_eXIf_1(png_ptr, info_ptr, length, info_ptr->eXIf_buf); png_free(png_ptr, info_ptr->eXIf_buf); info_ptr->eXIf_buf = NULL; }
C
libpng
0